diff --git a/.editorconfig b/.editorconfig index 321808ebaecf..103fe51237c8 100644 --- a/.editorconfig +++ b/.editorconfig @@ -16,6 +16,14 @@ profile = black indent_style = space indent_size = 2 +[*.md] +indent_style = space +indent_size = 2 + [*.yml] indent_style = space indent_size = 2 + +[*.toml] +indent_style = space +indent_size = 4 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 5270bf89ae33..7e0910c449e9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,7 +7,10 @@ README.md @jafermarq @tanertopal @danieljanes # Flower Baselines -/baselines @jafermarq @tanertopal @danieljanes +/baselines @jafermarq @danieljanes + +# Flower Benchmarks +/benchmarks @jafermarq @danieljanes # Flower Datasets /datasets @jafermarq @tanertopal @danieljanes @@ -27,3 +30,9 @@ README.md @jafermarq @tanertopal @danieljanes # GitHub Actions and Workflows /.github/workflows @Robert-Steiner @tanertopal @danieljanes /.github/actions @Robert-Steiner @tanertopal @danieljanes + +# Docker-related files +/.devcontainer @Robert-Steiner @Moep90 @tanertopal @danieljanes +**/Dockerfile @Robert-Steiner @Moep90 @tanertopal @danieljanes +**/*.Dockerfile @Robert-Steiner @Moep90 @tanertopal @danieljanes +/src/docker @Robert-Steiner @Moep90 @tanertopal @danieljanes diff --git a/.github/actions/bootstrap/action.yml b/.github/actions/bootstrap/action.yml index 4cde8dddfa3f..a8a98acdf304 100644 --- a/.github/actions/bootstrap/action.yml +++ b/.github/actions/bootstrap/action.yml @@ -3,7 +3,7 @@ description: "Bootstrap Python environment (install and configure Python version inputs: python-version: description: "Version range or exact version of Python or PyPy to use, using SemVer's version range syntax." 
- default: 3.8 + default: 3.9 pip-version: description: "Version of pip to be installed using pip" default: 24.1.2 diff --git a/.github/workflows/_docker-build.yml b/.github/workflows/_docker-build.yml index a3373c6e93fa..b5c27c9b4834 100644 --- a/.github/workflows/_docker-build.yml +++ b/.github/workflows/_docker-build.yml @@ -36,7 +36,7 @@ permissions: jobs: build: name: Build image - runs-on: ubuntu-22.04 + runs-on: ${{ matrix.platform.runner-os }} timeout-minutes: 180 outputs: build-id: ${{ steps.build-id.outputs.id }} @@ -44,10 +44,8 @@ jobs: fail-fast: true matrix: platform: [ - # build-push action and qemu use different platform names - # therefore we create a map - { name: "amd64", qemu: "", docker: "linux/amd64" }, - { name: "arm64", qemu: "arm64", docker: "linux/arm64" }, + { name: "amd64", docker: "linux/amd64", runner-os: "ubuntu-22.04" }, + { name: "arm64", docker: "linux/arm64", runner-os: "ubuntu-4-core-arm64" }, ] steps: - name: Create build id @@ -79,12 +77,6 @@ jobs: print(build_args, file=fh) print("EOF", file=fh) - - name: Set up QEMU - if: matrix.platform.qemu != '' - uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - with: - platforms: ${{ matrix.platform.qemu }} - - name: Extract metadata (tags, labels) for Docker id: meta uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 @@ -92,7 +84,7 @@ jobs: images: ${{ inputs.namespace-repository }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 - name: Login to Docker Hub uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 @@ -104,11 +96,12 @@ jobs: uses: Wandalen/wretry.action@6feedb7dedadeb826de0f45ff482b53b379a7844 # v3.5.0 id: build with: - action: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 + action: 
docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 attempt_limit: 60 # 60 attempts * (9 secs delay + 1 sec retry) = ~10 mins attempt_delay: 9000 # 9 secs with: | pull: true + sbom: true platforms: ${{ matrix.platform.docker }} context: "{{defaultContext}}:${{ inputs.file-dir }}" outputs: type=image,name=${{ inputs.namespace-repository }},push-by-digest=true,name-canonical=true,push=true @@ -122,7 +115,7 @@ jobs: touch "/tmp/digests/${digest#sha256:}" - name: Upload digest - uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: digests-${{ steps.build-id.outputs.id }}-${{ matrix.platform.name }} path: /tmp/digests/* @@ -152,7 +145,7 @@ jobs: tags: ${{ inputs.tags }} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 - name: Login to Docker Hub uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 diff --git a/.github/workflows/cache-cleanup.yml b/.github/workflows/cache-cleanup.yml index aa2da65d0a47..dca5505f7bf6 100644 --- a/.github/workflows/cache-cleanup.yml +++ b/.github/workflows/cache-cleanup.yml @@ -34,7 +34,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 - name: Cleanup caches by directories # Only keep caches that match the latest keys for each directory diff --git a/.github/workflows/datasets-e2e.yml b/.github/workflows/datasets-e2e.yml index 2a73a8538b14..dbd90635c74e 100644 --- a/.github/workflows/datasets-e2e.yml +++ b/.github/workflows/datasets-e2e.yml @@ -45,7 +45,7 @@ jobs: - name: Bootstrap uses: ./.github/actions/bootstrap with: - python-version: 3.8 + python-version: 3.9 - name: Install dependencies run: python -m poetry install - name: Run tests diff --git 
a/.github/workflows/datasets.yml b/.github/workflows/datasets.yml index ca5aa29248cf..860d944696f9 100644 --- a/.github/workflows/datasets.yml +++ b/.github/workflows/datasets.yml @@ -37,7 +37,7 @@ jobs: # In case of a mismatch, the job has to download Python to install it. # Note: Due to a bug in actions/setup-python, we have to put "3.10" in # quotes as it will otherwise assume "3.1" - python: [3.8, 3.9, '3.10', '3.11'] + python: ['3.9', '3.10', '3.11'] name: Python ${{ matrix.python }} diff --git a/.github/workflows/docker-build-main.yml b/.github/workflows/docker-build-main.yml new file mode 100644 index 000000000000..81ef845eae29 --- /dev/null +++ b/.github/workflows/docker-build-main.yml @@ -0,0 +1,69 @@ +name: Build Docker Images Main Branch + +on: + push: + branches: + - 'main' + +jobs: + parameters: + if: github.repository == 'adap/flower' + name: Collect docker build parameters + runs-on: ubuntu-22.04 + timeout-minutes: 10 + outputs: + pip-version: ${{ steps.versions.outputs.pip-version }} + setuptools-version: ${{ steps.versions.outputs.setuptools-version }} + flwr-version-ref: ${{ steps.versions.outputs.flwr-version-ref }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: ./.github/actions/bootstrap + id: bootstrap + + - id: versions + run: | + echo "pip-version=${{ steps.bootstrap.outputs.pip-version }}" >> "$GITHUB_OUTPUT" + echo "setuptools-version=${{ steps.bootstrap.outputs.setuptools-version }}" >> "$GITHUB_OUTPUT" + echo "flwr-version-ref=git+${{ github.server_url }}/${{ github.repository }}.git@${{ github.sha }}" >> "$GITHUB_OUTPUT" + + build-docker-base-images: + name: Build base images + if: github.repository == 'adap/flower' + uses: ./.github/workflows/_docker-build.yml + needs: parameters + with: + namespace-repository: flwr/base + file-dir: src/docker/base/ubuntu + build-args: | + PIP_VERSION=${{ needs.parameters.outputs.pip-version }} + SETUPTOOLS_VERSION=${{ 
needs.parameters.outputs.setuptools-version }} + FLWR_VERSION_REF=${{ needs.parameters.outputs.flwr-version-ref }} + tags: unstable + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} + + build-docker-binary-images: + name: Build binary images + if: github.repository == 'adap/flower' + uses: ./.github/workflows/_docker-build.yml + needs: build-docker-base-images + strategy: + fail-fast: false + matrix: + images: [ + { repository: "flwr/superlink", file_dir: "src/docker/superlink" }, + { repository: "flwr/supernode", file_dir: "src/docker/supernode" }, + { repository: "flwr/serverapp", file_dir: "src/docker/serverapp" }, + { repository: "flwr/superexec", file_dir: "src/docker/superexec" }, + { repository: "flwr/clientapp", file_dir: "src/docker/clientapp" } + ] + with: + namespace-repository: ${{ matrix.images.repository }} + file-dir: ${{ matrix.images.file_dir }} + build-args: BASE_IMAGE=unstable + tags: unstable + secrets: + dockerhub-user: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/docker-readme.yml b/.github/workflows/docker-readme.yml new file mode 100644 index 000000000000..29dd787d638e --- /dev/null +++ b/.github/workflows/docker-readme.yml @@ -0,0 +1,51 @@ +name: Update Docker READMEs + +on: + push: + branches: + - 'main' + paths: + - 'src/docker/**/README.md' + +jobs: + collect: + if: ${{ github.repository == 'adap/flower' }} + name: Collect Docker READMEs + runs-on: ubuntu-22.04 + timeout-minutes: 10 + outputs: + readme_files: ${{ steps.filter.outputs.readme_files }} + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 + id: filter + with: + list-files: "json" + filters: | + readme: + - 'src/docker/**/README.md' + + update: + if: ${{ needs.collect.outputs.readme_files != '' && 
toJson(fromJson(needs.collect.outputs.readme_files)) != '[]' }} + name: Update Docker READMEs + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: collect + strategy: + matrix: + readme_path: ${{ fromJSON(needs.collect.outputs.readme_files) }} + + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - id: repository + run: echo "name=$(basename $(dirname ${{ matrix.readme_path }}))" >> "$GITHUB_OUTPUT" + + - name: Docker Hub Description + uses: peter-evans/dockerhub-description@e98e4d1628a5f3be2be7c231e50981aee98723ae # v4.0.0 + with: + repository: flwr/${{ steps.repository.outputs.name }} + readme-filepath: ${{ matrix.readme_path }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 49e5b7bf1b36..5e93da349602 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -51,6 +51,64 @@ jobs: short_sha: ${{ steps.upload.outputs.SHORT_SHA }} dir: ${{ steps.upload.outputs.DIR }} + superexec: + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: wheel + strategy: + fail-fast: false + matrix: + python-version: ["3.9", "3.10", "3.11"] + directory: [e2e-bare-auth] + connection: [secure, insecure] + engine: [deployment-engine, simulation-engine] + authentication: [no-auth, client-auth] + exclude: + - connection: insecure + authentication: client-auth + name: | + SuperExec / + Python ${{ matrix.python-version }} / + ${{ matrix.connection }} / + ${{ matrix.authentication }} / + ${{ matrix.engine }} + defaults: + run: + working-directory: e2e/${{ matrix.directory }} + steps: + - uses: actions/checkout@v4 + - name: Bootstrap + uses: ./.github/actions/bootstrap + with: + python-version: ${{ matrix.python-version }} + poetry-skip: 'true' + - name: Install Flower from repo + if: ${{ github.repository != 'adap/flower' || github.event.pull_request.head.repo.fork || github.actor == 'dependabot[bot]' }} + working-directory: 
./ + run: | + if [[ "${{ matrix.engine }}" == "simulation-engine" ]]; then + python -m pip install ".[simulation]" + else + python -m pip install . + fi + - name: Download and install Flower wheel from artifact store + if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} + run: | + # Define base URL for wheel file + WHEEL_URL="https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }}" + if [[ "${{ matrix.engine }}" == "simulation-engine" ]]; then + python -m pip install "flwr[simulation] @ ${WHEEL_URL}" + else + python -m pip install "${WHEEL_URL}" + fi + - name: > + Run Exec API test / + ${{ matrix.connection }} / + ${{ matrix.authentication }} / + ${{ matrix.engine }} + working-directory: e2e/${{ matrix.directory }} + run: ./../test_exec_api.sh "${{ matrix.connection }}" "${{ matrix.authentication}}" "${{ matrix.engine }}" + frameworks: runs-on: ubuntu-22.04 timeout-minutes: 10 @@ -127,7 +185,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 - name: Install build tools run: | python -m pip install -U pip==23.3.1 @@ -146,8 +204,6 @@ jobs: if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} run: | python -m pip install https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }} - - name: Install e2e components - run: pip install . 
- name: Download dataset if: ${{ matrix.dataset }} run: python -c "${{ matrix.dataset }}" @@ -172,7 +228,7 @@ jobs: run: ./../test_superlink.sh bare sqlite - name: Run driver test with client authentication if: ${{ matrix.directory == 'e2e-bare-auth' }} - run: ./../test_superlink.sh bare client-auth + run: ./../test_superlink.sh "${{ matrix.directory }}" client-auth - name: Run reconnection test with SQLite database if: ${{ matrix.directory == 'e2e-bare' }} run: ./../test_reconnection.sh sqlite @@ -255,3 +311,39 @@ jobs: if grep -q "ERROR" flwr_output.log; then exit 1 fi + + build_and_install: + runs-on: ubuntu-22.04 + timeout-minutes: 10 + needs: wheel + strategy: + matrix: + framework: ["numpy"] + python-version: ["3.9", "3.10", "3.11"] + + name: | + Build & Install / + Python ${{ matrix.python-version }} / + ${{ matrix.framework }} + + steps: + - uses: actions/checkout@v4 + - name: Bootstrap + uses: ./.github/actions/bootstrap + with: + python-version: ${{ matrix.python-version }} + poetry-skip: 'true' + - name: Install Flower from repo + if: ${{ github.repository != 'adap/flower' || github.event.pull_request.head.repo.fork || github.actor == 'dependabot[bot]' }} + run: | + python -m pip install . 
+ - name: Install Flower wheel from artifact store + if: ${{ github.repository == 'adap/flower' && !github.event.pull_request.head.repo.fork && github.actor != 'dependabot[bot]' }} + run: | + python -m pip install https://${{ env.ARTIFACT_BUCKET }}/py/${{ needs.wheel.outputs.dir }}/${{ needs.wheel.outputs.short_sha }}/${{ needs.wheel.outputs.whl_path }} + - name: Create project, build, and install it + run: | + flwr new tmp-${{ matrix.framework }} --framework ${{ matrix.framework }} --username gh_ci + cd tmp-${{ matrix.framework }} + flwr build + flwr install *.fab diff --git a/.github/workflows/framework-release.yml b/.github/workflows/framework-release.yml index 812d5b1e398e..e608329872de 100644 --- a/.github/workflows/framework-release.yml +++ b/.github/workflows/framework-release.yml @@ -16,6 +16,8 @@ jobs: if: ${{ github.repository == 'adap/flower' }} name: Publish release runs-on: ubuntu-22.04 + outputs: + flwr-version: ${{ steps.publish.outputs.flwr-version }} steps: - name: Checkout code uses: actions/checkout@v4 @@ -26,10 +28,12 @@ jobs: uses: ./.github/actions/bootstrap - name: Get artifacts and publish + id: publish env: GITHUB_REF: ${{ github.ref }} run: | TAG_NAME=$(echo "${GITHUB_REF_NAME}" | cut -c2-) + echo "flwr-version=$TAG_NAME" >> "$GITHUB_OUTPUT" wheel_name="flwr-${TAG_NAME}-py3-none-any.whl" tar_name="flwr-${TAG_NAME}.tar.gz" @@ -67,8 +71,7 @@ jobs: - id: matrix run: | - FLWR_VERSION=$(poetry version -s) - python dev/build-docker-image-matrix.py --flwr-version "${FLWR_VERSION}" > matrix.json + python dev/build-docker-image-matrix.py --flwr-version "${{ needs.publish.outputs.flwr-version }}" > matrix.json echo "matrix=$(cat matrix.json)" >> $GITHUB_OUTPUT build-base-images: diff --git a/.github/workflows/framework.yml b/.github/workflows/framework.yml index a5d2b71f7beb..a8ff69204b58 100644 --- a/.github/workflows/framework.yml +++ b/.github/workflows/framework.yml @@ -25,7 +25,7 @@ jobs: # In case of a mismatch, the job has to download Python 
to install it. # Note: Due to a bug in actions/setup-python, we have to put "3.10" in # quotes as it will otherwise assume "3.1" - python: [3.8, 3.9, '3.10', '3.11'] + python: ['3.9', '3.10', '3.11'] name: Python ${{ matrix.python }} diff --git a/.github/workflows/update_translations.yml b/.github/workflows/update_translations.yml new file mode 100644 index 000000000000..9419f4aaef25 --- /dev/null +++ b/.github/workflows/update_translations.yml @@ -0,0 +1,79 @@ +name: Translations + +on: + schedule: + - cron: '0 0 * * *' # Runs every day at midnight + workflow_dispatch: # Allows to manually trigger the workflow + +jobs: + update-and-pr: + runs-on: ubuntu-22.04 + permissions: + contents: write + pull-requests: write + env: + branch-name: auto-update-trans-text + name: Update text + steps: + - uses: actions/checkout@v4 + + - name: Bootstrap + uses: ./.github/actions/bootstrap + with: + python-version: '3.10' + + - name: Install dependencies + run: | + python -m poetry install + pip install sphinx==7.3.7 + + - name: Install pandoc + uses: nikeee/setup-pandoc@v1 + + - name: Update text and translations for all locales + run: | + cd doc + make update-text + for langDir in locales/*; do + if [ -d "$langDir" ]; then + lang=$(basename $langDir) + echo "Updating language $lang" + make update-lang lang=$lang + fi + done + + - name: Commit changes + run: | + git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com" + git config --local user.name "github-actions[bot]" + git add doc/locales + git commit -m "Update text and language files" + continue-on-error: true + + - name: Calculate diff # Even without doc changes the update-lang command will generate 228 additions and 60 deletions, so we only want to open a PR when there is more + id: calculate_diff + run: | + additions=$(git diff --numstat HEAD^1 | awk '{s+=$1} END {print s}') + deletions=$(git diff --numstat HEAD^1 | awk '{s+=$2} END {print s}') + echo "Additions: $additions" + echo "Deletions: 
$deletions" + echo "additions=$additions" >> $GITHUB_OUTPUT + echo "deletions=$deletions" >> $GITHUB_OUTPUT + + - name: Push changes + if: steps.calculate_diff.outputs.additions > 228 && steps.calculate_diff.outputs.deletions > 60 + uses: ad-m/github-push-action@master + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + branch: '${{ env.branch-name }}' + + - name: Create Pull Request + if: steps.calculate_diff.outputs.additions > 228 && steps.calculate_diff.outputs.deletions > 60 + uses: peter-evans/create-pull-request@v6 + with: + token: ${{ secrets.GITHUB_TOKEN }} + branch: '${{ env.branch-name }}' + delete-branch: true + title: 'docs(framework:skip) Update source texts for translations (automated)' + body: 'This PR is auto-generated to update text and language files.' + draft: false diff --git a/README.md b/README.md index 1dd686e5f1b6..7aa73fe609bb 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](https://github.com/adap/flower/blob/main/CONTRIBUTING.md) ![Build](https://github.com/adap/flower/actions/workflows/framework.yml/badge.svg) [![Downloads](https://static.pepy.tech/badge/flwr)](https://pepy.tech/project/flwr) +[![Docker Hub](https://img.shields.io/badge/Docker%20Hub-flwr-blue)](https://hub.docker.com/u/flwr) [![Slack](https://img.shields.io/badge/Chat-Slack-red)](https://flower.ai/join-slack) Flower (`flwr`) is a framework for building federated learning systems. 
The @@ -101,6 +102,7 @@ Flower Baselines is a collection of community-contributed projects that reproduc - [FedNova](https://github.com/adap/flower/tree/main/baselines/fednova) - [HeteroFL](https://github.com/adap/flower/tree/main/baselines/heterofl) - [FedAvgM](https://github.com/adap/flower/tree/main/baselines/fedavgm) +- [FedRep](https://github.com/adap/flower/tree/main/baselines/fedrep) - [FedStar](https://github.com/adap/flower/tree/main/baselines/fedstar) - [FedWav2vec2](https://github.com/adap/flower/tree/main/baselines/fedwav2vec2) - [FjORD](https://github.com/adap/flower/tree/main/baselines/fjord) @@ -143,11 +145,10 @@ Other [examples](https://github.com/adap/flower/tree/main/examples): - [PyTorch: From Centralized to Federated](https://github.com/adap/flower/tree/main/examples/pytorch-from-centralized-to-federated) - [Vertical FL](https://github.com/adap/flower/tree/main/examples/vertical-fl) - [Federated Finetuning of OpenAI's Whisper](https://github.com/adap/flower/tree/main/examples/whisper-federated-finetuning) -- [Federated Finetuning of Large Language Model](https://github.com/adap/flower/tree/main/examples/llm-flowertune) -- [Federated Finetuning of a Vision Transformer](https://github.com/adap/flower/tree/main/examples/vit-finetune) +- [Federated Finetuning of Large Language Model](https://github.com/adap/flower/tree/main/examples/flowertune-llm) +- [Federated Finetuning of a Vision Transformer](https://github.com/adap/flower/tree/main/examples/flowertune-vit) - [Advanced Flower with TensorFlow/Keras](https://github.com/adap/flower/tree/main/examples/advanced-tensorflow) - [Advanced Flower with PyTorch](https://github.com/adap/flower/tree/main/examples/advanced-pytorch) -- Single-Machine Simulation of Federated Learning Systems ([PyTorch](https://github.com/adap/flower/tree/main/examples/simulation-pytorch)) ([Tensorflow](https://github.com/adap/flower/tree/main/examples/simulation-tensorflow)) - [Comprehensive 
Flower+XGBoost](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive) - [Flower through Docker Compose and with Grafana dashboard](https://github.com/adap/flower/tree/main/examples/flower-via-docker-compose) - [Flower with KaplanMeierFitter from the lifelines library](https://github.com/adap/flower/tree/main/examples/federated-kaplan-meier-fitter) diff --git a/baselines/README.md b/baselines/README.md index 3a84df02d8de..75bcccb68b2a 100644 --- a/baselines/README.md +++ b/baselines/README.md @@ -1,10 +1,9 @@ # Flower Baselines +> [!NOTE] > We are changing the way we structure the Flower baselines. While we complete the transition to the new format, you can still find the existing baselines in the `flwr_baselines` directory. Currently, you can make use of baselines for [FedAvg](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/flwr_baselines/publications/fedavg_mnist), [FedOpt](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/flwr_baselines/publications/adaptive_federated_optimization), and [LEAF-FEMNIST](https://github.com/adap/flower/tree/main/baselines/flwr_baselines/flwr_baselines/publications/leaf/femnist). -> The documentation below has been updated to reflect the new way of using Flower baselines. - ## Structure @@ -15,17 +14,15 @@ baselines// ├── README.md ├── pyproject.toml └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files + └── *.py # several .py files ``` -Please note that some baselines might include additional files (e.g. a `requirements.txt`) or a hierarchy of `.yaml` files for [Hydra](https://hydra.cc/). ## Running the baselines -Each baseline is self-contained in its own directory. Furthermore, each baseline defines its own Python environment using [Poetry](https://python-poetry.org/docs/) via a `pyproject.toml` file and [`pyenv`](https://github.com/pyenv/pyenv). 
If you haven't setup `Poetry` and `pyenv` already on your machine, please take a look at the [Documentation](https://flower.ai/docs/baselines/how-to-use-baselines.html#setting-up-your-machine) for a guide on how to do so. +> [!NOTE] +> We are in the process of migrating all baselines to use `flwr run`. Those baselines that remain using the previous system (i.e. using [Poetry](https://python-poetry.org/), [Hydra](https://hydra.cc/) and [start_simulation](https://flower.ai/docs/framework/ref-api/flwr.simulation.start_simulation.html)) might require you to first setup `Poetry` and `pyenv` already on your machine, please take a look at the [Documentation](https://flower.ai/docs/baselines/how-to-use-baselines.html#setting-up-your-machine) for a guide on how to do so. -Assuming `pyenv` and `Poetry` are already installed on your system. Running a baseline can be done by: +Each baseline is self-contained in its own directory. To run a baseline: 1. Cloning the flower repository @@ -34,11 +31,7 @@ Assuming `pyenv` and `Poetry` are already installed on your system. Running a ba ``` 2. Navigate inside the directory of the baseline you'd like to run. -3. Follow the `[Environment Setup]` instructions in the `README.md`. In most cases this will require you to just do: - - ```bash - poetry install - ``` +3. Follow the `[Environment Setup]` instructions in the `README.md`. 4. Run the baseline as indicated in the `[Running the Experiments]` section in the `README.md` or in the `[Expected Results]` section to reproduce the experiments in the paper. @@ -46,17 +39,22 @@ Assuming `pyenv` and `Poetry` are already installed on your system. Running a ba Do you have a new federated learning paper and want to add a new baseline to Flower? Or do you want to add an experiment to an existing baseline paper? Great, we really appreciate your contribution !! 
+> [!TIP] +> A more verbose version of these steps can be found in the [Flower Baselines documentation](https://flower.ai/docs/baselines/how-to-contribute-baselines.html). + The steps to follow are: +1. Create a new Python 3.10 environment and install Flower (`pip install flwr`) 1. Fork the Flower repo and clone it into your machine. -2. Navigate to the `baselines/` directory, choose a single-word (and **lowercase**) name for your baseline, and from there run: +2. Navigate to the `baselines/` directory, from there and with your environment activated, run: ```bash - # This will create a new directory with the same structure as `baseline_template`. - ./dev/create-baseline.sh + # Choose option "Flower Baseline" when prompted + flwr new ``` -3. Then, go inside your baseline directory and continue with the steps detailed in `EXTENDED_README.md` and `README.md`. -4. Once your code is ready and you have checked that following the instructions in your `README.md` the Python environment can be created correctly and that running the code following your instructions can reproduce the experiments in the paper, you just need to create a Pull Request (PR). Then, the process to merge your baseline into the Flower repo will begin! +3. Then, go inside your baseline directory and continue with the steps detailed in the `README.md`. +4. Once your code is ready, check that you have completed all the sections in the `README.md` and that, if a new environment is created, your baseline still runs (i.e. play the role of a person running the baseline you want to contribute). +5. Create a Pull Request (PR). Then, the process to merge your baseline into the Flower repo will begin! 
Further resources: diff --git a/baselines/baseline_template/EXTENDED_README.md b/baselines/baseline_template/EXTENDED_README.md deleted file mode 100644 index 9c8f5bc72fa9..000000000000 --- a/baselines/baseline_template/EXTENDED_README.md +++ /dev/null @@ -1,123 +0,0 @@ - -# Extended Readme - -> The baselines are expected to run in a machine running Ubuntu 22.04 - -While `README.md` should include information about the baseline you implement and how to run it, this _extended_ readme provides info on what's the expected directory structure for a new baseline and more generally the instructions to follow before your baseline can be merged into the Flower repository. Please follow closely these instructions. It is likely that you have already completed steps 1-2. - -1. Fork the Flower repository and clone it. -2. Navigate to the `baselines/` directory and from there run: - ```bash - # This will create a new directory with the same structure as this `baseline_template` directory. - ./dev/create-baseline.sh - ``` -3. All your code and configs should go into a sub-directory with the same name as the name of your baseline. - * The sub-directory contains a series of Python scripts that you can edit. Please stick to these files and consult with us if you need additional ones. - * There is also a basic config structure in `/conf` ready be parsed by [Hydra](https://hydra.cc/) when executing your `main.py`. -4. Therefore, the directory structure in your baseline should look like: - ```bash - baselines/ - ├── README.md # describes your baseline and everything needed to use it - ├── EXTENDED_README.md # to remove before creating your PR - ├── pyproject.toml # details your Python environment - └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files - - ``` -> :warning: Make sure the variable `name` in `pyproject.toml` is set to the name of the sub-directory containing all your code. - -5. 
Add your dependencies to the `pyproject.toml` (see below a few examples on how to do it). Read more about Poetry below in this `EXTENDED_README.md`. -6. Regularly check that your coding style and the documentation you add follow good coding practices. To test whether your code meets the requirements, please run the following: - ```bash - # After activating your environment and from your baseline's directory - cd .. # to go to the top-level directory of all baselines - ./dev/test-baseline.sh - ./dev/test-baseline-structure.sh - ``` - Both `test-baseline.sh` and `test-baseline-structure.sh` will also be automatically run when you create a PR, and both tests need to pass for the baseline to be merged. - To automatically solve some formatting issues and apply easy fixes, please run the formatting script: - ```bash - # After activating your environment and from your baseline's directory - cd .. # to go to the top-level directory of all baselines - ./dev/format-baseline.sh - ``` -7. Ensure that the Python environment for your baseline can be created without errors by simply running `poetry install` and that this is properly described later when you complete the `Environment Setup` section in `README.md`. This is specially important if your environment requires additional steps after doing `poetry install`. -8. Ensure that your baseline runs with default arguments by running `poetry run python -m .main`. Then, describe this and other forms of running your code in the `Running the Experiments` section in `README.md`. -9. Once your code is ready and you have checked: - * that following the instructions in your `README.md` the Python environment can be created correctly - - * that running the code following your instructions can reproduce the experiments in the paper - - , then you just need to create a Pull Request (PR) to kickstart the process of merging your baseline into the Flower repository. 
- -> Once you are happy to merge your baseline contribution, please delete this `EXTENDED_README.md` file. - - -## About Poetry - -We use Poetry to manage the Python environment for each individual baseline. You can follow the instructions [here](https://python-poetry.org/docs/) to install Poetry in your machine. - - -### Specifying a Python Version (optional) -By default, Poetry will use the Python version in your system. In some settings, you might want to specify a particular version of Python to use inside your Poetry environment. You can do so with [`pyenv`](https://github.com/pyenv/pyenv). Check the documentation for the different ways of installing `pyenv`, but one easy way is using the [automatic installer](https://github.com/pyenv/pyenv-installer): -```bash -curl https://pyenv.run | bash # then, don't forget links to your .bashrc/.zshrc -``` - -You can then install any Python version with `pyenv install ` (e.g. `pyenv install 3.9.17`). Then, in order to use that version for your baseline, you'd do the following: - -```bash -# cd to your baseline directory (i.e. where the `pyproject.toml` is) -pyenv local - -# set that version for poetry -poetry env use - -# then you can install your Poetry environment (see the next setp) -``` - -### Installing Your Environment -With the Poetry tool already installed, you can create an environment for this baseline with commands: -```bash -# run this from the same directory as the `pyproject.toml` file is -poetry install -``` - -This will create a basic Python environment with just Flower and additional packages, including those needed for simulation. Next, you should add the dependencies for your code. It is **critical** that you fix the version of the packages you use using a `=` not a `=^`. You can do so via [`poetry add`](https://python-poetry.org/docs/cli/#add). 
Below are some examples: - -```bash -# For instance, if you want to install tqdm -poetry add tqdm==4.65.0 - -# If you already have a requirements.txt, you can add all those packages (but ensure you have fixed the version) in one go as follows: -poetry add $( cat requirements.txt ) -``` -With each `poetry add` command, the `pyproject.toml` gets automatically updated so you don't need to keep that `requirements.txt` as part of this baseline. - - -More critically however, is adding your ML framework of choice to the list of dependencies. For some frameworks you might be able to do so with the `poetry add` command. Check [the Poetry documentation](https://python-poetry.org/docs/cli/#add) for how to add packages in various ways. For instance, let's say you want to use PyTorch: - -```bash -# with plain `pip` you'd run a command such as: -pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu117 - -# to add the same 3 dependencies to your Poetry environment you'd need to add the URL to the wheel that the above pip command auto-resolves for you. -# You can find those wheels in `https://download.pytorch.org/whl/cu117`. Copy the link and paste it after the `poetry add` command. -# For instance to add `torch==1.13.1+cu117` and a x86 Linux system with Python3.8 you'd: -poetry add https://download.pytorch.org/whl/cu117/torch-1.13.1%2Bcu117-cp38-cp38-linux_x86_64.whl -# you'll need to repeat this for both `torchvision` and `torchaudio` -``` -The above is just an example of how you can add these dependencies. Please refer to the Poetry documentation to extra reference. - -If all attempts fail, you can still install packages via standard `pip`. You'd first need to source/activate your Poetry environment. 
-```bash -# first ensure you have created your environment -# and installed the base packages provided in the template -poetry install - -# then activate it -poetry shell -``` -Now you are inside your environment (pretty much as when you use `virtualenv` or `conda`) so you can install further packages with `pip`. Please note that, unlike with `poetry add`, these extra requirements won't be captured by `pyproject.toml`. Therefore, please ensure that you provide all instructions needed to: (1) create the base environment with Poetry and (2) install any additional dependencies via `pip` when you complete your `README.md`. \ No newline at end of file diff --git a/baselines/baseline_template/README.md b/baselines/baseline_template/README.md deleted file mode 100644 index ee6e1e96976f..000000000000 --- a/baselines/baseline_template/README.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: title of the paper -url: URL to the paper page (not the pdf) -labels: [label1, label2] # please add between 4 and 10 single-word (maybe two-words) labels (e.g. system heterogeneity, image classification, asynchronous, weight sharing, cross-silo). Do not use "" -dataset: [dataset1, dataset2] # list of datasets you include in your baseline. Do not use "" ---- - -# :warning: *_Title of your baseline_* - -> Note: If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. - -> :warning: This is the template to follow when creating a new Flower Baseline. Please follow the instructions in `EXTENDED_README.md` - -> :warning: Please follow the instructions carefully. You can see the [FedProx-MNIST baseline](https://github.com/adap/flower/tree/main/baselines/fedprox) as an example of a baseline that followed this guide. - -> :warning: Please complete the metadata section at the very top of this README. This generates a table at the top of the file that will facilitate indexing baselines. 
- -**Paper:** :warning: *_add the URL of the paper page (not to the .pdf). For instance if you link a paper on ArXiv, add here the URL to the abstract page (e.g. https://arxiv.org/abs/1512.03385). If your paper is in from a journal or conference proceedings, please follow the same logic._* - -**Authors:** :warning: *_list authors of the paper_* - -**Abstract:** :warning: *_add here the abstract of the paper you are implementing_* - - -## About this baseline - -**What’s implemented:** :warning: *_Concisely describe what experiment(s) in the publication can be replicated by running the code. Please only use a few sentences. Start with: “The code in this directory …”_* - -**Datasets:** :warning: *_List the datasets you used (if you used a medium to large dataset, >10GB please also include the sizes of the dataset)._* - -**Hardware Setup:** :warning: *_Give some details about the hardware (e.g. a server with 8x V100 32GB and 256GB of RAM) you used to run the experiments for this baseline. Someone out there might not have access to the same resources you have so, could list the absolute minimum hardware needed to run the experiment in a reasonable amount of time ? (e.g. minimum is 1x 16GB GPU otherwise a client model can’t be trained with a sufficiently large batch size). Could you test this works too?_* - -**Contributors:** :warning: *_let the world know who contributed to this baseline. This could be either your name, your name and affiliation at the time, or your GitHub profile name if you prefer. If multiple contributors signed up for this baseline, please list yourself and your colleagues_* - - -## Experimental Setup - -**Task:** :warning: *_what’s the primary task that is being federated? (e.g. image classification, next-word prediction). If you have experiments for several, please list them_* - -**Model:** :warning: *_provide details about the model you used in your experiments (if more than use a list). 
If your model is small, describing it as a table would be :100:. Some FL methods do not use an off-the-shelve model (e.g. ResNet18) instead they create your own. If this is your case, please provide a summary here and give pointers to where in the paper (e.g. Appendix B.4) is detailed._* - -**Dataset:** :warning: *_Earlier you listed already the datasets that your baseline uses. Now you should include a breakdown of the details about each of them. Please include information about: how the dataset is partitioned (e.g. LDA with alpha 0.1 as default and all clients have the same number of training examples; or each client gets assigned a different number of samples following a power-law distribution with each client only instances of 2 classes)? if your dataset is naturally partitioned just state “naturally partitioned”; how many partitions there are (i.e. how many clients)? Please include this an all information relevant about the dataset and its partitioning into a table._* - -**Training Hyperparameters:** :warning: *_Include a table with all the main hyperparameters in your baseline. Please show them with their default value._* - - -## Environment Setup - -:warning: _The Python environment for all baselines should follow these guidelines in the `EXTENDED_README`. Specify the steps to create and activate your environment. If there are any external system-wide requirements, please include instructions for them too. 
These instructions should be comprehensive enough so anyone can run them (if non standard, describe them step-by-step)._ - - -## Running the Experiments - -:warning: _Provide instructions on the steps to follow to run all the experiments._ -```bash -# The main experiment implemented in your baseline using default hyperparameters (that should be setup in the Hydra configs) should run (including dataset download and necessary partitioning) by executing the command: - -poetry run python -m .main # where is the name of this directory and that of the only sub-directory in this directory (i.e. where all your source code is) - -# If you are using a dataset that requires a complicated download (i.e. not using one natively supported by TF/PyTorch) + preprocessing logic, you might want to tell people to run one script first that will do all that. Please ensure the download + preprocessing can be configured to suit (at least!) a different download directory (and use as default the current directory). The expected command to run to do this is: - -poetry run python -m .dataset_preparation - -# It is expected that you baseline supports more than one dataset and different FL settings (e.g. different number of clients, dataset partitioning methods, etc). Please provide a list of commands showing how these experiments are run. Include also a short explanation of what each one does. Here it is expected you'll be using the Hydra syntax to override the default config. - -poetry run python -m .main -. -. -. -poetry run python -m .main -``` - - -## Expected Results - -:warning: _Your baseline implementation should replicate several of the experiments in the original paper. Please include here the exact command(s) needed to run each of those experiments followed by a figure (e.g. a line plot) or table showing the results you obtained when you ran the code. Below is an example of how you can present this. 
Please add command followed by results for all your experiments._ - -```bash -# it is likely that for one experiment you need to sweep over different hyperparameters. You are encouraged to use Hydra's multirun functionality for this. This is an example of how you could achieve this for some typical FL hyperparameteres - -poetry run python -m .main --multirun num_client_per_round=5,10,50 dataset=femnist,cifar10 -# the above command will run a total of 6 individual experiments (because 3client_configs x 2datasets = 6 -- you can think of it as a grid). - -[Now show a figure/table displaying the results of the above command] - -# add more commands + plots for additional experiments. -``` diff --git a/baselines/baseline_template/baseline_template/client.py b/baselines/baseline_template/baseline_template/client.py deleted file mode 100644 index d2e2206111f3..000000000000 --- a/baselines/baseline_template/baseline_template/client.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Define your client class and a function to construct such clients. - -Please overwrite `flwr.client.NumPyClient` or `flwr.client.Client` and create a function -to instantiate your client. 
-""" diff --git a/baselines/baseline_template/baseline_template/conf/base.yaml b/baselines/baseline_template/baseline_template/conf/base.yaml deleted file mode 100644 index 2d65b3b989b2..000000000000 --- a/baselines/baseline_template/baseline_template/conf/base.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -# this is the config that will be loaded as default by main.py -# Please follow the provided structure (this will ensuring all baseline follow -# a similar configuration structure and hence be easy to customise) - -dataset: - # dataset config - -model: - # model config - -strategy: - _target_: # points to your strategy (either custom or exiting in Flower) - # rest of strategy config - -client: - # client config diff --git a/baselines/baseline_template/baseline_template/dataset.py b/baselines/baseline_template/baseline_template/dataset.py deleted file mode 100644 index 5e436abe12fb..000000000000 --- a/baselines/baseline_template/baseline_template/dataset.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Handle basic dataset creation. - -In case of PyTorch it should return dataloaders for your dataset (for both the clients -and the server). If you are using a custom dataset class, this module is the place to -define it. If your dataset requires to be downloaded (and this is not done -automatically -- e.g. as it is the case for many dataset in TorchVision) and -partitioned, please include all those functions and logic in the -`dataset_preparation.py` module. You can use all those functions from functions/methods -defined here of course. -""" diff --git a/baselines/baseline_template/baseline_template/dataset_preparation.py b/baselines/baseline_template/baseline_template/dataset_preparation.py deleted file mode 100644 index bd3440b9276b..000000000000 --- a/baselines/baseline_template/baseline_template/dataset_preparation.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Handle the dataset partitioning and (optionally) complex downloads. 
- -Please add here all the necessary logic to either download, uncompress, pre/post-process -your dataset (or all of the above). If the desired way of running your baseline is to -first download the dataset and partition it and then run the experiments, please -uncomment the lines below and tell us in the README.md (see the "Running the Experiment" -block) that this file should be executed first. -""" -# import hydra -# from hydra.core.hydra_config import HydraConfig -# from hydra.utils import call, instantiate -# from omegaconf import DictConfig, OmegaConf - - -# @hydra.main(config_path="conf", config_name="base", version_base=None) -# def download_and_preprocess(cfg: DictConfig) -> None: -# """Does everything needed to get the dataset. - -# Parameters -# ---------- -# cfg : DictConfig -# An omegaconf object that stores the hydra config. -# """ - -# ## 1. print parsed config -# print(OmegaConf.to_yaml(cfg)) - -# # Please include here all the logic -# # Please use the Hydra config style as much as possible specially -# # for parts that can be customised (e.g. how data is partitioned) - -# if __name__ == "__main__": - -# download_and_preprocess() diff --git a/baselines/baseline_template/baseline_template/main.py b/baselines/baseline_template/baseline_template/main.py deleted file mode 100644 index 25ae1bec6a10..000000000000 --- a/baselines/baseline_template/baseline_template/main.py +++ /dev/null @@ -1,57 +0,0 @@ -"""Create and connect the building blocks for your experiments; start the simulation. - -It includes processioning the dataset, instantiate strategy, specify how the global -model is going to be evaluated, etc. At the end, this script saves the results. -""" -# these are the basic packages you'll need here -# feel free to remove some if aren't needed -import hydra -from omegaconf import DictConfig, OmegaConf - - -@hydra.main(config_path="conf", config_name="base", version_base=None) -def main(cfg: DictConfig) -> None: - """Run the baseline. 
- - Parameters - ---------- - cfg : DictConfig - An omegaconf object that stores the hydra config. - """ - # 1. Print parsed config - print(OmegaConf.to_yaml(cfg)) - - # 2. Prepare your dataset - # here you should call a function in datasets.py that returns whatever is needed to: - # (1) ensure the server can access the dataset used to evaluate your model after - # aggregation - # (2) tell each client what dataset partitions they should use (e.g. a this could - # be a location in the file system, a list of dataloader, a list of ids to extract - # from a dataset, it's up to you) - - # 3. Define your clients - # Define a function that returns another function that will be used during - # simulation to instantiate each individual client - # client_fn = client.() - - # 4. Define your strategy - # pass all relevant argument (including the global dataset used after aggregation, - # if needed by your method.) - # strategy = instantiate(cfg.strategy, ) - - # 5. Start Simulation - # history = fl.simulation.start_simulation() - - # 6. Save your results - # Here you can save the `history` returned by the simulation and include - # also other buffers, statistics, info needed to be saved in order to later - # on generate the plots you provide in the README.md. You can for instance - # access elements that belong to the strategy for example: - # data = strategy.get_my_custom_data() -- assuming you have such method defined. - # Hydra will generate for you a directory each time you run the code. You - # can retrieve the path to that directory with this: - # save_path = HydraConfig.get().runtime.output_dir - - -if __name__ == "__main__": - main() diff --git a/baselines/baseline_template/baseline_template/models.py b/baselines/baseline_template/baseline_template/models.py deleted file mode 100644 index 71fa553d1f59..000000000000 --- a/baselines/baseline_template/baseline_template/models.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Define our models, and training and eval functions. 
- -If your model is 100% off-the-shelf (e.g. directly from torchvision without requiring -modifications) you might be better off instantiating your model directly from the Hydra -config. In this way, swapping your model for another one can be done without changing -the python code at all -""" diff --git a/baselines/baseline_template/baseline_template/server.py b/baselines/baseline_template/baseline_template/server.py deleted file mode 100644 index 2fd7d42cde5a..000000000000 --- a/baselines/baseline_template/baseline_template/server.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Create global evaluation function. - -Optionally, also define a new Server class (please note this is not needed in most -settings). -""" diff --git a/baselines/baseline_template/baseline_template/strategy.py b/baselines/baseline_template/baseline_template/strategy.py deleted file mode 100644 index 17436c401c30..000000000000 --- a/baselines/baseline_template/baseline_template/strategy.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Optionally define a custom strategy. - -Needed only when the strategy is not yet implemented in Flower or because you want to -extend or modify the functionality of an existing strategy. -""" diff --git a/baselines/baseline_template/baseline_template/utils.py b/baselines/baseline_template/baseline_template/utils.py deleted file mode 100644 index 9a831719d623..000000000000 --- a/baselines/baseline_template/baseline_template/utils.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Define any utility function. - -They are not directly relevant to the other (more FL specific) python modules. For -example, you may define here things like: loading a model from a checkpoint, saving -results, plotting. 
-""" diff --git a/baselines/dev/create-baseline.sh b/baselines/dev/create-baseline.sh deleted file mode 100755 index 53cd79c569aa..000000000000 --- a/baselines/dev/create-baseline.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# This script duplicates the `baseline_template` directory and changes its name -# to the one you specify when running this script. That name is also used to -# rename the subdirectory inside your new baseline directory as well as to set -# the Python package name that Poetry will build - -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ - -template="baseline_template" -name=$1 - -# copying directory -echo "Copying '$template' and renaming it to '$name'" -cp -r $template $name - -# renaming sub-directory -echo "Renaming sub-directory as '$name'" -mv $name/$template $name/$name - -# adjusting package name in pyproject.toml -cd $name -if [[ "$OSTYPE" == "darwin"* ]]; then - sed -i '' -e "s//$name/" pyproject.toml -else - sed -i -e "s//$name/" pyproject.toml -fi - -echo "!!! Your directory for your baseline '$name' is ready." 
diff --git a/baselines/doc/source/conf.py b/baselines/doc/source/conf.py index ecc3482c6fce..a2667dbcf006 100644 --- a/baselines/doc/source/conf.py +++ b/baselines/doc/source/conf.py @@ -37,7 +37,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.10.0" +release = "1.13.0" # -- General configuration --------------------------------------------------- diff --git a/baselines/doc/source/how-to-contribute-baselines.rst b/baselines/doc/source/how-to-contribute-baselines.rst index b568e73f1c11..429ac714c1aa 100644 --- a/baselines/doc/source/how-to-contribute-baselines.rst +++ b/baselines/doc/source/how-to-contribute-baselines.rst @@ -6,16 +6,14 @@ Do you have a new federated learning paper and want to add a new baseline to Flo The goal of Flower Baselines is to reproduce experiments from popular papers to accelerate researchers by enabling faster comparisons to new strategies, datasets, models, and federated pipelines in general. Before you start to work on a new baseline or experiment, please check the `Flower Issues `_ or `Flower Pull Requests `_ to see if someone else is already working on it. Please open a new issue if you are planning to work on a new baseline or experiment with a short description of the corresponding paper and the experiment you want to contribute. +If you are proposing a brand new baseline, please indicate what experiments from the paper you are planning to include. Requirements ------------ -Contributing a new baseline is really easy. You only have to make sure that your federated learning experiments are running with Flower and replicate the results of a paper. Flower baselines need to make use of: +Contributing a new baseline is really easy. You only have to make sure that your federated learning experiments run with Flower, use `Flower Datasets `_, and replicate the results of a paper. +Preferably, the baselines make use of PyTorch, but other ML frameworks are also welcome. 
The baselines are expected to run in a machine with Ubuntu 22.04, but if yours runs also on macOS even better! -* `Poetry `_ to manage the Python environment. -* `Hydra `_ to manage the configuration files for your experiments. - -You can find more information about how to setup Poetry in your machine in the ``EXTENDED_README.md`` that is generated when you prepare your baseline. Add a new Flower Baseline ------------------------- @@ -27,11 +25,18 @@ Let's say you want to contribute the code of your most recent Federated Learning #. **Get the Flower source code on your machine** #. Fork the Flower codebase: go to the `Flower GitHub repo `_ and fork the code (click the *Fork* button in the top-right corner and follow the instructions) #. Clone the (forked) Flower source code: :code:`git clone git@github.com:[your_github_username]/flower.git` - #. Open the code in your favorite editor. -#. **Use the provided script to create your baseline directory** - #. Navigate to the baselines directory and run :code:`./dev/create-baseline.sh fedawesome` - #. A new directory in :code:`baselines/fedawesome` is created. - #. Follow the instructions in :code:`EXTENDED_README.md` and :code:`README.md` in your baseline directory. +#. **Create a new baseline using the template** + #. Create a new Python environment with Python 3.10 (we recommend doing this with `pyenv `_) + #. Install flower with: :code:`pip install flwr`. + #. Navigate to the baselines directory and run: :code:`flwr new fedawesome`. When prompted, choose the option :code:`Flower Baseline`. + #. A new directory in :code:`baselines/fedawesome` is created with the structure needed for a Flower Baseline. + #. Follow the instructions in the :code:`README.md` in your baseline directory. + + .. tip:: + At this point, your baseline contains source code showing how a simple :code:`PyTorch+CIFAR10` project can be built with Flower. 
+ You can run it directly by executing :code:`flwr run .` from inside the directory of your baseline. Update the code with that + needed to implement your baseline. + #. **Open a pull request** #. Stage your changes: :code:`git add .` #. Commit & push: :code:`git commit -m "Create new FedAwesome baseline" ; git push` @@ -49,15 +54,18 @@ Further reading: Usability --------- -Flower is known and loved for its usability. Therefore, make sure that your baseline or experiment can be executed with a single command such as: +Flower is known and loved for its usability. Therefore, make sure that your baseline or experiment can be executed with a single command after installing the baseline project: .. code-block:: bash - poetry run python -m .main - - # or, once sourced into your environment - python -m .main + # Install the baseline project + pip install -e . + + # Run the baseline using default config + flwr run . + + # Run the baseline overriding the config + flwr run . --run-config lr=0.01,num-server-rounds=200 -We provide you with a `template-baseline `_ to use as guidance when contributing your baseline. Having all baselines follow a homogenous structure helps users to tryout many baselines without the overheads of having to understand each individual codebase. Similarly, by using Hydra throughout, users will immediately know how to parameterise your experiments directly from the command line. -We look forward to your contribution! +We look forward to your contribution! \ No newline at end of file diff --git a/baselines/doc/source/how-to-use-baselines.rst b/baselines/doc/source/how-to-use-baselines.rst index 4704a9b6074e..ec65f8f7d5ee 100644 --- a/baselines/doc/source/how-to-use-baselines.rst +++ b/baselines/doc/source/how-to-use-baselines.rst @@ -5,7 +5,6 @@ Use Baselines We are changing the way we structure the Flower baselines. While we complete the transition to the new format, you can still find the existing baselines and use them: `baselines (old) `_. 
Currently, you can make use of baselines for `FedAvg `_, `FedOpt `_, and `LEAF-FEMNIST `_. - The documentation below has been updated to reflect the new way of using Flower baselines. Structure --------- @@ -15,87 +14,116 @@ All baselines are available in the directory `baselines / + ├── LICENSE ├── README.md - ├── pyproject.toml + ├── pyproject.toml # defines dependencies + ├── _static # optionally a directory to save plots └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files - -Please note that some baselines might include additional files (e.g. a :code:`requirements.txt`) or a hierarchy of :code:`.yaml` files for `Hydra `_. + └── *.py # several .py files Setting up your machine ----------------------- -.. note:: - Flower baselines are designed to run on Ubuntu 22.04. While a GPU is not required to run the baselines, some of the more computationally demanding ones do benefit from GPU acceleration. +.. tip:: + Flower baselines are designed to run on Ubuntu 22.04 and Python 3.10. While a GPU is not required to run the baselines, some of the more computationally demanding ones do benefit from GPU acceleration. + All baselines are expected to make use of `pyenv `_. -Common to all baselines is `Poetry `_, a tool to manage Python dependencies. Baselines also make use of `Pyenv `_. You'll need to install both on your system before running a baseline. What follows is a step-by-step guide on getting :code:`pyenv` and :code:`Poetry` installed on your system. +.. note:: + We are in the process of migrating all baselines to use `flwr run`. Those that haven't yet been migrated still make use of `Poetry `_, a tool to manage Python dependencies. + Identifying whether the baseline you want to run requires Poetry or not is easy: check if the `Environment Setup` section in the baseline readme mentions Poetry. + Follow the instructions later in this section if you need to setup Poetry in your system. 
-Let's begin by installing :code:`pyenv`. We'll be following the standard procedure. Please refer to the `pyenv docs `_ for alternative ways of installing it. +Let's begin by installing :code:`pyenv`. We'll be following the standard procedure. Please refer to the `pyenv docs `_ for alternative ways of installing it, including for platforms other than Ubuntu. .. code-block:: bash - # first install a few packages needed later for pyenv - sudo apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ - libreadline-dev libbz2-dev libffi-dev liblzma-dev + # first install a few packages needed later for pyenv + sudo apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ + libreadline-dev libbz2-dev libffi-dev liblzma-dev - # now clone pyenv into your home directory (this is the default way of installing pyenv) - git clone https://github.com/pyenv/pyenv.git ~/.pyenv + # now clone pyenv into your home directory (this is the default way of installing pyenv) + git clone https://github.com/pyenv/pyenv.git ~/.pyenv - # Then add pyenv to your path by adding the below to your .bashrc/.zshrc - export PYENV_ROOT="$HOME/.pyenv" - command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" - eval "$(pyenv init -)" + # Then add pyenv to your path by adding the below to your .bashrc/.zshrc + export PYENV_ROOT="$HOME/.pyenv" + command -v pyenv >/dev/null || export PATH="$PYENV_ROOT/bin:$PATH" + eval "$(pyenv init -)" Verify your installation by opening a new terminal and .. code-block:: bash - # check python versions available - pyenv versions - # * system (...) # <-- it should just show one + # check python versions available + pyenv versions + # * system (...) # <-- it should just show one + +Then you can proceed and install any version of Python. Baselines use Python 3.10, so we'll be installing a recent version of it. + +.. 
code-block:: bash + + pyenv install 3.10.14 + # this will take a little while + # once done, you should see that that version is available + pyenv versions + # system + # * 3.10.14 # <-- you just installed this -Then you can proceed and install any version of Python. Most baselines currently use Python 3.10.6, so we'll be installing that one. +Next, let's install the :code:`virtualenv` plugin. Check `the documentation `_ for alternative installation methods. .. code-block:: bash - pyenv install 3.10.6 - # this will take a little while - # once done, you should see that that version is available - pyenv versions - # system - # * 3.10.6 # <-- you just installed this + # Clone `pyenv-virtualenv` + git clone https://github.com/pyenv/pyenv-virtualenv.git $(pyenv root)/plugins/pyenv-virtualenv + + # Restart your shell + exec "$SHELL" + -Now that we have :code:`pyenv` installed, we are ready to install :code:`poetry`. Installing Poetry can be done from a single command: +Using :code:`pyenv` +~~~~~~~~~~~~~~~~~~~ + +Creating a virtual environment can be done as follows: .. code-block:: bash - curl -sSL https://install.python-poetry.org | python3 - + # Create an environment for Python 3.10.14 named test-env + pyenv virtualenv 3.10.14 test-env + + # Then activate it + pyenv activate test-env + + # Deactivate it as follows + pyenv deactivate - # add to path by putting this line at the end of your .zshrc/.bashrc - export PATH="$HOME/.local/bin:$PATH" + +(optional) Setup Poetry +~~~~~~~~~~~~~~~~~~~~~~~ + +Now that we have :code:`pyenv` installed, we are ready to install :code:`poetry`. It can be done from a single command: + +.. 
code-block:: bash + + curl -sSL https://install.python-poetry.org | python3 - + + # add to path by putting this line at the end of your .zshrc/.bashrc + export PATH="$HOME/.local/bin:$PATH" To install Poetry from source, to customise your installation, or to further integrate Poetry with your shell after installation, please check `the Poetry documentation `_. + Using a Flower Baseline ----------------------- -To use Flower Baselines you need first to install :code:`pyenv` and :code:`Poetry`, then: +To use Flower Baselines you need first to install :code:`pyenv` and, depending on the baselines, also :code:`Poetry`, then: 1. Clone the flower repository .. code-block:: bash - git clone https://github.com/adap/flower.git && cd flower + git clone https://github.com/adap/flower.git && cd flower 2. Navigate inside the directory of the baseline you'd like to run -3. Follow the :code:`[Environment Setup]` instructions in the :code:`README.md`. In most cases this will require you to just do: - -.. code-block:: bash - - poetry install - -4. Run the baseline as indicated in the :code:`[Running the Experiments]` section in the :code:`README.md` or in the `[Expected Results]` section to reproduce the experiments in the paper. +3. Follow the :code:`[Environment Setup]` instructions in the :code:`README.md`. +4. Run the baseline as indicated in the :code:`[Running the Experiments]` section in the :code:`README.md` or in the :code:`[Expected Results]` section to reproduce the experiments in the paper. 
diff --git a/baselines/fedrep/.gitignore b/baselines/fedrep/.gitignore new file mode 100644 index 000000000000..eca5b2809311 --- /dev/null +++ b/baselines/fedrep/.gitignore @@ -0,0 +1,5 @@ +# generated files +outputs/ +client_states/ +datasets/ +models/ diff --git a/baselines/baseline_template/LICENSE b/baselines/fedrep/LICENSE similarity index 100% rename from baselines/baseline_template/LICENSE rename to baselines/fedrep/LICENSE diff --git a/baselines/fedrep/README.md b/baselines/fedrep/README.md new file mode 100644 index 000000000000..ece30edf0943 --- /dev/null +++ b/baselines/fedrep/README.md @@ -0,0 +1,126 @@ +--- +title: Exploiting Shared Representations for Personalized Federated Learning +url: http://arxiv.org/abs/2102.07078 +labels: [image classification, label heterogeneity, personalized federated learning] +dataset: [CIFAR-10, CIFAR-100] +--- + +# Exploiting Shared Representations for Personalized Federated Learning + +**Paper:** [arxiv.org/abs/2102.07078](http://arxiv.org/abs/2102.07078) + +**Authors:** Liam Collins, Hamed Hassani, Aryan Mokhtari, Sanjay Shakkottai + +**Abstract:** Deep neural networks have shown the ability to extract universal feature representations from data such as images and text that have been useful for a variety of learning tasks. However, the fruits of representation learning have yet to be fully-realized in federated settings. Although data in federated settings is often non-i.i.d. across clients, the success of centralized deep learning suggests that data often shares a global feature representation, while the statistical heterogeneity across clients or tasks is concentrated in the labels. Based on this intuition, we propose a novel federated learning framework and algorithm for learning a shared data representation across clients and unique local heads for each client. 
Our algorithm harnesses the distributed computational power across clients to perform many local-updates with respect to the low-dimensional local parameters for every update of the representation. We prove that this method obtains linear convergence to the ground-truth representation with near-optimal sample complexity in a linear setting, demonstrating that it can efficiently reduce the problem dimension for each client. This result is of interest beyond federated learning to a broad class of problems in which we aim to learn a shared low-dimensional representation among data distributions, for example in meta-learning and multi-task learning. Further, extensive experimental results show the empirical improvement of our method over alternative personalized federated learning approaches in federated environments with heterogeneous data. + + +## About this baseline + +**What’s implemented:** The code in this directory replicates the experiments in _Exploiting Shared Representations for Personalized Federated Learning_ (Liam Collins et al., 2021) for CIFAR10 and CIFAR-100 datasets, which proposed the `FedRep` model. Specifically, it replicates the results of CIFAR-10 (`(100, 2), (100, 5)`) and CIFAR-100 (`(100, 5), (100, 20)`) found in Table 1 of their paper. + +**Datasets:** CIFAR-10, CIFAR-100 from `Flower Datasets`. + +**Hardware Setup:** WSL2 Ubuntu 22.04 LTS, NVIDIA RTX 3070 Laptop, 32GB RAM, AMD Ryzen 9 5900HX. + +**Contributors:** Jiahao Tan<> + + +## Experimental Setup + +**Task:** Image Classification + +**Model:** This directory implements 2 models: + +- CNNCifar10 +- CNNCifar100 + +These two models are modified from the [official repo](https://github.com/rahulv0205/fedrep_experiments)'s. Note that the official models do not include BN layers; however, without BN layers, training will collapse.
+ +Please see how models are implemented using a so-called model_manager and model_split class, since FedRep uses head and base layers in a neural network. These classes are defined in the `models.py` file and thereafter called when building new models in the directory `/implemented_models`. Please extend and add new models as you wish. + +**Dataset:** CIFAR10, CIFAR-100. CIFAR10/100 will be partitioned based on the number of classes for data that each client shall receive, e.g. 4 allocated classes could be [1, 3, 5, 9]. + +**Training Hyperparameters:** The hyperparameters can be found in the `conf/base.yaml` file, which is the configuration file for the main script. + +| Description | Default Value | +| --------------------- | ----------------------------------- | +| `num_clients` | `100` | +| `num_rounds` | `100` | +| `num_local_epochs` | `5` | +| `num_rep_epochs` | `1` | +| `enable_finetune` | `False` | +| `num_finetune_epochs` | `5` | +| `use_cuda` | `true` | +| `specified_device` | `null` | +| `client resources` | `{'num_cpus': 2, 'num_gpus': 0.5 }` | +| `learning_rate` | `0.01` | +| `batch_size` | `50` | +| `model_name` | `cnncifar10` | +| `algorithm` | `fedrep` | + + +## Environment Setup + +To construct the Python environment follow these steps: + +```bash +# Set Python 3.10 +pyenv local 3.10.12 +# Tell poetry to use python 3.10 +poetry env use 3.10.12 + +# Install the base Poetry environment +poetry install + +# Activate the environment +poetry shell +``` + +## Running the Experiments + +``` +python -m fedrep.main # this will run using the default settings in the `conf/base.yaml` +``` + +While the config files contain a large number of settings, the ones below are the main ones you'd likely want to modify.
+```bash +algorithm: fedavg, fedrep # these are currently supported +dataset.name: cifar10, cifar100 +dataset.num_classes: 2, 5, 20 (only for CIFAR-100) +model_name: cnncifar10, cnncifar100 +``` + + +## Expected Results + +### CIFAR-10 (100, 2) + +``` +python -m fedrep.main --config-name cifar10_100_2 algorithm=fedrep +python -m fedrep.main --config-name cifar10_100_2 algorithm=fedavg +``` + + +### CIFAR-10 (100, 5) + +``` +python -m fedrep.main --config-name cifar10_100_5 algorithm=fedrep +python -m fedrep.main --config-name cifar10_100_5 algorithm=fedavg +``` + + +### CIFAR-100 (100, 5) + +``` +python -m fedrep.main --config-name cifar100_100_5 algorithm=fedrep +python -m fedrep.main --config-name cifar100_100_5 algorithm=fedavg +``` + + +### CIFAR-100 (100, 20) + +``` +python -m fedrep.main --config-name cifar100_100_20 algorithm=fedrep +python -m fedrep.main --config-name cifar100_100_20 algorithm=fedavg +``` + \ No newline at end of file diff --git a/baselines/fedrep/_static/cifar100_100_20.png b/baselines/fedrep/_static/cifar100_100_20.png new file mode 100644 index 000000000000..2421f15ac6c6 Binary files /dev/null and b/baselines/fedrep/_static/cifar100_100_20.png differ diff --git a/baselines/fedrep/_static/cifar100_100_5.png b/baselines/fedrep/_static/cifar100_100_5.png new file mode 100644 index 000000000000..17f25eb480c4 Binary files /dev/null and b/baselines/fedrep/_static/cifar100_100_5.png differ diff --git a/baselines/fedrep/_static/cifar10_100_2.png b/baselines/fedrep/_static/cifar10_100_2.png new file mode 100644 index 000000000000..75ee48b2c970 Binary files /dev/null and b/baselines/fedrep/_static/cifar10_100_2.png differ diff --git a/baselines/fedrep/_static/cifar10_100_5.png b/baselines/fedrep/_static/cifar10_100_5.png new file mode 100644 index 000000000000..1d20a953f9c4 Binary files /dev/null and b/baselines/fedrep/_static/cifar10_100_5.png differ diff --git a/baselines/baseline_template/baseline_template/__init__.py 
b/baselines/fedrep/fedrep/__init__.py similarity index 100% rename from baselines/baseline_template/baseline_template/__init__.py rename to baselines/fedrep/fedrep/__init__.py diff --git a/baselines/fedrep/fedrep/base_model.py b/baselines/fedrep/fedrep/base_model.py new file mode 100644 index 000000000000..e6a74c01bf9b --- /dev/null +++ b/baselines/fedrep/fedrep/base_model.py @@ -0,0 +1,324 @@ +"""Abstract class for splitting a model into body and head.""" + +import os +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional, OrderedDict, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from omegaconf import DictConfig +from torch import Tensor +from torch.utils.data import DataLoader + +from fedrep.constants import ( + DEFAULT_FINETUNE_EPOCHS, + DEFAULT_LOCAL_TRAIN_EPOCHS, + DEFAULT_REPRESENTATION_EPOCHS, +) + + +def get_device( + use_cuda: bool = True, specified_device: Optional[int] = None +) -> torch.device: + """Get the tensor device. + + Args: + use_cuda: Flag indicates whether to use CUDA or not. Defaults to True. + specified_device: Specified cuda device to use. Defaults to None. + + Raises + ------ + ValueError: Specified device not in CUDA_VISIBLE_DEVICES. + + Returns + ------- + The selected or fallbacked device. 
+ """ + device = torch.device("cpu") + if use_cuda and torch.cuda.is_available(): + if specified_device is not None: + cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES") + if cuda_visible_devices is not None: + devices = [int(d) for d in cuda_visible_devices.split(",")] + if specified_device in devices: + device = torch.device(f"cuda:{specified_device}") + else: + raise ValueError( + f"Specified device {specified_device}" + " not in CUDA_VISIBLE_DEVICES" + ) + else: + print("CUDA_VISIBLE_DEVICES not exists, using torch.device('cuda').") + else: + device = torch.device("cuda") + + return device + + +class ModelSplit(ABC, nn.Module): + """Abstract class for splitting a model into body and head.""" + + def __init__(self, model: nn.Module): + """Initialize the attributes of the model split. + + Args: + model: dict containing the vocab sizes of the input attributes. + """ + super().__init__() + + self._body, self._head = self._get_model_parts(model) + + @abstractmethod + def _get_model_parts(self, model: nn.Module) -> Tuple[nn.Module, nn.Module]: + """Return the body and head of the model. + + Args: + model: model to be split into head and body + + Returns + ------- + Tuple where the first element is the body of the model + and the second is the head. + """ + + @property + def body(self) -> nn.Module: + """Return model body.""" + return self._body + + @body.setter + def body(self, state_dict: OrderedDict[str, Tensor]) -> None: + """Set model body. + + Args: + state_dict: dictionary of the state to set the model body to. + """ + self._body.load_state_dict(state_dict, strict=True) + + @property + def head(self) -> nn.Module: + """Return model head.""" + return self._head + + @head.setter + def head(self, state_dict: OrderedDict[str, Tensor]) -> None: + """Set model head. + + Args: + state_dict: dictionary of the state to set the model head to. 
+ """ + self._head.load_state_dict(state_dict, strict=True) + + def get_parameters(self) -> List[np.ndarray]: + """Get model parameters. + + Returns + ------- + Body and head parameters + """ + return [ + val.cpu().numpy() + for val in [ + *self.body.state_dict().values(), + *self.head.state_dict().values(), + ] + ] + + def set_parameters(self, state_dict: Dict[str, Tensor]) -> None: + """Set model parameters. + + Args: + state_dict: dictionary of the state to set the model to. + """ + self.load_state_dict(state_dict, strict=False) + + def enable_head(self) -> None: + """Enable gradient tracking for the head parameters.""" + for param in self._head.parameters(): + param.requires_grad = True + + def enable_body(self) -> None: + """Enable gradient tracking for the body parameters.""" + for param in self._body.parameters(): + param.requires_grad = True + + def disable_head(self) -> None: + """Disable gradient tracking for the head parameters.""" + for param in self._head.parameters(): + param.requires_grad = False + + def disable_body(self) -> None: + """Disable gradient tracking for the body parameters.""" + for param in self._body.parameters(): + param.requires_grad = False + + def forward(self, inputs: Any) -> Any: + """Forward inputs through the body and the head.""" + return self.head(self.body(inputs)) + + +# pylint: disable=R0902, R0913, R0801 +class ModelManager(ABC): + """Manager for models with Body/Head split.""" + + def __init__( + self, + client_id: int, + config: DictConfig, + trainloader: DataLoader, + testloader: DataLoader, + client_save_path: Optional[str], + model_split_class: Any, # ModelSplit + ): + """Initialize the attributes of the model manager. + + Args: + client_id: The id of the client. + config: Dict containing the configurations to be used by the manager. + trainloader: Client train dataloader. + testloader: Client test dataloader. + client_save_path: Path to save the client model head state. 
+ model_split_class: Class to be used to split the model into body and head \ + (concrete implementation of ModelSplit). + """ + super().__init__() + self.config = config + self.client_id = client_id + self.trainloader = trainloader + self.testloader = testloader + self.device = get_device( + use_cuda=getattr(self.config, "use_cuda", True), + specified_device=getattr(self.config, "specified_device", None), + ) + self.client_save_path = client_save_path + self.learning_rate = config.get("learning_rate", 0.01) + self.momentum = config.get("momentum", 0.5) + self._model: ModelSplit = model_split_class(self._create_model()) + + @abstractmethod + def _create_model(self) -> nn.Module: + """Return model to be splitted into head and body.""" + + @property + def model(self) -> ModelSplit: + """Return model.""" + return self._model + + def train(self) -> Dict[str, Union[List[Dict[str, float]], int, float]]: + """Train the model maintained in self.model. + + Returns + ------- + Dict containing the train metrics. 
+ """ + # Load client state (head) if client_save_path is not None and it is not empty + if self.client_save_path is not None and os.path.isfile(self.client_save_path): + self._model.head.load_state_dict(torch.load(self.client_save_path)) + + num_local_epochs = DEFAULT_LOCAL_TRAIN_EPOCHS + if hasattr(self.config, "num_local_epochs"): + num_local_epochs = int(self.config.num_local_epochs) + + num_rep_epochs = DEFAULT_REPRESENTATION_EPOCHS + if hasattr(self.config, "num_rep_epochs"): + num_rep_epochs = int(self.config.num_rep_epochs) + + criterion = torch.nn.CrossEntropyLoss() + weights = [v for k, v in self._model.named_parameters() if "weight" in k] + biases = [v for k, v in self._model.named_parameters() if "bias" in k] + optimizer = torch.optim.SGD( + [ + {"params": weights, "weight_decay": 1e-4}, + {"params": biases, "weight_decay": 0.0}, + ], + lr=self.learning_rate, + momentum=self.momentum, + ) + correct, total = 0, 0 + loss: torch.Tensor = 0.0 + + self._model.train() + for i in range(num_local_epochs + num_rep_epochs): + if i < num_local_epochs: + self._model.disable_body() + self._model.enable_head() + else: + self._model.enable_body() + self._model.disable_head() + for batch in self.trainloader: + images = batch["img"] + labels = batch["label"] + outputs = self._model(images.to(self.device)) + labels = labels.to(self.device) + loss = criterion(outputs, labels) + optimizer.zero_grad() + loss.backward() + optimizer.step() + total += labels.size(0) + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + + # Save client state (head) + if self.client_save_path is not None: + torch.save(self._model.head.state_dict(), self.client_save_path) + + return {"loss": loss.item(), "accuracy": correct / total} + + def test(self) -> Dict[str, float]: + """Test the model maintained in self.model. + + Returns + ------- + Dict containing the test metrics. 
+ """ + # Load client state (head) + if self.client_save_path is not None and os.path.isfile(self.client_save_path): + self._model.head.load_state_dict(torch.load(self.client_save_path)) + + num_finetune_epochs = DEFAULT_FINETUNE_EPOCHS + if hasattr(self.config, "num_finetune_epochs"): + num_finetune_epochs = int(self.config.num_finetune_epochs) + + if num_finetune_epochs > 0 and self.config.get("enable_finetune", False): + optimizer = torch.optim.SGD(self._model.parameters(), lr=self.learning_rate) + criterion = torch.nn.CrossEntropyLoss() + self._model.train() + for _ in range(num_finetune_epochs): + for batch in self.trainloader: + images = batch["img"].to(self.device) + labels = batch["label"].to(self.device) + outputs = self._model(images) + loss = criterion(outputs, labels) + optimizer.zero_grad() + loss.backward() + optimizer.step() + + criterion = torch.nn.CrossEntropyLoss() + correct, total, loss = 0, 0, 0.0 + + self._model.eval() + with torch.no_grad(): + for batch in self.testloader: + images = batch["img"].to(self.device) + labels = batch["label"].to(self.device) + outputs = self._model(images) + loss += criterion(outputs, labels).item() + total += labels.size(0) + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + + return { + "loss": loss / len(self.testloader.dataset), + "accuracy": correct / total, + } + + def train_dataset_size(self) -> int: + """Return train data set size.""" + return len(self.trainloader.dataset) + + def test_dataset_size(self) -> int: + """Return test data set size.""" + return len(self.testloader.dataset) + + def total_dataset_size(self) -> int: + """Return total data set size.""" + return len(self.trainloader.dataset) + len(self.testloader.dataset) diff --git a/baselines/fedrep/fedrep/client.py b/baselines/fedrep/fedrep/client.py new file mode 100644 index 000000000000..f857fd2cf82a --- /dev/null +++ b/baselines/fedrep/fedrep/client.py @@ -0,0 +1,319 @@ +"""Client implementation - can call FedPep and FedAvg 
clients.""" + +from collections import OrderedDict +from pathlib import Path +from typing import Callable, Dict, List, Tuple, Type, Union + +import numpy as np +import torch +from flwr.client import Client, NumPyClient +from flwr.common import NDArrays, Scalar +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import PathologicalPartitioner +from flwr_datasets.preprocessor import Merger +from omegaconf import DictConfig +from torch.utils.data import DataLoader +from torchvision import transforms + +from fedrep.constants import MEAN, STD, Algorithm +from fedrep.models import CNNCifar10ModelManager, CNNCifar100ModelManager + +PROJECT_DIR = Path(__file__).parent.parent.absolute() + +FEDERATED_DATASET = None + + +class BaseClient(NumPyClient): + """Implementation of Federated Averaging (FedAvg) Client.""" + + # pylint: disable=R0913 + def __init__( + self, + client_id: int, + trainloader: DataLoader, + testloader: DataLoader, + config: DictConfig, + model_manager_class: Union[ + Type[CNNCifar10ModelManager], Type[CNNCifar100ModelManager] + ], + client_state_save_path: str = "", + ): + """Initialize client attributes. + + Args: + client_id: The client ID. + trainloader: Client train data loader. + testloader: Client test data loader. + config: dictionary containing the client configurations. + model_manager_class: class to be used as the model manager. + client_state_save_path: Path for saving model head parameters. + (Just for FedRep). Defaults to "". 
+ """ + super().__init__() + + self.client_id = client_id + self.client_state_save_path = ( + (client_state_save_path + f"/client_{self.client_id}") + if client_state_save_path != "" + else None + ) + self.model_manager = model_manager_class( + client_id=self.client_id, + config=config, + trainloader=trainloader, + testloader=testloader, + client_save_path=self.client_state_save_path, + ) + + def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + """Return the current local model parameters.""" + return self.model_manager.model.get_parameters() + + def set_parameters( + self, parameters: List[np.ndarray], evaluate: bool = False + ) -> None: + """Set the local model parameters to the received parameters. + + Args: + parameters: parameters to set the model to. + """ + _ = evaluate + model_keys = [ + k + for k in self.model_manager.model.state_dict().keys() + if k.startswith("_body") or k.startswith("_head") + ] + params_dict = zip(model_keys, parameters) + + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + + self.model_manager.model.set_parameters(state_dict) + + def perform_train(self) -> Dict[str, Union[List[Dict[str, float]], int, float]]: + """Perform local training to the whole model. + + Returns + ------- + Dict with the train metrics. + """ + self.model_manager.model.enable_body() + self.model_manager.model.enable_head() + + return self.model_manager.train() + + def fit( + self, parameters: NDArrays, config: Dict[str, Scalar] + ) -> Tuple[NDArrays, int, Dict[str, Union[bool, bytes, float, int, str]]]: + """Train the provided parameters using the locally held dataset. + + Args: + parameters: The current (global) model parameters. + config: configuration parameters for training sent by the server. + + Returns + ------- + Tuple containing the locally updated model parameters, \ + the number of examples used for training and \ + the training metrics. 
+ """ + self.set_parameters(parameters) + + train_results = self.perform_train() + + # Update train history + print("<------- TRAIN RESULTS -------> :", train_results) + + return self.get_parameters(config), self.model_manager.train_dataset_size(), {} + + def evaluate( + self, parameters: NDArrays, config: Dict[str, Scalar] + ) -> Tuple[float, int, Dict[str, Union[bool, bytes, float, int, str]]]: + """Evaluate the provided global parameters using the locally held dataset. + + Args: + parameters: The current (global) model parameters. + config: configuration parameters for training sent by the server. + + Returns + ------- + Tuple containing the test loss, \ + the number of examples used for evaluation and \ + the evaluation metrics. + """ + self.set_parameters(parameters, evaluate=True) + + # Test the model + test_results = self.model_manager.test() + print("<------- TEST RESULTS -------> :", test_results) + + return ( + test_results.get("loss", 0.0), + self.model_manager.test_dataset_size(), + {k: v for k, v in test_results.items() if not isinstance(v, (dict, list))}, + ) + + +class FedRepClient(BaseClient): + """Implementation of Federated Personalization (FedRep) Client.""" + + def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + """Return the current local body parameters.""" + return [ + val.cpu().numpy() + for val in self.model_manager.model.body.state_dict().values() + ] + + def set_parameters(self, parameters: List[np.ndarray], evaluate=False) -> None: + """Set the local body parameters to the received parameters. + + Args: + parameters: parameters to set the body to. + evaluate: whether the client is evaluating or not. 
+ """ + model_keys = [ + k + for k in self.model_manager.model.state_dict().keys() + if k.startswith("_body") + ] + + if not evaluate: + # Only update client's local head if it hasn't trained yet + model_keys.extend( + [ + k + for k in self.model_manager.model.state_dict().keys() + if k.startswith("_head") + ] + ) + + state_dict = OrderedDict( + (k, torch.from_numpy(v)) for k, v in zip(model_keys, parameters) + ) + + self.model_manager.model.set_parameters(state_dict) + + +# pylint: disable=E1101, W0603 +def get_client_fn_simulation( + config: DictConfig, client_state_save_path: str = "" +) -> Callable[[str], Client]: + """Generate the client function that creates the Flower Clients. + + Parameters + ---------- + model : DictConfig + The model configuration. + cleint_state_save_path : str + The path to save the client state. + + Returns + ------- + Tuple[Callable[[str], FlowerClient], DataLoader] + A tuple containing the client function that creates Flower Clients and + the DataLoader that will be used for testing + """ + assert config.model_name.lower() in [ + "cnncifar10", + "cnncifar100", + ], f"Model {config.model_name} not implemented" + + # - you can define your own data transformation strategy here - + # These transformations are from the official repo + train_data_transform = transforms.Compose( + [ + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(MEAN[config.dataset.name], STD[config.dataset.name]), + ] + ) + test_data_transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize(MEAN[config.dataset.name], STD[config.dataset.name]), + ] + ) + + use_fine_label = False + if config.dataset.name.lower() == "cifar100": + use_fine_label = True + + partitioner = PathologicalPartitioner( + num_partitions=config.num_clients, + partition_by="fine_label" if use_fine_label else "label", + num_classes_per_partition=config.dataset.num_classes, + 
class_assignment_mode="random", + shuffle=True, + seed=config.dataset.seed, + ) + + global FEDERATED_DATASET + if FEDERATED_DATASET is None: + FEDERATED_DATASET = FederatedDataset( + dataset=config.dataset.name.lower(), + partitioners={"all": partitioner}, + preprocessor=Merger({"all": ("train", "test")}), + ) + + def apply_train_transforms(batch): + """Apply transforms for train data to the partition from FederatedDataset.""" + batch["img"] = [train_data_transform(img) for img in batch["img"]] + if use_fine_label: + batch["label"] = batch["fine_label"] + return batch + + def apply_test_transforms(batch): + """Apply transforms for test data to the partition from FederatedDataset.""" + batch["img"] = [test_data_transform(img) for img in batch["img"]] + if use_fine_label: + batch["label"] = batch["fine_label"] + return batch + + # pylint: disable=E1101 + def client_fn(cid: str) -> Client: + """Create a Flower client representing a single organization.""" + cid_use = int(cid) + + partition = FEDERATED_DATASET.load_partition(cid_use, split="all") + + partition_train_test = partition.train_test_split( + train_size=config.dataset.fraction, shuffle=True, seed=config.dataset.seed + ) + + trainset = partition_train_test["train"].with_transform(apply_train_transforms) + testset = partition_train_test["test"].with_transform(apply_test_transforms) + + trainloader = DataLoader(trainset, config.batch_size, shuffle=True) + testloader = DataLoader(testset, config.batch_size) + + model_manager_class: Union[ + Type[CNNCifar10ModelManager], Type[CNNCifar100ModelManager] + ] + if config.model_name.lower() == "cnncifar10": + model_manager_class = CNNCifar10ModelManager + elif config.model_name.lower() == "cnncifar100": + model_manager_class = CNNCifar100ModelManager + else: + raise NotImplementedError( + f"Model {config.model_name} not implemented, check name." 
+ ) + + if config.algorithm.lower() == Algorithm.FEDREP.value: + return FedRepClient( # type: ignore[attr-defined] + client_id=cid_use, + trainloader=trainloader, + testloader=testloader, + config=config, + model_manager_class=model_manager_class, + client_state_save_path=client_state_save_path, + ).to_client() + return BaseClient( # type: ignore[attr-defined] + client_id=cid_use, + trainloader=trainloader, + testloader=testloader, + config=config, + model_manager_class=model_manager_class, + client_state_save_path=client_state_save_path, + ).to_client() + + return client_fn diff --git a/baselines/fedrep/fedrep/conf/base.yaml b/baselines/fedrep/fedrep/conf/base.yaml new file mode 100644 index 000000000000..0d74c4fe78b6 --- /dev/null +++ b/baselines/fedrep/fedrep/conf/base.yaml @@ -0,0 +1,46 @@ +--- +num_clients: 100 # total number of clients +num_local_epochs: 5 # number of local epochs +num_rep_epochs: 1 # number of representation epochs (only for FedRep) +enable_finetune: false +# num_finetune_epochs: 10 +batch_size: 50 +num_rounds: 100 +learning_rate: 0.01 +momentum: 0.5 +algorithm: fedrep +model_name: cnncifar10 + +client_resources: + num_cpus: 2 + num_gpus: 0.5 + +use_cuda: true +specified_device: null # the ID of cuda device, if null, then use defaults torch.device("cuda") + +dataset: + name: cifar10 + split: sample + num_classes: 2 + seed: 42 + num_clients: ${num_clients} + fraction: 0.83 + +model: + _target_: fedrep.implemented_models.cnn_cifar100.CNNCifar10 + +fit_config: + drop_client: false + epochs: ${num_local_epochs} + batch_size: ${batch_size} + +strategy: + _target_: fedrep.strategy.FedRep + fraction_fit: 0.1 + fraction_evaluate: 0.1 + min_fit_clients: 2 + min_evaluate_clients: 2 + min_available_clients: 2 + evaluate_fn: null + on_fit_config_fn: null + on_evaluate_config_fn: null diff --git a/baselines/fedrep/fedrep/conf/cifar100_100_20.yaml b/baselines/fedrep/fedrep/conf/cifar100_100_20.yaml new file mode 100644 index 000000000000..30f9fd209d58 --- 
/dev/null +++ b/baselines/fedrep/fedrep/conf/cifar100_100_20.yaml @@ -0,0 +1,44 @@ +--- +num_clients: 100 # total number of clients +num_local_epochs: 5 # number of local epochs +num_rep_epochs: 1 # number of representation epochs (only for FedRep) +enable_finetune: false +# num_finetune_epochs: 10 +batch_size: 50 +num_rounds: 100 +learning_rate: 0.01 +momentum: 0.5 +algorithm: fedrep +model_name: cnncifar100 + +client_resources: + num_cpus: 2 + num_gpus: 0.5 + +use_cuda: true +specified_device: null + +dataset: + name: cifar100 + num_classes: 20 + seed: 42 + fraction: 0.83 + +model: + _target_: fedrep.implemented_models.cnn_cifar100.CNNCifar100 + +fit_config: + drop_client: false + epochs: ${num_local_epochs} + batch_size: ${batch_size} + +strategy: + _target_: fedrep.strategy.FedRep + fraction_fit: 0.1 + fraction_evaluate: 0.1 + min_fit_clients: 2 + min_evaluate_clients: 2 + min_available_clients: 2 + evaluate_fn: null + on_fit_config_fn: null + on_evaluate_config_fn: null diff --git a/baselines/fedrep/fedrep/conf/cifar100_100_5.yaml b/baselines/fedrep/fedrep/conf/cifar100_100_5.yaml new file mode 100644 index 000000000000..e0add8f03b45 --- /dev/null +++ b/baselines/fedrep/fedrep/conf/cifar100_100_5.yaml @@ -0,0 +1,44 @@ +--- +num_clients: 100 # total number of clients +num_local_epochs: 5 # number of local epochs +num_rep_epochs: 1 # number of representation epochs (only for FedRep) +enable_finetune: false +# num_finetune_epochs: 10 +batch_size: 50 +num_rounds: 100 +learning_rate: 0.01 +momentum: 0.5 +algorithm: fedrep +model_name: cnncifar100 + +client_resources: + num_cpus: 2 + num_gpus: 0.5 + +use_cuda: true +specified_device: null + +dataset: + name: cifar100 + num_classes: 5 + seed: 42 + fraction: 0.83 + +model: + _target_: fedrep.implemented_models.cnn_cifar100.CNNCifar100 + +fit_config: + drop_client: false + epochs: ${num_local_epochs} + batch_size: ${batch_size} + +strategy: + _target_: fedrep.strategy.FedRep + fraction_fit: 0.1 + fraction_evaluate: 0.1 
+ min_fit_clients: 2 + min_evaluate_clients: 2 + min_available_clients: 2 + evaluate_fn: null + on_fit_config_fn: null + on_evaluate_config_fn: null diff --git a/baselines/fedrep/fedrep/conf/cifar10_100_2.yaml b/baselines/fedrep/fedrep/conf/cifar10_100_2.yaml new file mode 100644 index 000000000000..83ee34a298ae --- /dev/null +++ b/baselines/fedrep/fedrep/conf/cifar10_100_2.yaml @@ -0,0 +1,44 @@ +--- +num_clients: 100 # total number of clients +num_local_epochs: 5 # number of local epochs +num_rep_epochs: 1 # number of representation epochs (only for FedRep) +enable_finetune: false +# num_finetune_epochs: 10 +batch_size: 50 +num_rounds: 100 +learning_rate: 0.01 +momentum: 0.5 +algorithm: fedrep +model_name: cnncifar10 + +client_resources: + num_cpus: 2 + num_gpus: 0.5 + +use_cuda: true +specified_device: null + +dataset: + name: cifar10 + num_classes: 2 + seed: 42 + fraction: 0.83 + +model: + _target_: fedrep.implemented_models.cnn_cifar10.CNNCifar10 + +fit_config: + drop_client: false + epochs: ${num_local_epochs} + batch_size: ${batch_size} + +strategy: + _target_: fedrep.strategy.FedRep + fraction_fit: 0.1 + fraction_evaluate: 0.1 + min_fit_clients: 2 + min_evaluate_clients: 2 + min_available_clients: 2 + evaluate_fn: null + on_fit_config_fn: null + on_evaluate_config_fn: null diff --git a/baselines/fedrep/fedrep/conf/cifar10_100_5.yaml b/baselines/fedrep/fedrep/conf/cifar10_100_5.yaml new file mode 100644 index 000000000000..0cbd104406f0 --- /dev/null +++ b/baselines/fedrep/fedrep/conf/cifar10_100_5.yaml @@ -0,0 +1,44 @@ +--- +num_clients: 100 # total number of clients +num_local_epochs: 5 # number of local epochs +num_rep_epochs: 1 # number of representation epochs (only for FedRep) +enable_finetune: false +# num_finetune_epochs: 10 +batch_size: 50 +num_rounds: 100 +learning_rate: 0.01 +momentum: 0.5 +algorithm: fedrep +model_name: cnncifar10 + +client_resources: + num_cpus: 2 + num_gpus: 0.5 + +use_cuda: true +specified_device: null + +dataset: + name: 
cifar10 + num_classes: 5 + seed: 42 + fraction: 0.83 + +model: + _target_: fedrep.implemented_models.cnn_cifar10.CNNCifar10 + +fit_config: + drop_client: false + epochs: ${num_local_epochs} + batch_size: ${batch_size} + +strategy: + _target_: fedrep.strategy.FedRep + fraction_fit: 0.1 + fraction_evaluate: 0.1 + min_fit_clients: 2 + min_evaluate_clients: 2 + min_available_clients: 2 + evaluate_fn: null + on_fit_config_fn: null + on_evaluate_config_fn: null diff --git a/baselines/fedrep/fedrep/constants.py b/baselines/fedrep/fedrep/constants.py new file mode 100644 index 000000000000..27e68f2b786c --- /dev/null +++ b/baselines/fedrep/fedrep/constants.py @@ -0,0 +1,19 @@ +"""Constants used in machine learning pipeline.""" + +from enum import Enum + + +class Algorithm(Enum): + """Algorithm names.""" + + FEDREP = "fedrep" + FEDAVG = "fedavg" + + +DEFAULT_LOCAL_TRAIN_EPOCHS: int = 10 +DEFAULT_FINETUNE_EPOCHS: int = 5 +DEFAULT_REPRESENTATION_EPOCHS: int = 1 + +MEAN = {"cifar10": [0.485, 0.456, 0.406], "cifar100": [0.507, 0.487, 0.441]} + +STD = {"cifar10": [0.229, 0.224, 0.225], "cifar100": [0.267, 0.256, 0.276]} diff --git a/baselines/fedrep/fedrep/dataset.py b/baselines/fedrep/fedrep/dataset.py new file mode 100644 index 000000000000..a616e38ae220 --- /dev/null +++ b/baselines/fedrep/fedrep/dataset.py @@ -0,0 +1 @@ +"""FedRep uses flwr-datasets.""" diff --git a/baselines/fedrep/fedrep/dataset_preparation.py b/baselines/fedrep/fedrep/dataset_preparation.py new file mode 100644 index 000000000000..a616e38ae220 --- /dev/null +++ b/baselines/fedrep/fedrep/dataset_preparation.py @@ -0,0 +1 @@ +"""FedRep uses flwr-datasets.""" diff --git a/baselines/fedrep/fedrep/main.py b/baselines/fedrep/fedrep/main.py new file mode 100644 index 000000000000..223b98aa21fa --- /dev/null +++ b/baselines/fedrep/fedrep/main.py @@ -0,0 +1,123 @@ +"""Create and connect the building blocks for your experiments; start the simulation. 
"""Create and connect the building blocks for your experiments; start the simulation.

It includes preprocessing the dataset, instantiating the strategy, specifying how the
global model is going to be evaluated, etc. At the end, this script saves the results.
"""

from pathlib import Path
from typing import List, Tuple

import flwr as fl
import hydra
from flwr.common.parameter import ndarrays_to_parameters
from flwr.common.typing import Metrics
from hydra.core.hydra_config import HydraConfig
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf

from fedrep.utils import (
    get_client_fn,
    get_create_model_fn,
    plot_metric_from_history,
    save_results_as_pickle,
    set_client_state_save_path,
    set_client_strategy,
)


@hydra.main(config_path="conf", config_name="base", version_base=None)
def main(cfg: DictConfig) -> None:
    """Run the baseline.

    Parameters
    ----------
    cfg : DictConfig
        An omegaconf object that stores the hydra config.
    """
    # Print parsed config
    print(OmegaConf.to_yaml(cfg))

    # Point cfg.strategy at the implementation matching cfg.algorithm
    cfg = set_client_strategy(cfg)

    # Create directory to store client states if it does not exist
    # Client state has subdirectories with the name of current time
    client_state_save_path = set_client_state_save_path()

    # Define your clients
    # Get client function
    client_fn = get_client_fn(config=cfg, client_state_save_path=client_state_save_path)

    # get a function that will be used to construct the config that the client's
    # fit() method will receive
    def get_on_fit_config():
        def fit_config_fn(server_round: int):
            # resolve and convert to python dict
            fit_config = OmegaConf.to_container(cfg.fit_config, resolve=True)
            _ = server_round  # unused; kept to satisfy the callback signature
            return fit_config

        return fit_config_fn

    # get a function that will be used to construct the model
    create_model, split = get_create_model_fn(cfg)

    model = split(create_model())

    def evaluate_metrics_aggregation_fn(
        eval_metrics: List[Tuple[int, Metrics]]
    ) -> Metrics:
        """Aggregate client accuracies, weighted by number of examples."""
        weights, accuracies = [], []
        for num_examples, metric in eval_metrics:
            weights.append(num_examples)
            accuracies.append(metric["accuracy"] * num_examples)
        accuracy = sum(accuracies) / sum(weights)  # type: ignore[arg-type]
        return {"accuracy": accuracy}

    # Define your strategy
    strategy = instantiate(
        cfg.strategy,
        initial_parameters=ndarrays_to_parameters(model.get_parameters()),
        on_fit_config_fn=get_on_fit_config(),
        evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation_fn,
    )

    # Start Simulation
    history = fl.simulation.start_simulation(
        client_fn=client_fn,
        num_clients=cfg.num_clients,
        config=fl.server.ServerConfig(num_rounds=cfg.num_rounds),
        client_resources={
            "num_cpus": cfg.client_resources.num_cpus,
            "num_gpus": cfg.client_resources.num_gpus,
        },
        strategy=strategy,
    )

    # Experiment completed. Now we save the results and
    # generate plots using the `history`
    print("................")
    print(history)

    # Save your results
    save_path = Path(HydraConfig.get().runtime.output_dir)

    # save results as a Python pickle using a file_path
    # the directory created by Hydra for each run
    save_results_as_pickle(history, file_path=save_path)
    # plot results and include them in the readme
    strategy_name = strategy.__class__.__name__
    file_suffix: str = (
        f"_{strategy_name}"
        f"_C={cfg.num_clients}"
        f"_B={cfg.batch_size}"
        f"_E={cfg.num_local_epochs}"
        f"_R={cfg.num_rounds}"
        f"_lr={cfg.learning_rate}"
    )

    plot_metric_from_history(history, save_path, file_suffix)


if __name__ == "__main__":
    main()
"""Model, model manager and model split for CIFAR-10 and CIFAR-100."""

from typing import Tuple

import torch
import torch.nn as nn

from fedrep.base_model import ModelManager, ModelSplit


# pylint: disable=W0223
class CNNCifar10(nn.Module):
    """CNN model for CIFAR10 dataset.

    Refer to
    https://github.com/rahulv0205/fedrep_experiments/blob/main/models/Nets.py
    """

    def __init__(self):
        """Initialize the model."""
        super().__init__()

        # Note that in the official implementation, the body has no BN layers.
        # However, no BN will definitely lead training to collapse.
        self.body = nn.Sequential(
            nn.Conv2d(3, 64, 5),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 64, 5),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(64 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 64),
            nn.ReLU(),
        )

        self.head = nn.Sequential(nn.Linear(64, 10))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass of the model."""
        x = self.body(x)
        return self.head(x)


class CNNCifar10ModelSplit(ModelSplit):
    """Split CNNCifar10 model into body and head."""

    def _get_model_parts(self, model: CNNCifar10) -> Tuple[nn.Module, nn.Module]:
        return model.body, model.head


# pylint: disable=R0902, R0913, R0801
class CNNCifar10ModelManager(ModelManager):
    """Manager for models with Body/Head split."""

    def __init__(self, **kwargs):
        """Initialize the attributes of the model manager.

        Args:
            client_id: The id of the client.
            config: Dict containing the configurations to be used by the manager.
        """
        super().__init__(model_split_class=CNNCifar10ModelSplit, **kwargs)

    def _create_model(self) -> nn.Module:
        """Return CNNCifar10 model to be split into head and body."""
        return CNNCifar10().to(self.device)


# pylint: disable=W0223
class CNNCifar100(nn.Module):
    """CNN model for CIFAR100 dataset.

    Refer to
    https://github.com/rahulv0205/fedrep_experiments/blob/main/models/Nets.py
    """

    def __init__(self):
        """Initialize the model."""
        super().__init__()

        # Note that in the official implementation, the body has no BN layers.
        # However, no BN will definitely lead training to collapse.
        self.body = nn.Sequential(
            nn.Conv2d(3, 64, 5),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(64, 128, 5),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Flatten(),
            nn.Linear(128 * 5 * 5, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Dropout(0.6),
        )

        self.head = nn.Sequential(nn.Linear(128, 100))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass of the model.

        Fix: the original class defined ``body``/``head`` but no ``forward``,
        so calling the module raised nn.Module's missing-forward error.
        Mirrors CNNCifar10.forward.
        """
        x = self.body(x)
        return self.head(x)


class CNNCifar100ModelSplit(ModelSplit):
    """Split CNNCifar100 model into body and head."""

    def _get_model_parts(self, model: CNNCifar100) -> Tuple[nn.Module, nn.Module]:
        return model.body, model.head


# pylint: disable=R0902, R0913, R0801
class CNNCifar100ModelManager(ModelManager):
    """Manager for models with Body/Head split."""

    def __init__(self, **kwargs):
        """Initialize the attributes of the model manager.

        Args:
            client_id: The id of the client.
            config: Dict containing the configurations to be used by the manager.
        """
        super().__init__(model_split_class=CNNCifar100ModelSplit, **kwargs)

    def _create_model(self) -> CNNCifar100:
        """Return CNNCifar100 model to be split into head and body."""
        return CNNCifar100().to(self.device)
"""Utility functions for FedRep."""

import logging
import os
import pickle
import time
from pathlib import Path
from secrets import token_hex
from typing import Callable, Optional, Tuple, Type, Union

import matplotlib.pyplot as plt
import numpy as np
from flwr.client import Client
from flwr.server.history import History
from omegaconf import DictConfig

from fedrep.base_model import get_device
from fedrep.client import get_client_fn_simulation
from fedrep.constants import Algorithm
from fedrep.models import (
    CNNCifar10,
    CNNCifar10ModelSplit,
    CNNCifar100,
    CNNCifar100ModelSplit,
)


def set_client_state_save_path() -> str:
    """Create and return the client-state directory for this run.

    The path is date/time stamped (``./client_states/YYYY-MM-DD/HH-MM-SS``)
    so that each run gets its own subdirectory.
    """
    client_state_save_path = time.strftime("%Y-%m-%d")
    client_state_sub_path = time.strftime("%H-%M-%S")
    client_state_save_path = (
        f"./client_states/{client_state_save_path}/{client_state_sub_path}"
    )
    if not os.path.exists(client_state_save_path):
        os.makedirs(client_state_save_path)
    return client_state_save_path


def set_client_strategy(cfg: DictConfig) -> DictConfig:
    """Point ``cfg.strategy._target_`` at the strategy class for ``cfg.algorithm``.

    Unknown algorithm names are left unchanged and a warning is logged
    (the config's preexisting strategy target acts as the fallback).
    """
    algorithm = cfg.algorithm.lower()
    if algorithm == Algorithm.FEDREP.value:
        cfg.strategy["_target_"] = "fedrep.strategy.FedRep"
    elif algorithm == Algorithm.FEDAVG.value:
        cfg.strategy["_target_"] = "flwr.server.strategy.FedAvg"
    else:
        # Lazy %-formatting: the message is only built if the record is emitted.
        logging.warning("Algorithm %s not implemented. Fallback to FedAvg.", algorithm)
    return cfg


def get_client_fn(
    config: DictConfig, client_state_save_path: str = ""
) -> Callable[[str], Client]:
    """Return the client construction function for the configured algorithm.

    Only FedRep clients receive the state-save path (they persist their
    personalized head between rounds); FedAvg clients are stateless.
    """
    algorithm = config.algorithm.lower()
    if algorithm == Algorithm.FEDREP.value:
        client_fn = get_client_fn_simulation(
            config=config, client_state_save_path=client_state_save_path
        )
    elif algorithm == Algorithm.FEDAVG.value:
        client_fn = get_client_fn_simulation(config=config)
    else:
        raise NotImplementedError(f"Algorithm {algorithm} not implemented.")
    return client_fn


def get_create_model_fn(
    config: DictConfig,
) -> Tuple[
    Callable[[], Union[CNNCifar10, CNNCifar100]],
    Union[Type[CNNCifar10ModelSplit], Type[CNNCifar100ModelSplit]],
]:
    """Return a model factory and the matching model-split class.

    Note: the factory returns model *instances* (moved to the selected
    device), while the second element is the split *class* itself.
    """
    device = get_device(
        use_cuda=getattr(config, "use_cuda", True),
        specified_device=getattr(config, "specified_device", None),
    )
    split: Union[Type[CNNCifar10ModelSplit], Type[CNNCifar100ModelSplit]] = (
        CNNCifar10ModelSplit
    )
    if config.model_name.lower() == "cnncifar10":

        def create_model() -> Union[CNNCifar10, CNNCifar100]:
            """Create initial CNNCifar10 model."""
            return CNNCifar10().to(device)

    elif config.model_name.lower() == "cnncifar100":
        split = CNNCifar100ModelSplit

        def create_model() -> Union[CNNCifar10, CNNCifar100]:
            """Create initial CNNCifar100 model."""
            return CNNCifar100().to(device)

    else:
        raise NotImplementedError("Model not implemented, check name. ")
    return create_model, split


def plot_metric_from_history(
    hist: History, save_plot_path: Path, suffix: Optional[str] = ""
) -> None:
    """Plot from Flower server History.

    Parameters
    ----------
    hist : History
        Object containing evaluation for all rounds.
    save_plot_path : Path
        Folder to save the plot to.
    suffix: Optional[str]
        Optional string to add at the end of the filename for the plot.
    """
    metric_type = "distributed"
    metric_dict = (
        hist.metrics_centralized
        if metric_type == "centralized"
        else hist.metrics_distributed
    )
    try:
        _, values = zip(*metric_dict["accuracy"])
    except KeyError:  # If no available metric data
        return

    # let's extract decentralized loss (main metric reported in FedProx paper)
    rounds_loss, values_loss = zip(*hist.losses_distributed)

    _, axs = plt.subplots(nrows=2, ncols=1, sharex="row")
    axs[0].plot(np.asarray(rounds_loss), np.asarray(values_loss))  # type: ignore
    axs[1].plot(np.asarray(rounds_loss), np.asarray(values))  # type: ignore

    axs[0].set_ylabel("Loss")  # type: ignore
    axs[1].set_ylabel("Accuracy")  # type: ignore

    axs[0].grid()  # type: ignore
    axs[1].grid()  # type: ignore
    plt.xlabel("Rounds")

    plt.savefig(Path(save_plot_path) / Path(f"{metric_type}_metrics{suffix}.png"))
    plt.close()


def save_results_as_pickle(
    history: History,
    file_path: Union[str, Path],
    default_filename: Optional[str] = "results.pkl",
) -> None:
    """Save results from simulation to pickle.

    Parameters
    ----------
    history: History
        History returned by start_simulation.
    file_path: Union[str, Path]
        Path to the file in which to store the history. If the path is a
        directory, default_filename will be appended; missing directories
        are created. If the resulting file already exists, a randomly
        generated suffix is added to the file name to avoid overwriting
        previous results.
    default_filename: Optional[str]
        File used by default if file_path points to a directory instead
        of a file. Default: "results.pkl"
    """
    path = Path(file_path)

    # ensure path exists
    path.mkdir(exist_ok=True, parents=True)

    def _add_random_suffix(path_: Path):
        """Add a random suffix to the file name."""
        print(f"File `{path_}` exists! ")
        suffix = token_hex(4)
        print(f"New results to be saved with suffix: {suffix}")
        return path_.parent / (path_.stem + "_" + suffix + ".pkl")

    def _complete_path_with_default_name(path_: Path):
        """Append the default file name to the path."""
        print("Using default filename")
        if default_filename is None:
            return path_
        return path_ / default_filename

    if path.is_dir():
        path = _complete_path_with_default_name(path)

    if path.is_file():
        path = _add_random_suffix(path)

    print(f"Results will be saved into: {path}")
    data = {"history": history}
    # save results to pickle
    with open(str(path), "wb") as handle:
        pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)
extras = ["simulation"], version = "1.5.0" } -hydra-core = "1.3.2" # don't change this +python = ">=3.10.0, <3.11.0" # don't change this +flwr = { extras = ["simulation"], version = "1.9.0" } +hydra-core = "1.3.2" # don't change this +pandas = "^2.2.2" +matplotlib = "^3.9.0" +tqdm = "^4.66.4" +torch = "^2.2.2" +torchvision = "^0.17.2" +setuptools = "<70" +flwr-datasets = { extras = ["vision"], version = ">=0.3.0" } [tool.poetry.dev-dependencies] isort = "==5.13.2" @@ -52,6 +59,7 @@ pytest = "==6.2.4" pytest-watch = "==4.2.0" ruff = "==0.0.272" types-requests = "==2.27.7" +virtualenv = "==20.21.0" [tool.isort] line_length = 88 @@ -68,9 +76,7 @@ target-version = ["py38", "py39", "py310", "py311"] [tool.pytest.ini_options] minversion = "6.2" addopts = "-qq" -testpaths = [ - "flwr_baselines", -] +testpaths = ["flwr_baselines"] [tool.mypy] ignore_missing_imports = true @@ -78,18 +84,14 @@ strict = false plugins = "numpy.typing.mypy_plugin" [tool.pylint."MESSAGES CONTROL"] -disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias" good-names = "i,j,k,_,x,y,X,Y" signature-mutators = "hydra.main.main" -[tool.pylint.typecheck] +[tool.pylint."TYPECHECK"] generated-members = "numpy.*, torch.*, tensorflow.*" [[tool.mypy.overrides]] -module = [ - "importlib.metadata.*", - "importlib_metadata.*", -] +module = ["importlib.metadata.*", "importlib_metadata.*"] follow_imports = "skip" follow_imports_for_stubs = true disallow_untyped_calls = false diff --git a/benchmarks/flowertune-llm/README.md b/benchmarks/flowertune-llm/README.md index 0cb69e7ff9c7..c3e1b2b7dd53 100644 --- a/benchmarks/flowertune-llm/README.md +++ b/benchmarks/flowertune-llm/README.md @@ -1,4 +1,4 @@ -![](_static/flower_llm.jpg) +[![FlowerTune LLM Leaderboard](_static/flower_llm.png)](https://flower.ai/benchmarks/llm-leaderboard) # FlowerTune LLM Leaderboard @@ -9,39 +9,41 @@ Please follow the instructions to run and evaluate the federated LLMs. 
## Create a new project -As the first step, please register a Flower account on [Flower website](https://flower.ai/login). -Assuming `flwr` package is already installed on your system (check [here](https://flower.ai/docs/framework/how-to-install-flower.html) for `flwr` installation). -We provide a single-line command to create a new project directory based on your selected challenge: +As the first step, please register for a Flower account on [flower.ai/login](https://flower.ai/login). +Then, create a new Python environment and install Flower. + +> [!TIP] +> We recommend using `pyenv` with the `virtualenv` plugin to create your environment with Python >= 3.10.0. Other managers, such as Conda, will likely work as well. Check the [documentation](https://flower.ai/docs/framework/how-to-install-flower.html) for alternative ways to install Flower. ```shell -flwr new --framework=flwrtune --username=your_flower_account +pip install flwr ``` -Then you will see a prompt to ask your project name and the choice of LLM challenges from the set of general NLP, finance, medical and code. -Type your project name and select your preferred challenge, -and then a new project directory will be generated automatically. - -### Structure +In the new environment, create a new Flower project using the `FlowerTune` template. 
You will be prompted for a name to give to your app/project, your username, and for your choice of LLM challenge: +```shell +flwr new --framework=FlowerTune +``` -After running `flwr new`, you will see a new directory generated with the following structure: +The `flwr new` command will generate a directory with the following structure: ```bash - - ├── README.md # <- Instructions - ├── pyproject.toml # <- Environment dependencies - └── - ├── app.py # <- Flower ClientApp/ServerApp build - ├── client.py # <- Flower client constructor - ├── server.py # <- Sever-related functions - ├── models.py # <- Model build - ├── dataset.py # <- Dataset and tokenizer build - ├── conf/config.yaml # <- User configuration - └── conf/static_config.yaml # <- Static configuration + +├── README.md # Instructions +├── pyproject.toml # Environment dependencies and configs +└── + ├── __init__.py + ├── client_app.py # Flower ClientApp build + ├── dataset.py # Dataset and tokenizer build + ├── models.py # Model build + ├── server_app.py # Flower ServerApp build + └── strategy.py # Flower strategy build ``` This can serve as the starting point for you to build up your own federated LLM fine-tuning methods. -Please note that any modification to the content of `conf/static_config.yaml` is strictly prohibited for those who wish to participate in the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard). -Otherwise, the submission will not be considered. + +> [!IMPORTANT] +> Please note that if you intend to submit your project as an entry to the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard) modifications to the `[tool.flwr.app.config.static]` section and `options.num-supernodes` under the `[tool.flwr.federations.local-simulation]` section in the `pyproject.toml` are not allowed and will invalidate the submission. + ## Run FlowerTune LLM challenges @@ -50,12 +52,17 @@ With a new project directory created, running a baseline challenge can be done b 1. 
Navigate inside the directory that you just created. -2. Follow the `Environments setup` section of `README.md` in the project directory to install project dependencies. +2. Follow the `Environments setup` section of `README.md` in the project directory to install the project dependencies. 3. Run the challenge as indicated in the `Running the challenge` section in the `README.md`. -## Evaluate pre-trained LLMs +## Evaluate fine-tuned LLMs + +Once the LLM fine-tuning finished, evaluate the performance of your fine-tuned LLM +following the `README.md` in [`evaluation`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation) directory. + -After the LLM fine-tuning finished, evaluate the performance of your pre-trained LLMs -following the `README.md` in `evaluation` directory. +> [!NOTE] +> If you have any questions about running FlowerTune LLM challenges or evaluation, please feel free to make posts at our dedicated [FlowerTune Category](https://discuss.flower.ai/c/flowertune-llm-leaderboard/) on [Flower Discuss](https://discuss.flower.ai) forum, +or join our [Slack channel](https://flower.ai/join-slack/) to ask questions in the `#flowertune-llm-leaderboard` channel. 
diff --git a/benchmarks/flowertune-llm/_static/flower_llm.jpg b/benchmarks/flowertune-llm/_static/flower_llm.jpg deleted file mode 100644 index 96081d9c2ad1..000000000000 Binary files a/benchmarks/flowertune-llm/_static/flower_llm.jpg and /dev/null differ diff --git a/benchmarks/flowertune-llm/_static/flower_llm.png b/benchmarks/flowertune-llm/_static/flower_llm.png new file mode 100644 index 000000000000..e9a0ba3bf30e Binary files /dev/null and b/benchmarks/flowertune-llm/_static/flower_llm.png differ diff --git a/benchmarks/flowertune-llm/evaluation/README.md b/benchmarks/flowertune-llm/evaluation/README.md new file mode 100644 index 000000000000..e8ac82d1ccee --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/README.md @@ -0,0 +1,49 @@ +# FlowerTune LLM Evaluation + +This directory provides various evaluation metrics to assess the quality of your fine-tuned LLMs. +If you are participating [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard), evaluating your fine-tuned LLM is the final step prior to have your submission added to the [LLM Leaderboard](https://flower.ai/benchmarks/llm-leaderboard#how-to-participate). The evaluation scores generated here will be displayed as the definitive values on the LLM Leaderboard. + +## How to run + +Navigate to the directory corresponding to your selected challenge ([`general NLP`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/general-nlp), [`finance`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/finance), [`medical`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/medical), or [`code`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation/code)) and follow the instructions there to execute the evaluation. 
+ +> [!NOTE] +> If you wish to participate in the LLM Leaderboard, you must not modify the evaluation code and should use the exact command provided in the respective directory to run the evaluation. + + +## Baseline results + +The default template generated by `flwr new` (see the [Project Creation Instructions](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm#create-a-new-project)) for each challenge will produce results as follows, which serve as the lower bound on the LLM Leaderboard. + +### General NLP + +| | STEM | SS | Humanities | Avg | +|:-------:|:-----:|:-----:|:----------:|:-----:| +| Acc (%) | 12.37 | 13.49 | 12.60 | 12.82 | + +### Finance + +| | FPB | FIQA | TFNS | Avg | +|:-------:|:-----:|:-----:|:-----:|:-----:| +| Acc (%) | 44.55 | 62.50 | 28.77 | 45.27 | + +### Medical + +| | PubMedQA | MedMCQA | MedQA | Avg | +|:-------:|:--------:|:-------:|:-----:|:-----:| +| Acc (%) | 59.00 | 23.69 | 27.10 | 36.60 | + +### Code + +| | MBPP | HumanEval | MultiPL-E (JS) | MultiPL-E (C++) | Avg | +|:----------:|:-----:|:---------:|:--------------:|:---------------:|:-----:| +| Pass@1 (%) | 31.60 | 23.78 | 28.57 | 25.47 | 27.36 | + +> [!NOTE] +> In the LLM Leaderboard, we rank the submissions based on the **average** value derived from different evaluation datasets for each challenge. + + +## Make submission on FlowerTune LLM Leaderboard + +If your LLM outperforms the listed benchmarks in any challenge, +we encourage you to submit your code and model to the FlowerTune LLM Leaderboard without hesitation (see the [How-to-participate Instructions](https://flower.ai/benchmarks/llm-leaderboard#how-to-participate)). 
diff --git a/benchmarks/flowertune-llm/evaluation/code/README.md b/benchmarks/flowertune-llm/evaluation/code/README.md new file mode 100644 index 000000000000..fd63ced2f1e2 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/code/README.md @@ -0,0 +1,70 @@ +# Evaluation for Code challenge + +We leverage the code generation evaluation metrics provided by [bigcode-evaluation-harness](https://github.com/bigcode-project/bigcode-evaluation-harness/tree/main) to evaluate our fine-tuned LLMs. +Three datasets have been selected for this evaluation: [MBPP](https://huggingface.co/datasets/google-research-datasets/mbpp) (Python), [HumanEval](https://huggingface.co/datasets/openai/openai_humaneval) (Python), and [MultiPL-E](https://github.com/nuprl/MultiPL-E) (JavaScript, C++). + +> [!WARNING] +> The evaluation process takes ~30 GB VRAM. On a 40GB A100 it requires 15-30mins depending on the dataset to complete. + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/code ./flowertune-eval-code && rm -rf flower && cd flowertune-eval-code +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +After that, install `Node.js` and `g++` for the evaluation of JavaScript, C++: + +```shell +# Install nvm (Node Version Manager) +curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash + +# Restart your terminal + +# Download and install Node.js (you may need to restart the terminal) +nvm install 20 + +# Install g++ +sudo apt-get install g++ +``` + +Then, download the `main.py` script from `bigcode-evaluation-harness` repository. 
+ +```shell +git clone https://github.com/bigcode-project/bigcode-evaluation-harness.git && cd bigcode-evaluation-harness && git checkout 0f3e95f0806e78a4f432056cdb1be93604a51d69 && mv main.py ../ && cd .. && rm -rf bigcode-evaluation-harness +``` + + +## Generate model answers & calculate pass@1 score + +> [!NOTE] +> Evaluation needs to be run on MBPP, HumanEval, MultiPL-E (JS) and MultiPL-E (C++). + +```bash +python main.py \ +--model=mistralai/Mistral-7B-v0.3 \ +--peft_model=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--max_length_generation=1024 \ # change to 2048 when running mbpp +--batch_size=4 \ +--use_auth_token \ +--allow_code_execution \ +--save_generations \ +--save_references \ +--tasks=humaneval \ # chosen from [mbpp, humaneval, multiple-js, multiple-cpp] +--metric_output_path=./evaluation_results_humaneval.json # change dataset name based on your choice +``` + +The model answers and pass@1 scores will be saved to `generations_{dataset_name}.json` and `evaluation_results_{dataset_name}.json`, respectively. + + +> [!NOTE] +> Please ensure that you provide all **four pass@1 scores** for the evaluation datasets when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). 
diff --git a/benchmarks/flowertune-llm/evaluation/code/requirements.txt b/benchmarks/flowertune-llm/evaluation/code/requirements.txt new file mode 100644 index 000000000000..9c9e3f8e27a1 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/code/requirements.txt @@ -0,0 +1,8 @@ +peft==0.6.2 +datasets==2.20.0 +evaluate==0.3.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 +hf_transfer==0.1.8 +git+https://github.com/bigcode-project/bigcode-evaluation-harness.git@0f3e95f0806e78a4f432056cdb1be93604a51d69 diff --git a/benchmarks/flowertune-llm/evaluation/finance/README.md b/benchmarks/flowertune-llm/evaluation/finance/README.md new file mode 100644 index 000000000000..b5595433a238 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/README.md @@ -0,0 +1,40 @@ +# Evaluation for Finance challenge + +We build a sentiment classification pipeline on finance-related text to evaluate our fine-tuned LLMs. +Three datasets have been selected for this evaluation: [FPB](https://huggingface.co/datasets/takala/financial_phrasebank), [FIQA](https://huggingface.co/datasets/pauri32/fiqa-2018), and [TFNS](https://huggingface.co/datasets/zeroshot/twitter-financial-news-sentiment). + + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/finance ./flowertune-eval-finance && rm -rf flower && cd flowertune-eval-finance +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +## Generate model decision & calculate accuracy + +> [!NOTE] +> Please ensure that you use `quantization=4` to run the evaluation if you wish to participate in the LLM Leaderboard. 
+ +```bash +python eval.py \ +--peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--run-name=fl \ # specified name for this run +--batch-size=32 \ +--quantization=4 \ +--datasets=fpb,fiqa,tfns +``` + +The model answers and accuracy values will be saved to `benchmarks/generation_{dataset_name}_{run_name}.jsonl` and `benchmarks/acc_{dataset_name}_{run_name}.txt`, respectively. + +> [!NOTE] +> Please ensure that you provide all **three accuracy values (FPB, FIQA, TFNS)** for three evaluation datasets when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). diff --git a/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py b/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py new file mode 100644 index 000000000000..2b1a174e571f --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/benchmarks.py @@ -0,0 +1,135 @@ +import torch +from sklearn.metrics import accuracy_score +from tqdm import tqdm +from utils import ( + add_instruct, + change_target, + format_example, + generate_label, + load_data, + save_results, +) + + +def infer_fiqa(model, tokenizer, batch_size, run_name): + name = "fiqa" + dataset = load_data("pauri32/fiqa-2018", concat=True) + + # Post process + dataset["output"] = dataset.sentiment_score.apply(generate_label) + dataset["instruction"] = dataset.apply(add_instruct, axis=1) + dataset = dataset[["sentence", "output", "instruction"]] + dataset.columns = ["input", "output", "instruction"] + + dataset[["context", "target"]] = dataset.apply( + format_example, axis=1, result_type="expand" + ) + + # Print example + print(f"\n\nPrompt example:\n{dataset['context'][0]}\n\n") + + # Run inference + dataset, acc = inference(dataset, model, tokenizer, batch_size) + + # Save results and generations + save_results(name, run_name, dataset, acc) + + +def infer_fpb(model, tokenizer, 
batch_size, run_name): + name = "fpb" + dataset = load_data("takala/financial_phrasebank", "sentences_50agree") + + # Post process + dataset.columns = ["input", "output"] + dic = {0: "negative", 1: "neutral", 2: "positive"} + dataset["output"] = dataset["output"].apply(lambda x: dic[x]) + + dataset["instruction"] = ( + "What is the sentiment of this news? Please choose an answer from {negative/neutral/positive}." + ) + dataset[["context", "target"]] = dataset.apply( + format_example, axis=1, result_type="expand" + ) + + # Print example + print(f"\n\nPrompt example:\n{dataset['context'][0]}\n\n") + + # Run inference + dataset, acc = inference(dataset, model, tokenizer, batch_size) + + # Save results and generations + save_results(name, run_name, dataset, acc) + + +def infer_tfns(model, tokenizer, batch_size, run_name): + name = "tfns" + dataset = load_data( + "zeroshot/twitter-financial-news-sentiment", valid_set="validation" + ) + + # Post process + dic = {0: "negative", 1: "positive", 2: "neutral"} + dataset["label"] = dataset["label"].apply(lambda x: dic[x]) + + dataset["instruction"] = ( + "What is the sentiment of this tweet? Please choose an answer from {negative/neutral/positive}." + ) + + dataset.columns = ["input", "output", "instruction"] + dataset[["context", "target"]] = dataset.apply( + format_example, axis=1, result_type="expand" + ) + + # print example + print(f"\n\nPrompt example:\n{dataset['context'][0]}\n\n") + + # Run inference + dataset, acc = inference(dataset, model, tokenizer, batch_size) + + # Save results and generations + save_results(name, run_name, dataset, acc) + + +def inference(dataset, model, tokenizer, batch_size): + context = dataset["context"].tolist() + + last_batch = dataset.shape[0] % batch_size + total_steps = dataset.shape[0] // batch_size + 1 + print( + f"Total len: {len(context)}. Batch size: {batch_size}. 
Total steps: {total_steps}" + ) + + out_text_list = [] + for i in tqdm(range(total_steps)): + idx_s = i * batch_size + tmp_context = ( + context[idx_s : idx_s + last_batch] + if i == total_steps - 1 + else context[idx_s : idx_s + batch_size] + ) + + if tmp_context: + tokens = tokenizer( + tmp_context, + return_tensors="pt", + padding=True, + max_length=512, + return_token_type_ids=False, + ) + for k in tokens.keys(): + tokens[k] = tokens[k].cuda() + res = model.generate( + **tokens, max_length=512, eos_token_id=tokenizer.eos_token_id + ) + res_sentences = [tokenizer.decode(i, skip_special_tokens=True) for i in res] + out_text = [o.split("Answer: ")[1] for o in res_sentences] + out_text_list += out_text + torch.cuda.empty_cache() + + dataset["out_text"] = out_text_list + dataset["new_target"] = dataset["target"].apply(change_target) + dataset["new_out"] = dataset["out_text"].apply(change_target) + + acc = accuracy_score(dataset["new_target"], dataset["new_out"]) + + return dataset, acc diff --git a/benchmarks/flowertune-llm/evaluation/finance/eval.py b/benchmarks/flowertune-llm/evaluation/finance/eval.py new file mode 100644 index 000000000000..3e85b2fe21af --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/eval.py @@ -0,0 +1,64 @@ +import argparse + +import torch +from peft import PeftModel +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +from benchmarks import infer_fiqa, infer_fpb, infer_tfns + +# Fixed seed +torch.manual_seed(2024) + +parser = argparse.ArgumentParser() +parser.add_argument( + "--base-model-name-path", type=str, default="mistralai/Mistral-7B-v0.3" +) +parser.add_argument("--run-name", type=str, default="fl") +parser.add_argument("--peft-path", type=str, default=None) +parser.add_argument("--datasets", type=str, default="fpb") +parser.add_argument("--batch-size", type=int, default=32) +parser.add_argument("--quantization", type=int, default=4) +args = parser.parse_args() + + +# Load model and 
tokenizer +if args.quantization == 4: + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + torch_dtype = torch.float32 +elif args.quantization == 8: + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + torch_dtype = torch.float16 +else: + raise ValueError( + f"Use 4-bit or 8-bit quantization. You passed: {args.quantization}/" + ) + +model = AutoModelForCausalLM.from_pretrained( + args.base_model_name_path, + quantization_config=quantization_config, + torch_dtype=torch_dtype, +) +if args.peft_path is not None: + model = PeftModel.from_pretrained( + model, args.peft_path, torch_dtype=torch_dtype + ).to("cuda") + +tokenizer = AutoTokenizer.from_pretrained(args.base_model_name_path) + +if not tokenizer.pad_token or tokenizer.pad_token_id == tokenizer.eos_token_id: + tokenizer.add_special_tokens({"pad_token": "[PAD]"}) + model.resize_token_embeddings(len(tokenizer)) + + +# Evaluate +model = model.eval() +with torch.no_grad(): + for dataset in args.datasets.split(","): + if dataset == "fpb": + infer_fpb(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "fiqa": + infer_fiqa(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "tfns": + infer_tfns(model, tokenizer, args.batch_size, args.run_name) + else: + raise ValueError("Undefined Dataset.") diff --git a/benchmarks/flowertune-llm/evaluation/finance/requirements.txt b/benchmarks/flowertune-llm/evaluation/finance/requirements.txt new file mode 100644 index 000000000000..89dcf40b819f --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/requirements.txt @@ -0,0 +1,7 @@ +peft==0.6.2 +scikit-learn==1.5.0 +datasets==2.20.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 +hf_transfer==0.1.8 diff --git a/benchmarks/flowertune-llm/evaluation/finance/utils.py b/benchmarks/flowertune-llm/evaluation/finance/utils.py new file mode 100644 index 000000000000..900d1de3e096 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/finance/utils.py @@ -0,0 
+1,70 @@ +import os + +import datasets +from datasets import Dataset + + +def load_data(dataset_path, name=None, concat=False, valid_set=None): + dataset = datasets.load_dataset(dataset_path, name, trust_remote_code=True) + + if concat: + dataset = datasets.concatenate_datasets( + [dataset["train"], dataset["validation"], dataset["test"]] + ) + + if valid_set: + dataset = dataset[valid_set] + else: + dataset = dataset if concat else dataset["train"] + dataset = dataset.train_test_split(0.25, seed=42)["test"] + + dataset = dataset.to_pandas() + + return dataset + + +def format_example(example: dict): + context = f"Instruction: {example['instruction']}\n" + if example.get("input"): + context += f"Input: {example['input']}\n" + context += "Answer: " + target = example["output"] + return {"context": context, "target": target} + + +def generate_label(value): + return "negative" if value < -0.1 else "neutral" if value < 0.1 else "positive" + + +def add_instruct(content): + tag = "tweet" if content.format == "post" else "news" + return f"What is the sentiment of this {tag}? Please choose an answer from {{negative/neutral/positive}}." + + +def change_target(x): + if "positive" in x or "Positive" in x: + return "positive" + elif "negative" in x or "Negative" in x: + return "negative" + else: + return "neutral" + + +def save_results(dataset_name, run_name, dataset, acc): + path = "./benchmarks/" + if not os.path.exists(path): + os.makedirs(path) + + # Save results + results_path = os.path.join(path, f"acc_{dataset_name}_{run_name}.txt") + with open(results_path, "w") as f: + f.write(f"Accuracy: {acc}. ") + print(f"Accuracy: {acc}. 
") + + # Save generations + generation_path = os.path.join(path, f"generation_{dataset_name}_{run_name}.jsonl") + dataset = Dataset.from_pandas(dataset) + dataset = dataset.remove_columns( + ["input", "output", "instruction", "target", "out_text"] + ) + dataset.to_json(generation_path, orient="records") diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/README.md b/benchmarks/flowertune-llm/evaluation/general-nlp/README.md new file mode 100644 index 000000000000..c3fd71da6ea2 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/README.md @@ -0,0 +1,41 @@ +# Evaluation for General NLP challenge + +We build up a multi-task language understanding pipeline to evaluate our fine-tuned LLMs. +The [MMLU](https://huggingface.co/datasets/lukaemon/mmlu) dataset is used for this evaluation, encompassing three categories: STEM, social sciences (SS), and humanities. + + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/general-nlp ./flowertune-eval-general-nlp && rm -rf flower && cd flowertune-eval-general-nlp +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +## Generate model decision & calculate accuracy + +> [!NOTE] +> Please ensure that you use `quantization=4` to run the evaluation if you wish to participate in the LLM Leaderboard. 
+ +```bash +python eval.py \ +--peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--run-name=fl \ # specified name for this run +--batch-size=16 \ +--quantization=4 \ +--category=stem,social_sciences,humanities +``` + +The model answers and accuracy values will be saved to `benchmarks/generation_{dataset_name}_{category_name}_{run_name}.jsonl` and `benchmarks/acc_{dataset_name}_{category_name}_{run_name}.txt`, respectively. + + +> [!NOTE] +> Please ensure that you provide all **three accuracy values (STEM, SS, Humanities)** for three evaluation categories when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/benchmarks.py b/benchmarks/flowertune-llm/evaluation/general-nlp/benchmarks.py new file mode 100644 index 000000000000..c20522e7ed79 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/benchmarks.py @@ -0,0 +1,201 @@ +import json + +import pandas as pd +from sklearn.metrics import accuracy_score +from torch.utils.data import DataLoader +from tqdm import tqdm +from utils import format_answer, format_example, save_results + +from datasets import Dataset, load_dataset + +INSTRUCTIONS = { + "mmlu": "Answer the following multiple choice question.", +} + +MMLU_CATEGORY = { + "stem": [ + "abstract_algebra", + "anatomy", + "astronomy", + "college_biology", + "college_chemistry", + "college_computer_science", + "college_mathematics", + "college_physics", + "computer_security", + "conceptual_physics", + "electrical_engineering", + "elementary_mathematics", + "high_school_biology", + "high_school_chemistry", + "high_school_computer_science", + "high_school_mathematics", + "high_school_physics", + "high_school_statistics", + "machine_learning", + ], + "social_sciences": [ + "econometrics", + "high_school_geography", + 
"high_school_government_and_politics", + "high_school_macroeconomics", + "high_school_microeconomics", + "high_school_psychology", + "human_sexuality", + "professional_psychology", + "public_relations", + "security_studies", + "sociology", + "us_foreign_policy", + ], + "humanities": [ + "formal_logic", + "high_school_european_history", + "high_school_us_history", + "high_school_world_history", + "international_law", + "jurisprudence", + "logical_fallacies", + "moral_disputes", + "moral_scenarios", + "philosophy", + "prehistory", + "professional_law", + "world_religions", + ], + "other": [ + "business_ethics", + "clinical_knowledge", + "college_medicine", + "global_facts", + "human_aging", + "management", + "marketing", + "medical_genetics", + "miscellaneous", + "nutrition", + "professional_accounting", + "professional_medicine", + "virology", + ], +} + + +def infer_mmlu(model, tokenizer, batch_size, category, run_name): + name = "mmlu" + answer_type = "mcq" + + # Download dataset + dataframes = [] + for subset in MMLU_CATEGORY[category]: + subset_data = load_dataset( + "lukaemon/mmlu", + subset, + split="test", + trust_remote_code=True, + ) + subset_df = pd.DataFrame(subset_data.map(lambda x: {"subset": subset, **x})) + dataframes.append(subset_df) + + dataset_df = pd.concat(dataframes, axis=0) + dataset = Dataset.from_pandas(dataset_df) + if "__index_level_0__" in dataset.column_names: + dataset = dataset.remove_columns("__index_level_0__") + + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + options = [row["A"], row["B"], row["C"], row["D"]] + row["prompt"] = format_example(row["input"], options) + row["gold"] = row["target"] + row["subset"] = row["subset"] + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results( + name, run_name, dataset, model, tokenizer, batch_size, answer_type, category + ) + + +def generate_results( + 
name, run_name, dataset, model, tokenizer, batch_size, answer_type, category +): + # Run inference + prediction = inference(dataset, model, tokenizer, batch_size) + + # Calculate accuracy + acc = accuracy_compute(prediction, answer_type) + + # Save results and generations + save_results(name, category, run_name, prediction, acc) + + +def inference(dataset, model, tokenizer, batch_size): + columns_process = ["prompt", "gold"] + if "subset" in dataset.features: + columns_process.append("subset") + dataset_process = pd.DataFrame(dataset, columns=dataset.features)[columns_process] + dataset_process = dataset_process.assign(output="Null") + temperature = 1.0 + + inference_data = json.loads(dataset_process.to_json(orient="records")) + data_loader = DataLoader(inference_data, batch_size=batch_size, shuffle=False) + + batch_counter = 0 + for batch in tqdm(data_loader, total=len(data_loader), position=0, leave=True): + prompts = [ + f"<|im_start|>question\n{prompt}<|im_end|>\n<|im_start|>answer\n" + for prompt in batch["prompt"] + ] + if batch_counter == 0: + print(prompts[0]) + + # Process tokenizer + stop_seq = ["###"] + if tokenizer.eos_token is not None: + stop_seq.append(tokenizer.eos_token) + if tokenizer.pad_token is not None: + stop_seq.append(tokenizer.pad_token) + max_new_tokens = len( + tokenizer(batch["gold"][0], add_special_tokens=False)["input_ids"] + ) + + outputs = [] + for prompt in prompts: + input_ids = tokenizer.encode(prompt, return_tensors="pt").to("cuda") + output_ids = model.generate( + inputs=input_ids, + max_new_tokens=max_new_tokens, + do_sample=False, + top_p=1.0, + temperature=temperature, + pad_token_id=tokenizer.eos_token_id, + ) + output_ids = output_ids[0][len(input_ids[0]) :] + output = tokenizer.decode(output_ids, skip_special_tokens=True) + outputs.append(output) + + for prompt, out in zip(batch["prompt"], outputs): + dataset_process.loc[dataset_process["prompt"] == prompt, "output"] = out + batch_counter += 1 + + return dataset_process + 
+ +def accuracy_compute(dataset, answer_type): + dataset = json.loads(dataset.to_json(orient="records")) + preds, golds = [], [] + for row in dataset: + answer = row["gold"].lower() + output = row["output"].lower() + pred, gold = format_answer(output, answer, answer_type=answer_type) + preds.append(pred) + golds.append(gold) + + accuracy = accuracy_score(preds, golds) + + return accuracy diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/eval.py b/benchmarks/flowertune-llm/evaluation/general-nlp/eval.py new file mode 100644 index 000000000000..c50928610c44 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/eval.py @@ -0,0 +1,68 @@ +import argparse + +import torch +from peft import PeftModel +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +from benchmarks import MMLU_CATEGORY, infer_mmlu + +# Fixed seed +torch.manual_seed(2024) + +parser = argparse.ArgumentParser() +parser.add_argument( + "--base-model-name-path", type=str, default="mistralai/Mistral-7B-v0.3" +) +parser.add_argument("--run-name", type=str, default="fl") +parser.add_argument("--peft-path", type=str, default=None) +parser.add_argument( + "--datasets", + type=str, + default="mmlu", + help="The dataset to infer on", +) +parser.add_argument( + "--category", + type=str, + default=None, + help="The category for MMLU dataset, chosen from [stem, social_sciences, humanities, other]", +) +parser.add_argument("--batch-size", type=int, default=16) +parser.add_argument("--quantization", type=int, default=4) +args = parser.parse_args() + + +# Load model and tokenizer +if args.quantization == 4: + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + torch_dtype = torch.float32 +elif args.quantization == 8: + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + torch_dtype = torch.float16 +else: + raise ValueError( + f"Use 4-bit or 8-bit quantization. 
You passed: {args.quantization}/" + ) + +model = AutoModelForCausalLM.from_pretrained( + args.base_model_name_path, + quantization_config=quantization_config, + torch_dtype=torch_dtype, +) +if args.peft_path is not None: + model = PeftModel.from_pretrained( + model, args.peft_path, torch_dtype=torch_dtype + ).to("cuda") + +tokenizer = AutoTokenizer.from_pretrained(args.base_model_name_path) + +# Evaluate +for dataset in args.datasets.split(","): + if dataset == "mmlu": + for cate in args.category.split(","): + if cate not in MMLU_CATEGORY.keys(): + raise ValueError("Undefined Category.") + else: + infer_mmlu(model, tokenizer, args.batch_size, cate, args.run_name) + else: + raise ValueError("Undefined Dataset.") diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/requirements.txt b/benchmarks/flowertune-llm/evaluation/general-nlp/requirements.txt new file mode 100644 index 000000000000..f5c46e869ce2 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/requirements.txt @@ -0,0 +1,8 @@ +peft==0.6.2 +pandas==2.2.2 +scikit-learn==1.5.0 +datasets==2.20.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 +hf_transfer==0.1.8 diff --git a/benchmarks/flowertune-llm/evaluation/general-nlp/utils.py b/benchmarks/flowertune-llm/evaluation/general-nlp/utils.py new file mode 100644 index 000000000000..71334ca6c199 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/general-nlp/utils.py @@ -0,0 +1,84 @@ +import os +import re + + +def format_example(question, choices): + if not question.endswith("?") and not question.endswith("."): + question += "?" + options_str = "\n".join([f"{chr(65+i)}. 
{choices[i]}" for i in range(len(choices))]) + prompt = "Question: " + question + "\n\nOptions:\n" + options_str + return prompt + + +def save_results(dataset_name, category, run_name, dataset, acc): + path = "./benchmarks/" + if not os.path.exists(path): + os.makedirs(path) + + # Save results + cate_name = f"_{category}" if category else "" + results_path = os.path.join(path, f"acc_{dataset_name}{cate_name}_{run_name}.txt") + with open(results_path, "w") as f: + f.write(f"Accuracy: {acc}. ") + print(f"Accuracy: {acc}. ") + + # Save generations + generation_path = os.path.join( + path, f"generation_{dataset_name}{cate_name}_{run_name}.jsonl" + ) + dataset.to_json(generation_path, orient="records") + + +def format_answer(output_full, answer, answer_type="mcq"): + output = output_full + default = (output_full, answer) + if "\n##" in output: + try: + output = output.split("\n##")[1].split("\n")[0].strip().lower() + except Exception: + return default + if "###" in answer: + try: + answer = answer.split("answer is:")[1].split("###")[0].strip() + except Exception: + return default + + output = re.sub(r"[^a-zA-Z0-9]", " ", output).strip() + output = re.sub(" +", " ", output) + + if answer_type == "boolean": + output = clean_boolean_answer(output) + elif answer_type == "mcq": + output = clean_mcq_answer(output) + + if output in ["a", "b", "c", "d", "e", "yes", "no"]: + return output, answer + else: + return default + + +def clean_mcq_answer(output): + output = clean_answer(output) + try: + output = output[0] + except Exception: + return output + return output + + +def clean_boolean_answer(output): + if "yesyes" in output: + output = output.replace("yesyes", "yes") + elif "nono" in output: + output = output.replace("nono", "no") + elif "yesno" in output: + output = output.replace("yesno", "yes") + elif "noyes" in output: + output = output.replace("noyes", "no") + output = clean_answer(output) + return output + + +def clean_answer(output): + output_clean = 
output.encode("ascii", "ignore").decode("ascii") + return output_clean diff --git a/benchmarks/flowertune-llm/evaluation/medical/README.md b/benchmarks/flowertune-llm/evaluation/medical/README.md new file mode 100644 index 000000000000..628489ce8de6 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/README.md @@ -0,0 +1,41 @@ +# Evaluation for Medical challenge + +We build up a medical question answering (QA) pipeline to evaluate our fine-tuned LLMs. +Three datasets have been selected for this evaluation: [PubMedQA](https://huggingface.co/datasets/bigbio/pubmed_qa), [MedMCQA](https://huggingface.co/datasets/medmcqa), and [MedQA](https://huggingface.co/datasets/bigbio/med_qa). + + +## Environment Setup + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/benchmarks/flowertune-llm/evaluation/medical ./flowertune-eval-medical && rm -rf flower && cd flowertune-eval-medical +``` + +Create a new Python environment (we recommend Python 3.10), activate it, then install dependencies with: + +```shell +# From a new python environment, run: +pip install -r requirements.txt + +# Log in HuggingFace account +huggingface-cli login +``` + +## Generate model decision & calculate accuracy + +> [!NOTE] +> Please ensure that you use `quantization=4` to run the evaluation if you wish to participate in the LLM Leaderboard. + +```bash +python eval.py \ +--peft-path=/path/to/fine-tuned-peft-model-dir/ \ # e.g., ./peft_1 +--run-name=fl \ # specified name for this run +--batch-size=16 \ +--quantization=4 \ +--datasets=pubmedqa,medmcqa,medqa +``` + +The model answers and accuracy values will be saved to `benchmarks/generation_{dataset_name}_{run_name}.jsonl` and `benchmarks/acc_{dataset_name}_{run_name}.txt`, respectively. 
+ + +> [!NOTE] +> Please ensure that you provide all **three accuracy values (PubMedQA, MedMCQA, MedQA)** for three evaluation datasets when submitting to the LLM Leaderboard (see the [`Make Submission`](https://github.com/adap/flower/tree/main/benchmarks/flowertune-llm/evaluation#make-submission-on-flowertune-llm-leaderboard) section). diff --git a/benchmarks/flowertune-llm/evaluation/medical/benchmarks.py b/benchmarks/flowertune-llm/evaluation/medical/benchmarks.py new file mode 100644 index 000000000000..c72e2a7894da --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/benchmarks.py @@ -0,0 +1,174 @@ +import json + +import pandas as pd +from sklearn.metrics import accuracy_score +from torch.utils.data import DataLoader +from tqdm import tqdm +from utils import format_answer, format_example, save_results + +import datasets + +# The instructions refer to Meditron evaluation: +# https://github.com/epfLLM/meditron/blob/main/evaluation/instructions.json +INSTRUCTIONS = { + "pubmedqa": "As an expert doctor in clinical science and medical knowledge, can you tell me if the following statement is correct? Answer yes, no, or maybe.", + "medqa": "You are a medical doctor taking the US Medical Licensing Examination. You need to demonstrate your understanding of basic and clinical science, medical knowledge, and mechanisms underlying health, disease, patient care, and modes of therapy. Show your ability to apply the knowledge essential for medical practice. For the following multiple-choice question, select one correct answer from A to E. Base your answer on the current and standard practices referenced in medical guidelines.", + "medmcqa": "You are a medical doctor answering realworld medical entrance exam questions. Based on your understanding of basic and clinical science, medical knowledge, and mechanisms underlying health, disease, patient care, and modes of therapy, answer the following multiple-choice question. Select one correct answer from A to D. 
Base your answer on the current and standard practices referenced in medical guidelines.", +} + + +def infer_pubmedqa(model, tokenizer, batch_size, run_name): + name = "pubmedqa" + answer_type = "boolean" + dataset = datasets.load_dataset( + "bigbio/pubmed_qa", + "pubmed_qa_labeled_fold0_source", + split="test", + trust_remote_code=True, + ) + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + context = "\n".join(row["CONTEXTS"]) + row["prompt"] = f"{context}\n{row['QUESTION']}" + row["gold"] = row["final_decision"] + row["long_answer"] = row["LONG_ANSWER"] + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results(name, run_name, dataset, model, tokenizer, batch_size, answer_type) + + +def infer_medqa(model, tokenizer, batch_size, run_name): + name = "medqa" + answer_type = "mcq" + dataset = datasets.load_dataset( + "bigbio/med_qa", + "med_qa_en_4options_source", + split="test", + trust_remote_code=True, + ) + + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + choices = [opt["value"] for opt in row["options"]] + row["prompt"] = format_example(row["question"], choices) + for opt in row["options"]: + if opt["value"] == row["answer"]: + row["gold"] = opt["key"] + break + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results(name, run_name, dataset, model, tokenizer, batch_size, answer_type) + + +def infer_medmcqa(model, tokenizer, batch_size, run_name): + name = "medmcqa" + answer_type = "mcq" + dataset = datasets.load_dataset( + "medmcqa", split="validation", trust_remote_code=True + ) + + # Post process + instruction = INSTRUCTIONS[name] + + def post_process(row): + options = [row["opa"], row["opb"], row["opc"], row["opd"]] + answer = int(row["cop"]) + row["prompt"] = 
format_example(row["question"], options) + row["gold"] = chr(ord("A") + answer) if answer in [0, 1, 2, 3] else None + row["prompt"] = f"{instruction}\n{row['prompt']}\nThe answer is:\n" + return row + + dataset = dataset.map(post_process) + + # Generate results + generate_results(name, run_name, dataset, model, tokenizer, batch_size, answer_type) + + +def generate_results( + name, run_name, dataset, model, tokenizer, batch_size, answer_type +): + # Run inference + prediction = inference(dataset, model, tokenizer, batch_size) + + # Calculate accuracy + acc = accuracy_compute(prediction, answer_type) + + # Save results and generations + save_results(name, run_name, prediction, acc) + + +def inference(dataset, model, tokenizer, batch_size): + columns_process = ["prompt", "gold"] + dataset_process = pd.DataFrame(dataset, columns=dataset.features)[columns_process] + dataset_process = dataset_process.assign(output="Null") + temperature = 1.0 + + inference_data = json.loads(dataset_process.to_json(orient="records")) + data_loader = DataLoader(inference_data, batch_size=batch_size, shuffle=False) + + batch_counter = 0 + for batch in tqdm(data_loader, total=len(data_loader), position=0, leave=True): + prompts = [ + f"<|im_start|>question\n{prompt}<|im_end|>\n<|im_start|>answer\n" + for prompt in batch["prompt"] + ] + if batch_counter == 0: + print(prompts[0]) + + # Process tokenizer + stop_seq = ["###"] + if tokenizer.eos_token is not None: + stop_seq.append(tokenizer.eos_token) + if tokenizer.pad_token is not None: + stop_seq.append(tokenizer.pad_token) + max_new_tokens = len( + tokenizer(batch["gold"][0], add_special_tokens=False)["input_ids"] + ) + + outputs = [] + for prompt in prompts: + input_ids = tokenizer.encode(prompt, return_tensors="pt").to("cuda") + output_ids = model.generate( + inputs=input_ids, + max_new_tokens=max_new_tokens, + do_sample=False, + top_p=1.0, + temperature=temperature, + pad_token_id=tokenizer.eos_token_id, + ) + output_ids = 
output_ids[0][len(input_ids[0]) :] + output = tokenizer.decode(output_ids, skip_special_tokens=True) + outputs.append(output) + + for prompt, out in zip(batch["prompt"], outputs): + dataset_process.loc[dataset_process["prompt"] == prompt, "output"] = out + batch_counter += 1 + + return dataset_process + + +def accuracy_compute(dataset, answer_type): + dataset = json.loads(dataset.to_json(orient="records")) + preds, golds = [], [] + for row in dataset: + answer = row["gold"].lower() + output = row["output"].lower() + pred, gold = format_answer(output, answer, answer_type=answer_type) + preds.append(pred) + golds.append(gold) + + accuracy = accuracy_score(preds, golds) + + return accuracy diff --git a/benchmarks/flowertune-llm/evaluation/medical/eval.py b/benchmarks/flowertune-llm/evaluation/medical/eval.py new file mode 100644 index 000000000000..7405e1493e4d --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/eval.py @@ -0,0 +1,62 @@ +import argparse + +import torch +from peft import PeftModel +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + +from benchmarks import infer_medmcqa, infer_medqa, infer_pubmedqa + +# Fixed seed +torch.manual_seed(2024) + +parser = argparse.ArgumentParser() +parser.add_argument( + "--base-model-name-path", type=str, default="mistralai/Mistral-7B-v0.3" +) +parser.add_argument("--run-name", type=str, default="fl") +parser.add_argument("--peft-path", type=str, default=None) +parser.add_argument( + "--datasets", + type=str, + default="pubmedqa", + help="The dataset to infer on: [pubmedqa, medqa, medmcqa]", +) +parser.add_argument("--batch-size", type=int, default=16) +parser.add_argument("--quantization", type=int, default=4) +args = parser.parse_args() + + +# Load model and tokenizer +if args.quantization == 4: + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + torch_dtype = torch.float32 +elif args.quantization == 8: + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + 
torch_dtype = torch.float16 +else: + raise ValueError( + f"Use 4-bit or 8-bit quantization. You passed: {args.quantization}/" + ) + +model = AutoModelForCausalLM.from_pretrained( + args.base_model_name_path, + quantization_config=quantization_config, + torch_dtype=torch_dtype, +) +if args.peft_path is not None: + model = PeftModel.from_pretrained( + model, args.peft_path, torch_dtype=torch_dtype + ).to("cuda") + +tokenizer = AutoTokenizer.from_pretrained(args.base_model_name_path) + +# Evaluate +for dataset in args.datasets.split(","): + if dataset == "pubmedqa": + infer_pubmedqa(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "medqa": + infer_medqa(model, tokenizer, args.batch_size, args.run_name) + elif dataset == "medmcqa": + infer_medmcqa(model, tokenizer, args.batch_size, args.run_name) + else: + raise ValueError("Undefined Dataset.") diff --git a/benchmarks/flowertune-llm/evaluation/medical/requirements.txt b/benchmarks/flowertune-llm/evaluation/medical/requirements.txt new file mode 100644 index 000000000000..f5c46e869ce2 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/requirements.txt @@ -0,0 +1,8 @@ +peft==0.6.2 +pandas==2.2.2 +scikit-learn==1.5.0 +datasets==2.20.0 +sentencepiece==0.2.0 +protobuf==5.27.1 +bitsandbytes==0.43.1 +hf_transfer==0.1.8 diff --git a/benchmarks/flowertune-llm/evaluation/medical/utils.py b/benchmarks/flowertune-llm/evaluation/medical/utils.py new file mode 100644 index 000000000000..44d0763d39d4 --- /dev/null +++ b/benchmarks/flowertune-llm/evaluation/medical/utils.py @@ -0,0 +1,81 @@ +import os +import re + + +def format_example(question, choices): + if not question.endswith("?") and not question.endswith("."): + question += "?" + options_str = "\n".join([f"{chr(65+i)}. 
{choices[i]}" for i in range(len(choices))]) + prompt = "Question: " + question + "\n\nOptions:\n" + options_str + return prompt + + +def save_results(dataset_name, run_name, dataset, acc): + path = "./benchmarks/" + if not os.path.exists(path): + os.makedirs(path) + + # Save results + results_path = os.path.join(path, f"acc_{dataset_name}_{run_name}.txt") + with open(results_path, "w") as f: + f.write(f"Accuracy: {acc}. ") + print(f"Accuracy: {acc}. ") + + # Save generations + generation_path = os.path.join(path, f"generation_{dataset_name}_{run_name}.jsonl") + dataset.to_json(generation_path, orient="records") + + +def format_answer(output_full, answer, answer_type="mcq"): + output = output_full + default = (output_full, answer) + if "\n##" in output: + try: + output = output.split("\n##")[1].split("\n")[0].strip().lower() + except Exception: + return default + if "###" in answer: + try: + answer = answer.split("answer is:")[1].split("###")[0].strip() + except Exception: + return default + + output = re.sub(r"[^a-zA-Z0-9]", " ", output).strip() + output = re.sub(" +", " ", output) + + if answer_type == "boolean": + output = clean_boolean_answer(output) + elif answer_type == "mcq": + output = clean_mcq_answer(output) + + if output in ["a", "b", "c", "d", "e", "yes", "no"]: + return output, answer + else: + return default + + +def clean_mcq_answer(output): + output = clean_answer(output) + try: + output = output[0] + except Exception: + return output + return output + + +def clean_boolean_answer(output): + if "yesyes" in output: + output = output.replace("yesyes", "yes") + elif "nono" in output: + output = output.replace("nono", "no") + elif "yesno" in output: + output = output.replace("yesno", "yes") + elif "noyes" in output: + output = output.replace("noyes", "no") + output = clean_answer(output) + return output + + +def clean_answer(output): + output_clean = output.encode("ascii", "ignore").decode("ascii") + return output_clean diff --git 
a/datasets/doc/source/conf.py b/datasets/doc/source/conf.py index dcba63dd221c..92d59d7df370 100644 --- a/datasets/doc/source/conf.py +++ b/datasets/doc/source/conf.py @@ -38,7 +38,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "0.3.0" +release = "0.4.0" # -- General configuration --------------------------------------------------- diff --git a/datasets/doc/source/how-to-install-flwr-datasets.rst b/datasets/doc/source/how-to-install-flwr-datasets.rst index 2068fc11da85..3f79daceb753 100644 --- a/datasets/doc/source/how-to-install-flwr-datasets.rst +++ b/datasets/doc/source/how-to-install-flwr-datasets.rst @@ -42,5 +42,5 @@ If everything worked, it should print the version of Flower Datasets to the comm .. code-block:: none - 0.3.0 + 0.4.0 diff --git a/datasets/doc/source/index.rst b/datasets/doc/source/index.rst index d292f726cd0b..8699fa77e85e 100644 --- a/datasets/doc/source/index.rst +++ b/datasets/doc/source/index.rst @@ -3,6 +3,8 @@ Flower Datasets Flower Datasets (``flwr-datasets``) is a library that enables the quick and easy creation of datasets for federated learning/analytics/evaluation. It enables heterogeneity (non-iidness) simulation and division of datasets with the preexisting notion of IDs. The library was created by the ``Flower Labs`` team that also created `Flower `_ : A Friendly Federated Learning Framework. +Try out an interactive demo to generate code and visualize heterogeneous divisions at the :ref:`bottom of this page`. + Flower Datasets Framework ------------------------- @@ -134,7 +136,6 @@ What makes Flower Datasets stand out from other libraries? * New custom partitioning schemes (``Partitioner`` subclasses) integrated with the whole ecosystem. - Join the Flower Community ------------------------- @@ -145,3 +146,16 @@ The Flower Community is growing quickly - we're a friendly group of researchers, :shadow: Join us on Slack + +.. _demo: +Demo +---- + +.. 
raw:: html + + + + diff --git a/datasets/e2e/pytorch/pyproject.toml b/datasets/e2e/pytorch/pyproject.toml index 009ad2d74235..3f1f12d5f4b3 100644 --- a/datasets/e2e/pytorch/pyproject.toml +++ b/datasets/e2e/pytorch/pyproject.toml @@ -9,7 +9,7 @@ description = "Flower Datasets with PyTorch" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" flwr-datasets = { path = "./../../", extras = ["vision"] } torch = "^1.12.0" torchvision = "^0.14.1" diff --git a/datasets/e2e/scikit-learn/pyproject.toml b/datasets/e2e/scikit-learn/pyproject.toml index 48356e4a945f..ca7fb45d82be 100644 --- a/datasets/e2e/scikit-learn/pyproject.toml +++ b/datasets/e2e/scikit-learn/pyproject.toml @@ -9,7 +9,7 @@ description = "Flower Datasets with scikit-learn" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" flwr-datasets = { path = "./../../", extras = ["vision"] } scikit-learn = "^1.2.0" parameterized = "==0.9.0" diff --git a/datasets/e2e/tensorflow/pyproject.toml b/datasets/e2e/tensorflow/pyproject.toml index dbb6720219b2..fbfc8eb89451 100644 --- a/datasets/e2e/tensorflow/pyproject.toml +++ b/datasets/e2e/tensorflow/pyproject.toml @@ -9,7 +9,7 @@ description = "Flower Datasets with TensorFlow" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr-datasets = { path = "./../../", extras = ["vision"] } tensorflow-cpu = "^2.9.1, !=2.11.1" tensorflow-io-gcs-filesystem = "<0.35.0" diff --git a/datasets/flwr_datasets/common/telemetry.py b/datasets/flwr_datasets/common/telemetry.py index ca484fdda73f..4bf80b93467d 100644 --- a/datasets/flwr_datasets/common/telemetry.py +++ b/datasets/flwr_datasets/common/telemetry.py @@ -25,7 +25,7 @@ from concurrent.futures import Future, ThreadPoolExecutor from enum import Enum, auto from pathlib import Path -from typing import Any, Dict, List, Optional, Union, cast +from typing import Any, Optional, Union, cast from 
flwr_datasets.common.version import package_name, package_version @@ -114,7 +114,7 @@ class EventType(str, Enum): # The type signature is not compatible with mypy, pylint and flake8 # so each of those needs to be disabled for this line. # pylint: disable-next=no-self-argument,arguments-differ,line-too-long - def _generate_next_value_(name: str, start: int, count: int, last_values: List[Any]) -> Any: # type: ignore # noqa: E501 + def _generate_next_value_(name: str, start: int, count: int, last_values: list[Any]) -> Any: # type: ignore # noqa: E501 return name PING = auto() @@ -127,7 +127,7 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A # Use the ThreadPoolExecutor with max_workers=1 to have a queue # and also ensure that telemetry calls are not blocking. -state: Dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { +state: dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { # Will be assigned ThreadPoolExecutor(max_workers=1) # in event() the first time it's required "executor": None, @@ -143,7 +143,7 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A # pylint: disable-next=unsubscriptable-object def event( event_type: EventType, - event_details: Optional[Dict[str, Any]] = None, + event_details: Optional[dict[str, Any]] = None, ) -> Future: # type: ignore """Submit create_event to ThreadPoolExecutor to avoid blocking.""" if state["executor"] is None: @@ -155,7 +155,7 @@ def event( return result -def create_event(event_type: EventType, event_details: Optional[Dict[str, Any]]) -> str: +def create_event(event_type: EventType, event_details: Optional[dict[str, Any]]) -> str: """Create telemetry event.""" if state["source"] is None: state["source"] = _get_source_id() diff --git a/datasets/flwr_datasets/common/typing.py b/datasets/flwr_datasets/common/typing.py index ffaefaeec313..d6d37b468494 100644 --- a/datasets/flwr_datasets/common/typing.py +++ 
b/datasets/flwr_datasets/common/typing.py @@ -15,7 +15,7 @@ """Flower Datasets type definitions.""" -from typing import Any, List +from typing import Any import numpy as np import numpy.typing as npt @@ -23,4 +23,4 @@ NDArray = npt.NDArray[Any] NDArrayInt = npt.NDArray[np.int_] NDArrayFloat = npt.NDArray[np.float_] -NDArrays = List[NDArray] +NDArrays = list[NDArray] diff --git a/datasets/flwr_datasets/common/version.py b/datasets/flwr_datasets/common/version.py index 48c3fc5aaa9c..3e4c9a31fd6c 100644 --- a/datasets/flwr_datasets/common/version.py +++ b/datasets/flwr_datasets/common/version.py @@ -19,15 +19,14 @@ import importlib.metadata as importlib_metadata -from typing import Tuple -def _check_package(name: str) -> Tuple[str, str]: +def _check_package(name: str) -> tuple[str, str]: version: str = importlib_metadata.version(name) return name, version -def _version() -> Tuple[str, str]: +def _version() -> tuple[str, str]: """Read and return Flower Dataset package name and version. Returns diff --git a/datasets/flwr_datasets/federated_dataset.py b/datasets/flwr_datasets/federated_dataset.py index e913b9095d17..72ea54773564 100644 --- a/datasets/flwr_datasets/federated_dataset.py +++ b/datasets/flwr_datasets/federated_dataset.py @@ -161,6 +161,11 @@ def load_partition( not need to provide this argument, but if `partitioners={"train": 10, "test": 100}`, you need to set it to differentiate which partitioner should be used. + The split names you can choose from vary from dataset to dataset. You need + to check the dataset on the `Hugging Face Hub`_ to see which splits are available. You can resplit the dataset + by using the `preprocessor` parameter (to rename, merge, divide, etc. the + available splits). Returns ------- @@ -203,6 +208,11 @@ def load_split(self, split: str) -> Dataset: ---------- split : str Split name of the downloaded dataset (e.g. "train", "test"). + The split names you can choose from vary from dataset to dataset. 
You need + to check the dataset on the `Hugging Face Hub`_ to see which splits are available. You can resplit the dataset + by using the `preprocessor` parameter (to rename, merge, divide, etc. the + available splits). Returns ------- @@ -307,7 +317,8 @@ def _prepare_dataset(self) -> None: raise ValueError( "Probably one of the specified parameter in `load_dataset_kwargs` " "change the return type of the datasets.load_dataset function. " - "Make sure to use parameter such that the return type is DatasetDict." + "Make sure to use parameter such that the return type is DatasetDict. " + f"The return type is currently: {type(self._dataset)}." ) if self._shuffle: # Note it shuffles all the splits. The self._dataset is DatasetDict diff --git a/datasets/flwr_datasets/federated_dataset_test.py b/datasets/flwr_datasets/federated_dataset_test.py index 64d75a7a7a5a..bbdfa42292c2 100644 --- a/datasets/flwr_datasets/federated_dataset_test.py +++ b/datasets/flwr_datasets/federated_dataset_test.py @@ -17,7 +17,7 @@ import unittest -from typing import Dict, Union +from typing import Union from unittest.mock import Mock, patch import numpy as np @@ -35,12 +35,30 @@ mocked_datasets = ["cifar100", "svhn", "sentiment140", "speech_commands"] +mocked_by_partial_download_datasets = [ + "flwrlabs/pacs", + "flwrlabs/cinic10", + "flwrlabs/caltech101", + "flwrlabs/office-home", + "flwrlabs/fed-isic2019", +] + +natural_id_datasets = [ + "flwrlabs/femnist", +] + +mocked_natural_id_datasets = [ + "flwrlabs/ucf101", + "flwrlabs/ambient-acoustic-context", + "LIUM/tedlium", +] + @parameterized_class( ("dataset_name", "test_split", "subset"), [ # Downloaded - # #Image datasets + # Image ("mnist", "test", ""), ("cifar10", "test", ""), ("fashion_mnist", "test", ""), @@ -52,8 +70,8 @@ ("scikit-learn/adult-census-income", None, ""), ("jlh/uci-mushrooms", None, ""), ("scikit-learn/iris", None, ""), - # Mocked - # #Image + # Mocked by local recreation + # Image ("cifar100", "test", ""), # Note: there's 
also the extra split and full_numbers subset ("svhn", "test", "cropped_digits"), @@ -61,6 +79,13 @@ ("sentiment140", "test", ""), # aka twitter # Audio ("speech_commands", "test", "v0.01"), + # Mocked by partial download + # Image + ("flwrlabs/pacs", None, ""), + ("flwrlabs/cinic10", "test", ""), + ("flwrlabs/caltech101", None, ""), + ("flwrlabs/office-home", None, ""), + ("flwrlabs/fed-isic2019", "test", ""), ], ) class BaseFederatedDatasetsTest(unittest.TestCase): @@ -86,10 +111,29 @@ def setUp(self) -> None: self.mock_load_dataset.return_value = _load_mocked_dataset( self.dataset_name, [200, 100], ["train", self.test_split], self.subset ) + elif self.dataset_name in mocked_by_partial_download_datasets: + split_names = ["train"] + skip_take_lists = [[(0, 30), (1000, 30), (2000, 40)]] + # If the dataset has split test update the mocking to include it + if self.test_split is not None: + split_names.append(self.test_split) + skip_take_lists.append([(0, 30), (100, 30), (200, 40)]) + mock_return_value = _load_mocked_dataset_dict_by_partial_download( + dataset_name=self.dataset_name, + split_names=split_names, + skip_take_lists=skip_take_lists, + subset_name=None if self.subset == "" else self.subset, + ) + self.patcher = patch("datasets.load_dataset") + self.mock_load_dataset = self.patcher.start() + self.mock_load_dataset.return_value = mock_return_value def tearDown(self) -> None: """Clean up after the dataset mocking.""" - if self.dataset_name in mocked_datasets: + if ( + self.dataset_name in mocked_datasets + or self.dataset_name in mocked_by_partial_download_datasets + ): patch.stopall() @parameterized.expand( # type: ignore @@ -341,7 +385,7 @@ def test_dict_of_partitioners_passes_partitioners(self) -> None: """Test if partitioners are passed directly (no recreation).""" num_train_partitions = 100 num_test_partitions = 100 - partitioners: Dict[str, Union[Partitioner, int]] = { + partitioners: dict[str, Union[Partitioner, int]] = { "train": 
IidPartitioner(num_partitions=num_train_partitions), "test": IidPartitioner(num_partitions=num_test_partitions), } @@ -375,7 +419,7 @@ def test_mixed_type_partitioners_passes_instantiated_partitioners(self) -> None: """Test if an instantiated partitioner is passed directly.""" num_train_partitions = 100 num_test_partitions = 100 - partitioners: Dict[str, Union[Partitioner, int]] = { + partitioners: dict[str, Union[Partitioner, int]] = { "train": IidPartitioner(num_partitions=num_train_partitions), "test": num_test_partitions, } @@ -389,7 +433,7 @@ def test_mixed_type_partitioners_creates_from_int(self) -> None: """Test if an IidPartitioner partitioner is created.""" num_train_partitions = 100 num_test_partitions = 100 - partitioners: Dict[str, Union[Partitioner, int]] = { + partitioners: dict[str, Union[Partitioner, int]] = { "train": IidPartitioner(num_partitions=num_train_partitions), "test": num_test_partitions, } @@ -403,17 +447,6 @@ def test_mixed_type_partitioners_creates_from_int(self) -> None: ) -natural_id_datasets = [ - "flwrlabs/femnist", -] - -mocked_natural_id_datasets = [ - "flwrlabs/ucf101", - "flwrlabs/ambient-acoustic-context", - "LIUM/tedlium", -] - - @parameterized_class( ("dataset_name", "test_split", "subset", "partition_by"), [ diff --git a/datasets/flwr_datasets/metrics/utils.py b/datasets/flwr_datasets/metrics/utils.py index 8f78b2fd4c32..14e1f8d68110 100644 --- a/datasets/flwr_datasets/metrics/utils.py +++ b/datasets/flwr_datasets/metrics/utils.py @@ -16,7 +16,7 @@ import warnings -from typing import List, Optional, Union +from typing import Optional, Union import pandas as pd @@ -206,7 +206,7 @@ def compute_frequencies( def _compute_counts( - labels: Union[List[int], List[str]], unique_labels: Union[List[int], List[str]] + labels: Union[list[int], list[str]], unique_labels: Union[list[int], list[str]] ) -> pd.Series: """Compute the count of labels when taking into account all possible labels. 
@@ -237,7 +237,7 @@ def _compute_counts( def _compute_frequencies( - labels: Union[List[int], List[str]], unique_labels: Union[List[int], List[str]] + labels: Union[list[int], list[str]], unique_labels: Union[list[int], list[str]] ) -> pd.Series: """Compute the distribution of labels when taking into account all possible labels. diff --git a/datasets/flwr_datasets/mock_utils_test.py b/datasets/flwr_datasets/mock_utils_test.py index 3324ad5e7f51..acfa4b16e4ee 100644 --- a/datasets/flwr_datasets/mock_utils_test.py +++ b/datasets/flwr_datasets/mock_utils_test.py @@ -19,7 +19,7 @@ import random import string from datetime import datetime, timedelta -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Optional, Union import numpy as np from PIL import Image @@ -30,7 +30,7 @@ def _generate_artificial_strings( num_rows: int, num_unique: int, string_length: int, seed: int = 42 -) -> List[str]: +) -> list[str]: """Create list of strings for categories or labels mocking. Note to keep the seed the same if you reuse this function for in creation of the @@ -53,7 +53,7 @@ def _generate_artificial_strings( List of generated strings. 
""" random.seed(seed) - unique_strings: Set[str] = set() + unique_strings: set[str] = set() while len(unique_strings) < num_unique: random_str = "".join( random.choices(string.ascii_letters + string.digits, k=string_length) @@ -68,7 +68,7 @@ def _generate_artificial_strings( return artificial_column -def _generate_artificial_categories(num_rows: int, choices: List[Any]) -> List[str]: +def _generate_artificial_categories(num_rows: int, choices: list[Any]) -> list[str]: """Create list of strings from given `choices` list.""" artificial_column = choices.copy() remaining_to_allocate = num_rows - len(choices) @@ -82,7 +82,7 @@ def _generate_random_word(length: int) -> str: return "".join(random.choices(string.ascii_letters, k=length)) -def _generate_random_text_column(num_rows: int, length: int) -> List[str]: +def _generate_random_text_column(num_rows: int, length: int) -> list[str]: """Generate a list of random text of specified length.""" text_col = [] for _ in range(num_rows): @@ -98,7 +98,7 @@ def _generate_random_sentence( ) -> str: """Generate a random sentence with words of random lengths.""" sentence_length = random.randint(min_sentence_length, max_sentence_length) - sentence: List[str] = [] + sentence: list[str] = [] while len(" ".join(sentence)) < sentence_length: word_length = random.randint(min_word_length, max_word_length) word = _generate_random_word(word_length) @@ -112,7 +112,7 @@ def _generate_random_sentences( max_word_length: int, min_sentence_length: int, max_sentence_length: int, -) -> List[str]: +) -> list[str]: """Generate a list of random sentences.""" text_col = [ _generate_random_sentence( @@ -123,7 +123,7 @@ def _generate_random_sentences( return text_col -def _make_num_rows_none(column: List[Any], num_none: int) -> List[Any]: +def _make_num_rows_none(column: list[Any], num_none: int) -> list[Any]: """Assign none num_none times to the given list.""" column_copy = column.copy() none_positions = random.sample(range(len(column_copy)), num_none) 
@@ -154,7 +154,7 @@ def _generate_random_date_column( end_date: datetime, date_format: str = "%a %b %d %H:%M:%S %Y", as_string: bool = True, -) -> List[Union[str, datetime]]: +) -> list[Union[str, datetime]]: """Generate a list of random dates.""" return [ _generate_random_date(start_date, end_date, date_format, as_string) @@ -162,21 +162,21 @@ def _generate_random_date_column( ] -def _generate_random_int_column(num_rows: int, min_int: int, max_int: int) -> List[int]: +def _generate_random_int_column(num_rows: int, min_int: int, max_int: int) -> list[int]: """Generate a list of ints.""" return [random.randint(min_int, max_int) for _ in range(num_rows)] -def _generate_random_bool_column(num_rows: int) -> List[bool]: +def _generate_random_bool_column(num_rows: int) -> list[bool]: """Generate a list of bools.""" return [random.choice([True, False]) for _ in range(num_rows)] def _generate_random_image_column( num_rows: int, - image_size: Union[Tuple[int, int], Tuple[int, int, int]], + image_size: Union[tuple[int, int], tuple[int, int, int]], simulate_type: str, -) -> List[Any]: +) -> list[Any]: """Simulate the images with the format that is found in HF Hub. Directly using `Image.fromarray` does not work because it creates `PIL.Image.Image`. @@ -207,7 +207,7 @@ def generate_random_audio_column( num_rows: int, sampling_rate: int, length_in_samples: int, -) -> List[Dict[str, Any]]: +) -> list[dict[str, Any]]: """Simulate the audio column. 
Audio column in the datset is comprised from an array or floats, sample_rate and a @@ -365,8 +365,8 @@ def _mock_speach_commands(num_rows: int) -> Dataset: def _load_mocked_dataset( dataset_name: str, - num_rows: List[int], - split_names: List[str], + num_rows: list[int], + split_names: list[str], subset: str = "", ) -> DatasetDict: dataset_dict = {} @@ -380,7 +380,7 @@ def _load_mocked_dataset( def _load_mocked_dataset_by_partial_download( dataset_name: str, split_name: str, - skip_take_list: List[Tuple[int, int]], + skip_take_list: list[tuple[int, int]], subset_name: Optional[str] = None, ) -> Dataset: """Download a partial dataset. @@ -409,7 +409,11 @@ def _load_mocked_dataset_by_partial_download( The dataset with the requested samples. """ dataset = datasets.load_dataset( - dataset_name, name=subset_name, split=split_name, streaming=True + dataset_name, + name=subset_name, + split=split_name, + streaming=True, + trust_remote_code=True, ) dataset_list = [] # It's a list of dict such that each dict represent a single sample of the dataset @@ -423,11 +427,14 @@ def _load_mocked_dataset_by_partial_download( def _load_mocked_dataset_dict_by_partial_download( dataset_name: str, - split_names: List[str], - skip_take_lists: List[List[Tuple[int, int]]], + split_names: list[str], + skip_take_lists: list[list[tuple[int, int]]], subset_name: Optional[str] = None, ) -> DatasetDict: """Like _load_mocked_dataset_by_partial_download but for many splits.""" + assert len(split_names) == len( + skip_take_lists + ), "The split_names should be thesame length as the skip_take_lists." 
dataset_dict = {} for split_name, skip_take_list in zip(split_names, skip_take_lists): dataset_dict[split_name] = _load_mocked_dataset_by_partial_download( diff --git a/datasets/flwr_datasets/partitioner/__init__.py b/datasets/flwr_datasets/partitioner/__init__.py index 4d7cea3a01de..e675a8a2f5b8 100644 --- a/datasets/flwr_datasets/partitioner/__init__.py +++ b/datasets/flwr_datasets/partitioner/__init__.py @@ -18,6 +18,8 @@ from .dirichlet_partitioner import DirichletPartitioner from .distribution_partitioner import DistributionPartitioner from .exponential_partitioner import ExponentialPartitioner +from .grouped_natural_id_partitioner import GroupedNaturalIdPartitioner +from .id_to_size_fnc_partitioner import IdToSizeFncPartitioner from .iid_partitioner import IidPartitioner from .image_semantic_partitioner import ImageSemanticPartitioner from .inner_dirichlet_partitioner import InnerDirichletPartitioner @@ -33,6 +35,8 @@ "DirichletPartitioner", "DistributionPartitioner", "ExponentialPartitioner", + "GroupedNaturalIdPartitioner", + "IdToSizeFncPartitioner", "IidPartitioner", "ImageSemanticPartitioner", "InnerDirichletPartitioner", diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py index dce208419181..55c190087f7c 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner.py @@ -16,7 +16,7 @@ import warnings -from typing import Dict, List, Optional, Union +from typing import Optional, Union import numpy as np @@ -89,7 +89,7 @@ def __init__( # pylint: disable=R0913 self, num_partitions: int, partition_by: str, - alpha: Union[int, float, List[float], NDArrayFloat], + alpha: Union[int, float, list[float], NDArrayFloat], min_partition_size: int = 10, self_balancing: bool = False, shuffle: bool = True, @@ -110,8 +110,8 @@ def __init__( # pylint: disable=R0913 # Utility attributes # The attributes below are 
determined during the first call to load_partition self._avg_num_of_samples_per_partition: Optional[float] = None - self._unique_classes: Optional[Union[List[int], List[str]]] = None - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._unique_classes: Optional[Union[list[int], list[str]]] = None + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: @@ -142,7 +142,7 @@ def num_partitions(self) -> int: return self._num_partitions def _initialize_alpha( - self, alpha: Union[int, float, List[float], NDArrayFloat] + self, alpha: Union[int, float, list[float], NDArrayFloat] ) -> NDArrayFloat: """Convert alpha to the used format in the code a NDArrayFloat. @@ -164,7 +164,7 @@ def _initialize_alpha( alpha = np.array([float(alpha)], dtype=float).repeat(self._num_partitions) elif isinstance(alpha, float): alpha = np.array([alpha], dtype=float).repeat(self._num_partitions) - elif isinstance(alpha, List): + elif isinstance(alpha, list): if len(alpha) != self._num_partitions: raise ValueError( "If passing alpha as a List, it needs to be of length of equal to " @@ -217,7 +217,7 @@ def _determine_partition_id_to_indices_if_needed( sampling_try = 0 while True: # Prepare data structure to store indices assigned to partition ids - partition_id_to_indices: Dict[int, List[int]] = {} + partition_id_to_indices: dict[int, list[int]] = {} for nid in range(self._num_partitions): partition_id_to_indices[nid] = [] diff --git a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py index b2407b5d5822..ed38e8ee2a41 100644 --- a/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/dirichlet_partitioner_test.py @@ -17,7 +17,7 @@ # pylint: disable=W0212 import unittest -from typing import Tuple, Union +from typing import Union import 
numpy as np from numpy.typing import NDArray @@ -33,7 +33,7 @@ def _dummy_setup( num_rows: int, partition_by: str, self_balancing: bool = True, -) -> Tuple[Dataset, DirichletPartitioner]: +) -> tuple[Dataset, DirichletPartitioner]: """Create a dummy dataset and partitioner for testing.""" data = { partition_by: [i % 3 for i in range(num_rows)], diff --git a/datasets/flwr_datasets/partitioner/distribution_partitioner.py b/datasets/flwr_datasets/partitioner/distribution_partitioner.py index e9acc41c707e..86be62b36070 100644 --- a/datasets/flwr_datasets/partitioner/distribution_partitioner.py +++ b/datasets/flwr_datasets/partitioner/distribution_partitioner.py @@ -16,7 +16,7 @@ from collections import Counter -from typing import Dict, List, Optional, Union +from typing import Optional, Union import numpy as np @@ -182,7 +182,7 @@ def __init__( # pylint: disable=R0913 self._num_unique_labels: int = 0 self._num_columns: int = 0 self._partition_id_to_indices_determined = False - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} def load_partition(self, partition_id: int) -> datasets.Dataset: """Load a partition based on the partition index. 
diff --git a/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py b/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py index bfeafd355be6..306e208a706b 100644 --- a/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/distribution_partitioner_test.py @@ -17,7 +17,7 @@ import unittest from collections import Counter -from typing import Any, Dict, List, Tuple, Union +from typing import Any, Union import numpy as np from parameterized import parameterized_class @@ -62,7 +62,7 @@ def _get_partitioner( num_unique_labels: int, preassigned_num_samples_per_label: int, rescale_mode: bool = True, -) -> Tuple[DistributionPartitioner, Dict[int, Dataset]]: +) -> tuple[DistributionPartitioner, dict[int, Dataset]]: """Create DistributionPartitioner instance.""" dataset = _dummy_dataset_setup( num_samples, @@ -83,7 +83,7 @@ def _get_partitioner( rescale=rescale_mode, ) partitioner.dataset = dataset - partitions: Dict[int, Dataset] = { + partitions: dict[int, Dataset] = { pid: partitioner.load_partition(pid) for pid in range(num_partitions) } @@ -135,7 +135,7 @@ def test_correct_num_times_classes_sampled_across_partitions(self) -> None: preassigned_num_samples_per_label=self.preassigned_num_samples_per_label, ) - partitioned_distribution: Dict[Any, List[Any]] = { + partitioned_distribution: dict[Any, list[Any]] = { label: [] for label in partitioner.dataset.unique("labels") } @@ -162,7 +162,7 @@ def test_exact_distribution_assignment(self) -> None: preassigned_num_samples_per_label=self.preassigned_num_samples_per_label, rescale_mode=False, ) - partitioned_distribution: Dict[Any, List[Any]] = { + partitioned_distribution: dict[Any, list[Any]] = { label: [] for label in partitioner.dataset.unique("labels") } diff --git a/datasets/flwr_datasets/partitioner/exponential_partitioner.py b/datasets/flwr_datasets/partitioner/exponential_partitioner.py index 5d9f34352af1..1bf838df5909 100644 --- 
a/datasets/flwr_datasets/partitioner/exponential_partitioner.py +++ b/datasets/flwr_datasets/partitioner/exponential_partitioner.py @@ -17,10 +17,10 @@ import numpy as np -from flwr_datasets.partitioner.size_partitioner import SizePartitioner +from flwr_datasets.partitioner.id_to_size_fnc_partitioner import IdToSizeFncPartitioner -class ExponentialPartitioner(SizePartitioner): +class ExponentialPartitioner(IdToSizeFncPartitioner): """Partitioner creates partitions of size that are correlated with exp(id). The amount of data each client gets is correlated with the exponent of partition ID. diff --git a/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py new file mode 100644 index 000000000000..4ce4f3717190 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner.py @@ -0,0 +1,224 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Grouped natural id partitioner class that works with Hugging Face Datasets.""" + + +from typing import Any, Literal + +import numpy as np + +import datasets +from flwr_datasets.common.typing import NDArrayInt +from flwr_datasets.partitioner.partitioner import Partitioner + + +class GroupedNaturalIdPartitioner(Partitioner): + """Partition dataset by creating groups of natural ids. 
+ + Conceptually, you can think of this partitioner as a way of creating an organization + of x users instead of each user representing a separate partition. You can change + the nature of the problem from cross-device to cross-silo (cross organization). + + Parameters + ---------- + partition_by: str + The name of the column that contains the unique values of partitions. + group_size: int + The number of unique ids that will be placed in a single group. + mode: Literal["allow-smaller", "allow-bigger", "drop-reminder", "strict"] + The mode that will be used to handle the remainder of the unique ids. + - "allow-smaller": The last group can be smaller than the group_size. + - "allow-bigger": The first group can be bigger than the group_size. + - "drop-reminder": The last group will be dropped if it is smaller than the + group_size. + - "strict": Raises a ValueError if the remainder is not zero. In this mode, you + expect each group to have the same size. + sort_unique_ids: bool + If True, the unique natural ids will be sorted before creating the groups. 
+ + Examples + -------- + Partition users in the "sentiment140" (aka Twitter) dataset into groups of two + users following the default mode: + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import GroupedNaturalIdPartitioner + >>> + >>> partitioner = GroupedNaturalIdPartitioner(partition_by="user", group_size=2) + >>> fds = FederatedDataset(dataset="sentiment140", + >>> partitioners={"train": partitioner}) + >>> partition = fds.load_partition(0) + """ + + def __init__( + self, + partition_by: str, + group_size: int, + mode: Literal[ + "allow-smaller", "allow-bigger", "drop-reminder", "strict" + ] = "allow-smaller", + sort_unique_ids: bool = False, + ) -> None: + super().__init__() + self._partition_id_to_natural_ids: dict[int, list[Any]] = {} + self._natural_id_to_partition_id: dict[Any, int] = {} + self._partition_id_to_indices: dict[int, NDArrayInt] = {} + self._partition_by = partition_by + self._mode = mode + self._sort_unique_ids = sort_unique_ids + + if group_size < 0: + raise ValueError("group_size must be a positive integer") + self._group_size = group_size + + def _create_int_partition_id_to_natural_id(self) -> None: + """Create a mapping from int indices to unique client ids from dataset. + + Natural ids come from the column specified in `partition_by`. + """ + unique_natural_ids = self.dataset.unique(self._partition_by) + if self._mode != "allow-smaller" and self._group_size > len(unique_natural_ids): + raise ValueError( + "The group size needs to be smaller than the number of the unique " + "natural ids unless you are using allow-smaller mode which will " + "result in a single partition." 
+ ) + if self._sort_unique_ids: + unique_natural_ids = sorted(unique_natural_ids) + num_unique_natural_ids = len(unique_natural_ids) + remainder = num_unique_natural_ids % self._group_size + num_groups = num_unique_natural_ids // self._group_size + if num_groups == 0 and self._mode == "allow-smaller": + num_groups = 1 + remainder = 0 + # Note that the number of groups might be different than this number + # due to certain modes, it's a base value. + + if self._mode == "allow-bigger": + groups_of_natural_ids = np.array_split(unique_natural_ids, num_groups) + elif self._mode == "drop-reminder": + # Narrow down the unique_natural_ids to not have a bigger group + # which is the behavior of the np.array_split + unique_natural_ids = unique_natural_ids[ + : int(num_groups * self._group_size) + ] + groups_of_natural_ids = np.array_split(unique_natural_ids, num_groups) + elif self._mode == "allow-smaller": + if remainder > 0: + last_group_ids = unique_natural_ids[-remainder:] + unique_natural_ids = unique_natural_ids[ + : int(num_groups * self._group_size) + ] + groups_of_natural_ids = np.array_split(unique_natural_ids, num_groups) + if remainder > 0: + groups_of_natural_ids.append(np.array(last_group_ids)) + elif self._mode == "strict": + if remainder != 0: + raise ValueError( + "Strict mode requires that the number of unique natural ids is " + "perfectly divisible by the group size. " + f"Found remainder: {remainder}. Please pass the group_size that " + f"enables strict mode or relax the mode parameter. Refer to the " + f"documentation of the mode parameter for the available modes." + ) + groups_of_natural_ids = np.array_split(unique_natural_ids, num_groups) + else: + raise ValueError( + f"Given {self._mode} is not a valid mode. Refer to the documentation of" + " the mode parameter for the available modes." 
+ ) + + self._partition_id_to_natural_ids = {} + for group_of_natural_ids_id, group_of_natural_ids in enumerate( + groups_of_natural_ids + ): + self._partition_id_to_natural_ids[group_of_natural_ids_id] = ( + group_of_natural_ids.tolist() + ) + + def _create_natural_id_to_int_partition_id(self) -> None: + """Create a mapping from unique client ids from dataset to int indices. + + Natural ids come from the column specified in `partition_by`. This object is + inverse of the `self._partition_id_to_natural_id`. This method assumes that + `self._partition_id_to_natural_id` already exists. + """ + self._natural_id_to_partition_id = {} + for partition_id, natural_ids in self._partition_id_to_natural_ids.items(): + for natural_id in natural_ids: + self._natural_id_to_partition_id[natural_id] = partition_id + + def _create_partition_id_to_indices(self) -> None: + natural_id_to_indices = {} # type: ignore + natural_ids = np.array(self.dataset[self._partition_by]) + + for index, natural_id in enumerate(natural_ids): + if natural_id not in natural_id_to_indices: + natural_id_to_indices[natural_id] = [] + natural_id_to_indices[natural_id].append(index) + + self._partition_id_to_indices = {} + for partition_id, natural_id_group in self._partition_id_to_natural_ids.items(): + indices = [] + for natural_id in natural_id_group: + indices.extend(natural_id_to_indices[natural_id]) + self._partition_id_to_indices[partition_id] = np.array(indices) + + def load_partition(self, partition_id: int) -> datasets.Dataset: + """Load a single partition corresponding to a single `partition_id`. + + The choice of the partition is based on unique integers assigned to each + natural id present in the dataset in the `partition_by` column. 
+ + + Parameters + ---------- + partition_id : int + the index that corresponds to the requested partition + + Returns + ------- + dataset_partition : Dataset + single dataset partition + """ + if len(self._partition_id_to_natural_ids) == 0: + self._create_int_partition_id_to_natural_id() + self._create_natural_id_to_int_partition_id() + + if len(self._partition_id_to_indices) == 0: + self._create_partition_id_to_indices() + + return self.dataset.select(self._partition_id_to_indices[partition_id]) + + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + if len(self._partition_id_to_natural_ids) == 0: + self._create_int_partition_id_to_natural_id() + self._create_natural_id_to_int_partition_id() + return len(self._partition_id_to_natural_ids) + + @property + def partition_id_to_natural_ids(self) -> dict[int, list[Any]]: + """Partition id to the corresponding group of natural ids present. + + Natural ids are the unique values in `partition_by` column in dataset. + """ + return self._partition_id_to_natural_ids + + @property + def natural_id_to_partition_id(self) -> dict[Any, int]: + """Natural id to the corresponding partition id.""" + return self._natural_id_to_partition_id diff --git a/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py new file mode 100644 index 000000000000..014d18c1dc15 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/grouped_natural_id_partitioner_test.py @@ -0,0 +1,310 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test GroupedNaturalIdPartitioner.""" + + +import unittest +from typing import Literal + +from parameterized import parameterized, parameterized_class + +from datasets import Dataset +from flwr_datasets.partitioner.grouped_natural_id_partitioner import ( + GroupedNaturalIdPartitioner, +) + + +def _create_dataset(num_rows: int, n_unique_natural_ids: int) -> Dataset: + """Create dataset based on the number of rows and unique natural ids.""" + data = { + "features": list(range(num_rows)), + "natural_id": [f"{i % n_unique_natural_ids}" for i in range(num_rows)], + "labels": [i % 2 for i in range(num_rows)], + } + dataset = Dataset.from_dict(data) + return dataset + + +# mypy: disable-error-code="attr-defined" +@parameterized_class( + ("sort_unique_ids",), + [ + (False,), + (True,), + ], +) +# pylint: disable=no-member +class TestGroupedNaturalIdPartitioner(unittest.TestCase): + """Test GroupedNaturalIdPartitioner.""" + + @parameterized.expand( # type: ignore + # num_rows, num_unique_natural_ids, group_size, expected_num_partitions + [ + [10, 10, 2, 5], + [11, 10, 2, 5], + [100, 10, 2, 5], + [12, 6, 3, 2], + ] + ) + def test_strict_mode_num_partitions_and_partition_sizes( + self, + num_rows: int, + num_unique_natural_id: int, + group_size: int, + expected_num_partitions: int, + ) -> None: + """Test strict mode with valid group size.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + 
group_size=group_size, + mode="strict", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + _ = partitioner.load_partition(0) + self.assertEqual(partitioner.num_partitions, expected_num_partitions) + + @parameterized.expand( # type: ignore + # num_rows, num_unique_natural_ids, group_size, expected_num_partitions, + # expected_num_unique_natural_ids + [ + [10, 10, 2, [2, 2, 2, 2, 2]], + [100, 10, 2, [2, 2, 2, 2, 2]], + [12, 6, 3, [3, 3]], + # The cases in which the partitions should be smaller + [10, 7, 2, [2, 2, 2, 1]], + [10, 3, 2, [2, 1]], + ] + ) + def test_allow_smaller_mode_num_partitions_and_partition_sizes( + self, + num_rows: int, + num_unique_natural_id: int, + group_size: int, + expected_num_unique_natural_ids: list[int], + ) -> None: + """Test allow-smaller mode handles the remainder correctly.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=group_size, + mode="allow-smaller", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + unique_natural_ids = [ + len(partition.unique("natural_id")) for partition in partitions + ] + self.assertEqual(unique_natural_ids, expected_num_unique_natural_ids) + + @parameterized.expand( # type: ignore + # num_rows, num_unique_natural_ids, group_size, expected_num_partitions, + # expected_num_unique_natural_ids + [ + [10, 10, 2, [2, 2, 2, 2, 2]], + [100, 10, 2, [2, 2, 2, 2, 2]], + [12, 6, 3, [3, 3]], + # The cases in which the partitions should be smaller + [10, 7, 2, [3, 2, 2]], + [10, 3, 2, [3]], + ] + ) + def test_allow_bigger_mode_num_partitions_and_partition_sizes( + self, + num_rows: int, + num_unique_natural_id: int, + group_size: int, + expected_num_unique_natural_ids: list[int], + ) -> None: + """Test allow-bigger mode 
handles the remainder correctly.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=group_size, + mode="allow-bigger", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + unique_natural_ids = [ + len(partition.unique("natural_id")) for partition in partitions + ] + self.assertEqual(unique_natural_ids, expected_num_unique_natural_ids) + + @parameterized.expand( # type: ignore + # num_rows, num_unique_natural_ids, group_size, expected_num_partitions, + # expected_num_unique_natural_ids + [ + [10, 10, 2, [2, 2, 2, 2, 2]], + [100, 10, 2, [2, 2, 2, 2, 2]], + [12, 6, 3, [3, 3]], + # The cases in which the partitions should be smaller + [10, 7, 2, [2, 2, 2]], + [10, 3, 2, [2]], + ] + ) + def test_drop_reminder_mode_num_partitions_and_partition_sizes( + self, + num_rows: int, + num_unique_natural_id: int, + group_size: int, + expected_num_unique_natural_ids: list[int], + ) -> None: + """Test drop reminder mode.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=group_size, + mode="drop-reminder", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + unique_natural_ids = [ + len(partition.unique("natural_id")) for partition in partitions + ] + self.assertEqual(unique_natural_ids, expected_num_unique_natural_ids) + + @parameterized.expand( # type: ignore + # mode, num_rows, num_unique_natural_ids, group_size + [ + ["strict", 10, 10, 2], + ["allow-smaller", 10, 7, 2], + ["allow-bigger", 10, 7, 2], + ["drop-reminder", 10, 7, 2], + ["strict", 12, 6, 3], + ["allow-smaller", 12, 6, 3], + 
["allow-bigger", 12, 6, 3], + ["drop-reminder", 12, 6, 3], + ["allow-smaller", 10, 2, 3], + ] + ) + def test_no_overlapping_natural_ids( + self, + mode: Literal["allow-smaller", "allow-bigger", "drop-reminder", "strict"], + num_rows: int, + num_unique_natural_id: int, + group_size: int, + ) -> None: + """Test that no natural_ids overlap across partitions.""" + dataset = _create_dataset(num_rows, num_unique_natural_id) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=group_size, + mode=mode, + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + + # Check for overlaps between partitions + seen_natural_ids: set[str] = set() + for partition in partitions: + natural_ids_in_partition = set(partition.unique("natural_id")) + + # Check if there is any overlap with previously seen natural IDs + overlap = seen_natural_ids.intersection(natural_ids_in_partition) + self.assertTrue( + len(overlap) == 0, + f"Overlapping natural IDs found between partitions in mode: {mode}. " + f"Overlapping IDs: {overlap}", + ) + + # Add the natural IDs from this partition to the seen set + seen_natural_ids.update(natural_ids_in_partition) + + def test_group_size_bigger_than_num_unique_natural_ids_allow_smaller(self) -> None: + """Test the allow-smaller mode with group size > number of unique natural ids. + + That's the only mode that should work in this scenario. 
+ """ + dataset = _create_dataset(num_rows=10, n_unique_natural_ids=2) + expected_num_unique_natural_ids = [2] + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=3, + mode="allow-smaller", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + # Trigger partitioning + partitions = [ + partitioner.load_partition(i) for i in range(partitioner.num_partitions) + ] + unique_natural_ids = [ + len(partition.unique("natural_id")) for partition in partitions + ] + + self.assertEqual(unique_natural_ids, expected_num_unique_natural_ids) + + def test_strict_mode_with_invalid_group_size(self) -> None: + """Test strict mode raises if group_size does not divide unique IDs evenly.""" + dataset = _create_dataset(num_rows=10, n_unique_natural_ids=3) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=2, + mode="strict", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + with self.assertRaises(ValueError) as context: + _ = partitioner.load_partition(0) + self.assertIn( + "Strict mode requires that the number of unique natural ids is perfectly " + "divisible by the group size.", + str(context.exception), + ) + + def test_too_big_group_size(self) -> None: + """Test raises if the group size > than the number of unique natural ids.""" + n_unique_natural_ids = 3 + dataset = _create_dataset( + num_rows=10, n_unique_natural_ids=n_unique_natural_ids + ) + partitioner = GroupedNaturalIdPartitioner( + partition_by="natural_id", + group_size=n_unique_natural_ids + 1, + mode="allow-bigger", + sort_unique_ids=self.sort_unique_ids, + ) + partitioner.dataset = dataset + with self.assertRaises(ValueError) as context: + _ = partitioner.load_partition(0) + self.assertIn( + "The group size needs to be smaller than the number of the unique " + "natural ids unless you are using allow-smaller mode which will " + "result in a single partition.", + str(context.exception), + ) + + +if 
__name__ == "__main__": + unittest.main() diff --git a/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner.py b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner.py new file mode 100644 index 000000000000..bd6336eb0801 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner.py @@ -0,0 +1,145 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""IdToSizeFncPartitioner class.""" + + +from typing import Callable, Union + +import numpy as np + +import datasets +from flwr_datasets.partitioner.partitioner import Partitioner + + +class IdToSizeFncPartitioner(Partitioner): + """Base class for the deterministic size partitioning based on the `partition_id`. + + The client with `partition_id` has the following relationship regarding the number + of samples. + + `partition_id_to_size_fn(partition_id)` ~ number of samples for `partition_id` + + If the function doesn't transform the `partition_id` it's a linear correlation + between the number of samples for the partition and the value of `partition_id`. For + instance, if the partition ids range from 1 to M, partition with id 1 gets 1 unit of + data, client 2 gets 2 units, and so on, up to partition M which gets M units. 
+ + Note that size corresponding to the `partition_id` is deterministic, yet in case of + different dataset shuffling the assignment of samples to `partition_id` will vary. + + Parameters + ---------- + num_partitions : int + The total number of partitions that the data will be divided into. + partition_id_to_size_fn : Callable + Function that defines the relationship between partition id and the number of + samples. + """ + + def __init__( + self, + num_partitions: int, + partition_id_to_size_fn: Callable, # type: ignore[type-arg] + ) -> None: + super().__init__() + if num_partitions <= 0: + raise ValueError("The number of partitions must be greater than zero.") + self._num_partitions = num_partitions + self._partition_id_to_size_fn = partition_id_to_size_fn + + self._partition_id_to_size: dict[int, int] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} + # A flag to perform only a single compute to determine the indices + self._partition_id_to_indices_determined = False + + def load_partition(self, partition_id: int) -> datasets.Dataset: + """Load a single partition based on the partition index. + + The number of samples is dependent on the partition partition_id. + + Parameters + ---------- + partition_id : int + the index that corresponds to the requested partition + + Returns + ------- + dataset_partition: Dataset + single dataset partition + """ + # The partitioning is done lazily - only when the first partition is requested. + # A single run creates the indices assignments for all the partition indices. 
+ self._determine_partition_id_to_indices_if_needed() + return self.dataset.select(self._partition_id_to_indices[partition_id]) + + @property + def num_partitions(self) -> int: + """Total number of partitions.""" + self._determine_partition_id_to_indices_if_needed() + return self._num_partitions + + @property + def partition_id_to_size(self) -> dict[int, int]: + """Node id to the number of samples.""" + return self._partition_id_to_size + + @property + def partition_id_to_indices(self) -> dict[int, list[int]]: + """Node id to the list of indices.""" + return self._partition_id_to_indices + + def _determine_partition_id_to_size(self) -> None: + """Determine data quantity associated with partition indices.""" + data_division_in_units = self._partition_id_to_size_fn( + np.linspace(start=1, stop=self._num_partitions, num=self._num_partitions) + ) + total_units: Union[int, float] = data_division_in_units.sum() + # Normalize the units to get the fraction total dataset + partition_sizes_as_fraction = data_division_in_units / total_units + # Calculate the number of samples + partition_sizes_as_num_of_samples = np.array( + partition_sizes_as_fraction * len(self.dataset), dtype=np.int64 + ) + # Check if any sample is not allocated because of multiplication with fractions. + assigned_samples = np.sum(partition_sizes_as_num_of_samples) + left_unassigned_samples = len(self.dataset) - assigned_samples + # If there is any sample(s) left unassigned, assign it to the largest partition. 
+ partition_sizes_as_num_of_samples[-1] += left_unassigned_samples + for idx, partition_size in enumerate(partition_sizes_as_num_of_samples): + self._partition_id_to_size[idx] = partition_size + + self._check_if_partition_id_to_size_possible() + + def _determine_partition_id_to_indices_if_needed(self) -> None: + """Create an assignment of indices to the partition indices..""" + if self._partition_id_to_indices_determined is True: + return + self._determine_partition_id_to_size() + total_samples_assigned = 0 + for idx, quantity in self._partition_id_to_size.items(): + self._partition_id_to_indices[idx] = list( + range(total_samples_assigned, total_samples_assigned + quantity) + ) + total_samples_assigned += quantity + self._partition_id_to_indices_determined = True + + def _check_if_partition_id_to_size_possible(self) -> None: + all_positive = all(value >= 1 for value in self.partition_id_to_size.values()) + if not all_positive: + raise ValueError( + f"The given specification of the parameter num_partitions" + f"={self._num_partitions} for the given dataset results " + f"in the partitions sizes that are not greater than 0." + ) diff --git a/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner_test.py b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner_test.py new file mode 100644 index 000000000000..905aa8cc9303 --- /dev/null +++ b/datasets/flwr_datasets/partitioner/id_to_size_fnc_partitioner_test.py @@ -0,0 +1,104 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""IdToSizeFuncitonPartitioner tests.""" + + +import unittest + +from parameterized import parameterized + +from datasets import Dataset +from flwr_datasets.partitioner.linear_partitioner import LinearPartitioner + + +def _dummy_dataset(num_rows: int) -> Dataset: + data = { + "features": list(range(num_rows)), + "labels": [i % 2 for i in range(num_rows)], + } + dataset = Dataset.from_dict(data) + return dataset + + +class TestLinearPartitioner(unittest.TestCase): + """Test LinearPartitioner.""" + + @parameterized.expand( # type: ignore + [ + (1, 100), + (10, 100), + (5, 55), # This will leave some undivided samples + ] + ) + def test_linear_distribution(self, num_partitions: int, num_rows: int) -> None: + """Test the linear distribution of samples.""" + dataset = _dummy_dataset(num_rows) + partitioner = LinearPartitioner(num_partitions=num_partitions) + partitioner.dataset = dataset + # Run a single partition loading to trigger the division + _ = partitioner.load_partition(0) + total_samples = sum(partitioner.partition_id_to_size.values()) + self.assertEqual(total_samples, num_rows) + + # Testing if each partition is getting more than the previous one + last_count = 0 + for i in range(num_partitions): + current_count = partitioner.partition_id_to_size[i] + self.assertGreaterEqual(current_count, last_count) + last_count = current_count + + @parameterized.expand( # type: ignore + [ + (10, 100), + (5, 55), # This will leave some undivided samples + (7, 77), # This will leave some undivided samples + ] + ) + def test_undivided_samples(self, num_partitions: int, num_rows: int) -> None: + """Test the logic for distributing undivided samples.""" + dataset = _dummy_dataset(num_rows) + partitioner = LinearPartitioner(num_partitions=num_partitions) + partitioner.dataset = dataset + # If 
there are any undivided samples, they should be added to the largest + # partition + last_partition_id = num_partitions - 1 + actual_samples_in_last_partition = len( + partitioner.load_partition(last_partition_id) + ) + expected_samples_in_last_partition = partitioner.partition_id_to_size[ + last_partition_id + ] + self.assertEqual( + expected_samples_in_last_partition, actual_samples_in_last_partition + ) + + def test_meaningless_params(self) -> None: + """Test if the params leading to partition size not greater than zero raises.""" + num_rows = 10 + num_partitions = 100 + dataset = _dummy_dataset(num_rows) + partitioner = LinearPartitioner(num_partitions=num_partitions) + partitioner.dataset = dataset + with self.assertRaises(ValueError) as context: + partitioner.load_partition(1) + self.assertIn( + "The given specification of the parameter num_partitions=100 for the given " + "dataset results in the partitions sizes that are not greater than 0.", + str(context.exception), + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/datasets/flwr_datasets/partitioner/iid_partitioner_test.py b/datasets/flwr_datasets/partitioner/iid_partitioner_test.py index 64c37c4e7127..cbdc67be7fa5 100644 --- a/datasets/flwr_datasets/partitioner/iid_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/iid_partitioner_test.py @@ -16,7 +16,6 @@ import unittest -from typing import Tuple from parameterized import parameterized @@ -24,7 +23,7 @@ from flwr_datasets.partitioner.iid_partitioner import IidPartitioner -def _dummy_setup(num_partitions: int, num_rows: int) -> Tuple[Dataset, IidPartitioner]: +def _dummy_setup(num_partitions: int, num_rows: int) -> tuple[Dataset, IidPartitioner]: """Create a dummy dataset and partitioner based on given arguments. The partitioner has automatically the dataset assigned to it. 
diff --git a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py index e3e46813dfc8..e62b8fdbb212 100644 --- a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py +++ b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner.py @@ -14,7 +14,7 @@ # ============================================================================== """InnerDirichlet partitioner.""" import warnings -from typing import Dict, List, Optional, Union +from typing import Optional, Union import numpy as np @@ -68,9 +68,9 @@ class InnerDirichletPartitioner(Partitioner): # pylint: disable=R0902 def __init__( # pylint: disable=R0913 self, - partition_sizes: Union[List[int], NDArrayInt], + partition_sizes: Union[list[int], NDArrayInt], partition_by: str, - alpha: Union[int, float, List[float], NDArrayFloat], + alpha: Union[int, float, list[float], NDArrayFloat], shuffle: bool = True, seed: Optional[int] = 42, ) -> None: @@ -87,11 +87,11 @@ def __init__( # pylint: disable=R0913 self._initialized_alpha = False self._rng = np.random.default_rng(seed=self._seed) # NumPy random generator # The attributes below are determined during the first call to load_partition - self._unique_classes: Optional[Union[List[int], List[str]]] = None + self._unique_classes: Optional[Union[list[int], list[str]]] = None self._num_unique_classes: Optional[int] = None self._num_partitions = len(self._partition_sizes) - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: @@ -130,7 +130,7 @@ def num_partitions(self) -> int: return self._num_partitions def _initialize_alpha_if_needed( - self, alpha: Union[int, float, List[float], NDArrayFloat] + self, alpha: Union[int, float, list[float], NDArrayFloat] ) -> NDArrayFloat: """Convert alpha to the used 
format in the code a NDArrayFloat. @@ -159,7 +159,7 @@ def _initialize_alpha_if_needed( elif isinstance(alpha, float): assert self._num_unique_classes is not None alpha = np.array([alpha], dtype=float).repeat(self._num_unique_classes) - elif isinstance(alpha, List): + elif isinstance(alpha, list): if len(alpha) != self._num_unique_classes: raise ValueError( "When passing alpha as a List, its length needs needs to be " @@ -304,10 +304,10 @@ def _check_the_sum_of_partition_sizes(self) -> None: def _instantiate_partition_sizes( - partition_sizes: Union[List[int], NDArrayInt] + partition_sizes: Union[list[int], NDArrayInt] ) -> NDArrayInt: """Transform list to the ndarray of ints if needed.""" - if isinstance(partition_sizes, List): + if isinstance(partition_sizes, list): partition_sizes = np.asarray(partition_sizes) elif isinstance(partition_sizes, np.ndarray): pass diff --git a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py index 86dc8a5df532..8df09d01f916 100644 --- a/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/inner_dirichlet_partitioner_test.py @@ -15,7 +15,7 @@ """Test DirichletPartitioner.""" # pylint: disable=W0212 import unittest -from typing import List, Tuple, Union +from typing import Union from datasets import Dataset from flwr_datasets.common.typing import NDArrayFloat, NDArrayInt @@ -27,9 +27,9 @@ def _dummy_setup( num_rows: int, partition_by: str, - partition_sizes: Union[List[int], NDArrayInt], - alpha: Union[float, List[float], NDArrayFloat], -) -> Tuple[Dataset, InnerDirichletPartitioner]: + partition_sizes: Union[list[int], NDArrayInt], + alpha: Union[float, list[float], NDArrayFloat], +) -> tuple[Dataset, InnerDirichletPartitioner]: """Create a dummy dataset and partitioner for testing.""" data = { partition_by: [i % 3 for i in range(num_rows)], diff --git 
a/datasets/flwr_datasets/partitioner/linear_partitioner.py b/datasets/flwr_datasets/partitioner/linear_partitioner.py index 840307edcac6..07fea16df5e0 100644 --- a/datasets/flwr_datasets/partitioner/linear_partitioner.py +++ b/datasets/flwr_datasets/partitioner/linear_partitioner.py @@ -15,10 +15,10 @@ """LinearPartitioner class.""" -from flwr_datasets.partitioner.size_partitioner import SizePartitioner +from flwr_datasets.partitioner.id_to_size_fnc_partitioner import IdToSizeFncPartitioner -class LinearPartitioner(SizePartitioner): +class LinearPartitioner(IdToSizeFncPartitioner): """Partitioner creates partitions of size that are linearly correlated with id. The amount of data each client gets is linearly correlated with the partition ID. diff --git a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py index 5a9af3271cb4..64b51855e1f4 100644 --- a/datasets/flwr_datasets/partitioner/natural_id_partitioner.py +++ b/datasets/flwr_datasets/partitioner/natural_id_partitioner.py @@ -15,8 +15,6 @@ """Natural id partitioner class that works with Hugging Face Datasets.""" -from typing import Dict - import numpy as np from tqdm import tqdm @@ -62,9 +60,9 @@ def __init__( partition_by: str, ): super().__init__() - self._partition_id_to_natural_id: Dict[int, str] = {} - self._natural_id_to_partition_id: Dict[str, int] = {} - self._partition_id_to_indices: Dict[int, NDArrayInt] = {} + self._partition_id_to_natural_id: dict[int, str] = {} + self._natural_id_to_partition_id: dict[str, int] = {} + self._partition_id_to_indices: dict[int, NDArrayInt] = {} self._partition_by = partition_by def _create_int_partition_id_to_natural_id(self) -> None: @@ -138,7 +136,7 @@ def num_partitions(self) -> int: return len(self._partition_id_to_natural_id) @property - def partition_id_to_natural_id(self) -> Dict[int, str]: + def partition_id_to_natural_id(self) -> dict[int, str]: """Node id to corresponding natural id 
present. Natural ids are the unique values in `partition_by` column in dataset. @@ -146,7 +144,7 @@ def partition_id_to_natural_id(self) -> Dict[int, str]: return self._partition_id_to_natural_id @partition_id_to_natural_id.setter - def partition_id_to_natural_id(self, value: Dict[int, str]) -> None: + def partition_id_to_natural_id(self, value: dict[int, str]) -> None: raise AttributeError( "Setting the partition_id_to_natural_id dictionary is not allowed." ) diff --git a/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py b/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py index b74a044967ef..d3147985dca9 100644 --- a/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/natural_id_partitioner_test.py @@ -18,7 +18,6 @@ import itertools import math import unittest -from typing import Tuple from parameterized import parameterized @@ -28,7 +27,7 @@ def _dummy_setup( num_rows: int, n_unique_natural_ids: int -) -> Tuple[Dataset, NaturalIdPartitioner]: +) -> tuple[Dataset, NaturalIdPartitioner]: """Create a dummy dataset and partitioner based on given arguments. The partitioner has automatically the dataset assigned to it. 
diff --git a/datasets/flwr_datasets/partitioner/pathological_partitioner.py b/datasets/flwr_datasets/partitioner/pathological_partitioner.py index 1ee60d283044..d114ccbda02f 100644 --- a/datasets/flwr_datasets/partitioner/pathological_partitioner.py +++ b/datasets/flwr_datasets/partitioner/pathological_partitioner.py @@ -16,7 +16,7 @@ import warnings -from typing import Any, Dict, List, Literal, Optional +from typing import Any, Literal, Optional import numpy as np @@ -114,13 +114,13 @@ def __init__( self._rng = np.random.default_rng(seed=self._seed) # Utility attributes - self._partition_id_to_indices: Dict[int, List[int]] = {} - self._partition_id_to_unique_labels: Dict[int, List[Any]] = { + self._partition_id_to_indices: dict[int, list[int]] = {} + self._partition_id_to_unique_labels: dict[int, list[Any]] = { pid: [] for pid in range(self._num_partitions) } - self._unique_labels: List[Any] = [] + self._unique_labels: list[Any] = [] # Count in how many partitions the label is used - self._unique_label_to_times_used_counter: Dict[Any, int] = {} + self._unique_label_to_times_used_counter: dict[Any, int] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: @@ -225,7 +225,7 @@ def _determine_partition_id_to_unique_labels(self) -> None: if self._class_assignment_mode == "first-deterministic": # if self._first_class_deterministic_assignment: for partition_id in range(self._num_partitions): - label = partition_id % num_unique_classes + label = self._unique_labels[partition_id % num_unique_classes] self._partition_id_to_unique_labels[partition_id].append(label) while ( diff --git a/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py b/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py index 151b7e14659c..5a3b13bb1436 100644 --- a/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/pathological_partitioner_test.py @@ 
-16,10 +16,9 @@ import unittest -from typing import Dict import numpy as np -from parameterized import parameterized +from parameterized import parameterized, parameterized_class import datasets from datasets import Dataset @@ -27,7 +26,10 @@ def _dummy_dataset_setup( - num_samples: int, partition_by: str, num_unique_classes: int + num_samples: int, + partition_by: str, + num_unique_classes: int, + string_partition_by: bool = False, ) -> Dataset: """Create a dummy dataset for testing.""" data = { @@ -36,6 +38,8 @@ def _dummy_dataset_setup( )[:num_samples], "features": np.random.randn(num_samples), } + if string_partition_by: + data[partition_by] = data[partition_by].astype(str) return Dataset.from_dict(data) @@ -52,6 +56,7 @@ def _dummy_heterogeneous_dataset_setup( return Dataset.from_dict(data) +@parameterized_class(("string_partition_by",), [(False,), (True,)]) class TestClassConstrainedPartitioner(unittest.TestCase): """Unit tests for PathologicalPartitioner.""" @@ -79,7 +84,7 @@ def test_correct_num_classes_when_partitioned( num_classes_per_partition=num_classes_per_partition, ) partitioner.dataset = dataset - partitions: Dict[int, Dataset] = { + partitions: dict[int, Dataset] = { pid: partitioner.load_partition(pid) for pid in range(num_partitions) } unique_classes_per_partition = { @@ -95,7 +100,8 @@ def test_first_class_deterministic_assignment(self) -> None: Test if all the classes are used (which has to be the case, given num_partitions >= than the number of unique classes). 
""" - dataset = _dummy_dataset_setup(100, "labels", 10) + partition_by = "labels" + dataset = _dummy_dataset_setup(100, partition_by, 10) partitioner = PathologicalPartitioner( num_partitions=10, partition_by="labels", @@ -104,7 +110,12 @@ def test_first_class_deterministic_assignment(self) -> None: ) partitioner.dataset = dataset partitioner.load_partition(0) - expected_classes = set(range(10)) + expected_classes = set( + range(10) + # pylint: disable=unsubscriptable-object + if isinstance(dataset[partition_by][0], int) + else [str(i) for i in range(10)] + ) actual_classes = set() for pid in range(10): partition = partitioner.load_partition(pid) @@ -142,6 +153,9 @@ def test_deterministic_class_assignment( for i in range(num_classes_per_partition) ] ) + # pylint: disable=unsubscriptable-object + if isinstance(dataset["labels"][0], str): + expected_labels = [str(label) for label in expected_labels] actual_labels = sorted(np.unique(partition["labels"])) self.assertTrue( np.array_equal(expected_labels, actual_labels), @@ -167,6 +181,9 @@ def test_too_many_partitions_for_a_class( "labels": np.array([num_unique_classes - 1] * (num_samples // 2)), "features": np.random.randn(num_samples // 2), } + # pylint: disable=unsubscriptable-object + if isinstance(dataset_1["labels"][0], str): + data["labels"] = data["labels"].astype(str) dataset_2 = Dataset.from_dict(data) dataset = datasets.concatenate_datasets([dataset_1, dataset_2]) diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner.py b/datasets/flwr_datasets/partitioner/shard_partitioner.py index 11cffa515da0..3001df6dcb69 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner.py @@ -17,7 +17,7 @@ # pylint: disable=R0912, R0914 import math -from typing import Dict, List, Optional +from typing import Optional import numpy as np @@ -165,7 +165,7 @@ def __init__( # pylint: disable=R0913 # Utility attributes self._rng = 
np.random.default_rng(seed=self._seed) # NumPy random generator - self._partition_id_to_indices: Dict[int, List[int]] = {} + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: @@ -299,7 +299,7 @@ def _determine_partition_id_to_indices_if_needed( nid_to_shard_indices = np.split( shard_indices_array, indices_on_which_to_split_shards )[:-1] - partition_id_to_indices: Dict[int, List[int]] = { + partition_id_to_indices: dict[int, list[int]] = { cid: [] for cid in range(self._num_partitions) } # Compute partition_id to sample indices based on the shard indices diff --git a/datasets/flwr_datasets/partitioner/shard_partitioner_test.py b/datasets/flwr_datasets/partitioner/shard_partitioner_test.py index d6fa8b529595..be8edf9d2764 100644 --- a/datasets/flwr_datasets/partitioner/shard_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/shard_partitioner_test.py @@ -17,7 +17,7 @@ # pylint: disable=W0212, R0913 import unittest -from typing import Optional, Tuple +from typing import Optional from datasets import Dataset from flwr_datasets.partitioner.shard_partitioner import ShardPartitioner @@ -30,7 +30,7 @@ def _dummy_setup( num_shards_per_partition: Optional[int], shard_size: Optional[int], keep_incomplete_shard: bool = False, -) -> Tuple[Dataset, ShardPartitioner]: +) -> tuple[Dataset, ShardPartitioner]: """Create a dummy dataset for testing.""" data = { partition_by: [i % 3 for i in range(num_rows)], diff --git a/datasets/flwr_datasets/partitioner/size_partitioner.py b/datasets/flwr_datasets/partitioner/size_partitioner.py index 35937d8b9cc7..a79b6b7249f2 100644 --- a/datasets/flwr_datasets/partitioner/size_partitioner.py +++ b/datasets/flwr_datasets/partitioner/size_partitioner.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,72 +15,56 @@ """SizePartitioner class.""" -from typing import Callable, Dict, List, Union - -import numpy as np +import warnings +from collections.abc import Sequence import datasets from flwr_datasets.partitioner.partitioner import Partitioner class SizePartitioner(Partitioner): - """Base class for the deterministic size partitioning based on the `partition_id`. - - The client with `partition_id` has the following relationship regarding the number - of samples. - - `partition_id_to_size_fn(partition_id)` ~ number of samples for `partition_id` - - If the function doesn't transform the `partition_id` it's a linear correlation - between the number of sample for the partition and the value of `partition_id`. For - instance, if the partition ids range from 1 to M, partition with id 1 gets 1 unit of - data, client 2 gets 2 units, and so on, up to partition M which gets M units. - - Note that size corresponding to the `partition_id` is deterministic, yet in case of - different dataset shuffling the assignment of samples to `partition_id` will vary. + """Partitioner that creates each partition with the size specified by a user. Parameters ---------- - num_partitions : int - The total number of partitions that the data will be divided into. - partition_id_to_size_fn : Callable - Function that defines the relationship between partition id and the number of - samples. + partition_sizes : Sequence[int] + The size of each partition. partition_id 0 will have partition_sizes[0] + samples, partition_id 1 will have partition_sizes[1] samples, etc. 
+ + Examples + -------- + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.partitioner import SizePartitioner + >>> + >>> partition_sizes = [15_000, 5_000, 30_000] + >>> partitioner = SizePartitioner(partition_sizes) + >>> fds = FederatedDataset(dataset="cifar10", partitioners={"train": partitioner}) """ - def __init__( - self, - num_partitions: int, - partition_id_to_size_fn: Callable, # type: ignore[type-arg] - ) -> None: + def __init__(self, partition_sizes: Sequence[int]) -> None: super().__init__() - if num_partitions <= 0: - raise ValueError("The number of partitions must be greater than zero.") - self._num_partitions = num_partitions - self._partition_id_to_size_fn = partition_id_to_size_fn - - self._partition_id_to_size: Dict[int, int] = {} - self._partition_id_to_indices: Dict[int, List[int]] = {} - # A flag to perform only a single compute to determine the indices + self._pre_ds_validate_partition_sizes(partition_sizes) + self._partition_sizes = partition_sizes + self._partition_id_to_indices: dict[int, list[int]] = {} self._partition_id_to_indices_determined = False def load_partition(self, partition_id: int) -> datasets.Dataset: - """Load a single partition based on the partition index. + """Load a single partition of the size of partition_sizes[partition_id]. - The number of samples is dependent on the partition partition_id. + For example if given partition_sizes=[20_000, 10_000, 30_000], + then partition_id=0 will return a partition of size 20_000, + partition_id=1 will return a partition of size 10_000, etc. Parameters ---------- partition_id : int - the index that corresponds to the requested partition + The index that corresponds to the requested partition. Returns ------- - dataset_partition: Dataset - single dataset partition + dataset_partition : Dataset + Single dataset partition. """ - # The partitioning is done lazily - only when the first partition is requested. 
- # A single run creates the indices assignments for all the partition indices. self._determine_partition_id_to_indices_if_needed() return self.dataset.select(self._partition_id_to_indices[partition_id]) @@ -88,58 +72,57 @@ def load_partition(self, partition_id: int) -> datasets.Dataset: def num_partitions(self) -> int: """Total number of partitions.""" self._determine_partition_id_to_indices_if_needed() - return self._num_partitions + return len(self._partition_sizes) @property - def partition_id_to_size(self) -> Dict[int, int]: - """Node id to the number of samples.""" - return self._partition_id_to_size - - @property - def partition_id_to_indices(self) -> Dict[int, List[int]]: - """Node id to the list of indices.""" + def partition_id_to_indices(self) -> dict[int, list[int]]: + """Partition id to indices (the result of partitioning).""" + self._determine_partition_id_to_indices_if_needed() return self._partition_id_to_indices - def _determine_partition_id_to_size(self) -> None: - """Determine data quantity associated with partition indices.""" - data_division_in_units = self._partition_id_to_size_fn( - np.linspace(start=1, stop=self._num_partitions, num=self._num_partitions) - ) - total_units: Union[int, float] = data_division_in_units.sum() - # Normalize the units to get the fraction total dataset - partition_sizes_as_fraction = data_division_in_units / total_units - # Calculate the number of samples - partition_sizes_as_num_of_samples = np.array( - partition_sizes_as_fraction * len(self.dataset), dtype=np.int64 - ) - # Check if any sample is not allocated because of multiplication with fractions. - assigned_samples = np.sum(partition_sizes_as_num_of_samples) - left_unassigned_samples = len(self.dataset) - assigned_samples - # If there is any sample(s) left unassigned, assign it to the largest partition. 
- partition_sizes_as_num_of_samples[-1] += left_unassigned_samples - for idx, partition_size in enumerate(partition_sizes_as_num_of_samples): - self._partition_id_to_size[idx] = partition_size - - self._check_if_partition_id_to_size_possible() - - def _determine_partition_id_to_indices_if_needed(self) -> None: - """Create an assignment of indices to the partition indices..""" - if self._partition_id_to_indices_determined is True: + def _determine_partition_id_to_indices_if_needed( + self, + ) -> None: + """Create an assignment of indices to the partition indices.""" + if self._partition_id_to_indices_determined: return - self._determine_partition_id_to_size() - total_samples_assigned = 0 - for idx, quantity in self._partition_id_to_size.items(): - self._partition_id_to_indices[idx] = list( - range(total_samples_assigned, total_samples_assigned + quantity) - ) - total_samples_assigned += quantity + self._post_ds_validate_partition_sizes() + start = 0 + end = 0 + for partition_id, partition_size in enumerate(self._partition_sizes): + end += partition_size + indices = list(range(start, end)) + self._partition_id_to_indices[partition_id] = indices + start = end self._partition_id_to_indices_determined = True - def _check_if_partition_id_to_size_possible(self) -> None: - all_positive = all(value >= 1 for value in self.partition_id_to_size.values()) - if not all_positive: + def _pre_ds_validate_partition_sizes(self, partition_sizes: Sequence[int]) -> None: + """Check if the partition sizes are valid (no information about the dataset).""" + if not isinstance(partition_sizes, Sequence): + raise ValueError("Partition sizes must be a sequence.") + if len(partition_sizes) == 0: + raise ValueError("Partition sizes must not be empty.") + if not all( + isinstance(partition_size, int) for partition_size in partition_sizes + ): + raise ValueError("All partition sizes must be integers.") + if not all(partition_size > 0 for partition_size in partition_sizes): + raise ValueError("All 
partition sizes must be greater than zero.") + + def _post_ds_validate_partition_sizes(self) -> None: + """Validate the partition sizes against the dataset size.""" + desired_partition_sizes = sum(self._partition_sizes) + dataset_size = len(self.dataset) + if desired_partition_sizes > dataset_size: raise ValueError( - f"The given specification of the parameter num_partitions" - f"={self._num_partitions} for the given dataset results " - f"in the partitions sizes that are not greater than 0." + f"The sum of partition sizes sum({self._partition_sizes})" + f"= {desired_partition_sizes} is greater than the size of" + f" the dataset {dataset_size}." + ) + if desired_partition_sizes < dataset_size: + warnings.warn( + f"The sum of partition sizes is {desired_partition_sizes}, which is" + f"smaller than the size of the dataset: {dataset_size}. " + f"Ignore this warning if it is the desired behavior.", + stacklevel=1, ) diff --git a/datasets/flwr_datasets/partitioner/size_partitioner_test.py b/datasets/flwr_datasets/partitioner/size_partitioner_test.py index 086ca3731e58..be8edf9d2764 100644 --- a/datasets/flwr_datasets/partitioner/size_partitioner_test.py +++ b/datasets/flwr_datasets/partitioner/size_partitioner_test.py @@ -12,92 +12,380 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""SizePartitioner tests.""" +"""Test ShardPartitioner.""" +# pylint: disable=W0212, R0913 import unittest - -from parameterized import parameterized +from typing import Optional from datasets import Dataset -from flwr_datasets.partitioner.linear_partitioner import LinearPartitioner +from flwr_datasets.partitioner.shard_partitioner import ShardPartitioner -def _dummy_dataset(num_rows: int) -> Dataset: +def _dummy_setup( + num_rows: int, + partition_by: str, + num_partitions: int, + num_shards_per_partition: Optional[int], + shard_size: Optional[int], + keep_incomplete_shard: bool = False, +) -> tuple[Dataset, ShardPartitioner]: + """Create a dummy dataset for testing.""" data = { + partition_by: [i % 3 for i in range(num_rows)], "features": list(range(num_rows)), - "labels": [i % 2 for i in range(num_rows)], } dataset = Dataset.from_dict(data) - return dataset + partitioner = ShardPartitioner( + num_partitions=num_partitions, + num_shards_per_partition=num_shards_per_partition, + partition_by=partition_by, + shard_size=shard_size, + keep_incomplete_shard=keep_incomplete_shard, + ) + partitioner.dataset = dataset + return dataset, partitioner + + +class TestShardPartitionerSpec1(unittest.TestCase): + """Test first possible initialization of ShardPartitioner. + + Specify num_shards_per_partition and shard_size arguments. 
+ """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + _ = partitioner.load_partition(0) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [30, 30, 30]) -class TestLinearPartitioner(unittest.TestCase): - """Test LinearPartitioner.""" + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. - @parameterized.expand( # type: ignore - [ - (1, 100), - (10, 100), - (5, 55), # This will leave some undivided samples + (No duplicates along partitions). 
+ """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) ] - ) - def test_linear_distribution(self, num_partitions: int, num_rows: int) -> None: - """Test the linear distribution of samples.""" - dataset = _dummy_dataset(num_rows) - partitioner = LinearPartitioner(num_partitions=num_partitions) - partitioner.dataset = dataset - # Run a single partition loading to trigger the division + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerSpec2(unittest.TestCase): + """Test second possible initialization of ShardPartitioner. + + Specify shard_size and keep_incomplete_shard=False. This setting creates partitions + that might have various sizes (each shard is same size). 
+ """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) _ = partitioner.load_partition(0) - total_samples = sum(partitioner.partition_id_to_size.values()) - self.assertEqual(total_samples, num_rows) - - # Testing if each partition is getting more than the previous one - last_count = 0 - for i in range(num_partitions): - current_count = partitioner.partition_id_to_size[i] - self.assertGreaterEqual(current_count, last_count) - last_count = current_count - - @parameterized.expand( # type: ignore - [ - (10, 100), - (5, 55), # This will leave some undivided samples - (7, 77), # This will leave some undivided samples + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [30, 40, 40]) + + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. + + (No duplicates along partitions). 
+ """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) ] - ) - def test_undivided_samples(self, num_partitions: int, num_rows: int) -> None: - """Test the logic for distributing undivided samples.""" - dataset = _dummy_dataset(num_rows) - partitioner = LinearPartitioner(num_partitions=num_partitions) - partitioner.dataset = dataset - # If there are any undivided samples, they should be added to the largest - # partition - last_partition_id = num_partitions - 1 - actual_samples_in_last_partition = len( - partitioner.load_partition(last_partition_id) + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerSpec3(unittest.TestCase): + """Test third possible initialization of ShardPartitioner. + + Specify shard_size and keep_incomplete_shard=True. This setting creates partitions + that might have various sizes (each shard is same size). 
+ """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = True + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + _ = partitioner.load_partition(0) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = True + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [33, 40, 40]) + + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. + + (No duplicates along partitions). 
+ """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = True + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, ) - expected_samples_in_last_partition = partitioner.partition_id_to_size[ - last_partition_id + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) ] - self.assertEqual( - expected_samples_in_last_partition, actual_samples_in_last_partition + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerSpec4(unittest.TestCase): + """Test fourth possible initialization of ShardPartitioner. + + Specify num_shards_per_partition but not shard_size arguments. + """ + + def test_correct_num_partitions(self) -> None: + """Test the correct number of partitions is created.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = None + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, ) + _ = partitioner.load_partition(0) + num_partitions_created = len(partitioner._partition_id_to_indices.keys()) + self.assertEqual(num_partitions_created, num_partitions) + + def test_correct_partition_sizes(self) -> None: + """Test if the partitions sizes are as theoretically calculated.""" + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = None + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + sizes = [len(partitioner.load_partition(i)) for i in 
range(num_partitions)] + sizes = sorted(sizes) + self.assertEqual(sizes, [36, 36, 36]) + + def test_unique_samples(self) -> None: + """Test if each partition has unique samples. + + (No duplicates along partitions). + """ + partition_by = "label" + num_rows = 113 + num_partitions = 3 + num_shards_per_partition = 3 + shard_size = None + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + partitions = [ + partitioner.load_partition(i)["features"] for i in range(num_partitions) + ] + combined_list = [item for sublist in partitions for item in sublist] + combined_set = set(combined_list) + self.assertEqual(len(combined_list), len(combined_set)) + + +class TestShardPartitionerIncorrectSpec(unittest.TestCase): + """Test the incorrect specification cases. - def test_meaningless_params(self) -> None: - """Test if the params leading to partition size not greater than zero raises.""" + The lack of correctness can be caused by the num_partitions, shard_size and + num_shards_per_partition can create. 
+ """ + + def test_incorrect_specification(self) -> None: + """Test if the given specification makes the partitioning possible.""" + partition_by = "label" num_rows = 10 - num_partitions = 100 - dataset = _dummy_dataset(num_rows) - partitioner = LinearPartitioner(num_partitions=num_partitions) - partitioner.dataset = dataset - with self.assertRaises(ValueError) as context: - partitioner.load_partition(1) - self.assertIn( - "The given specification of the parameter num_partitions=100 for the given " - "dataset results in the partitions sizes that are not greater than 0.", - str(context.exception), + num_partitions = 3 + num_shards_per_partition = 2 + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, + ) + with self.assertRaises(ValueError): + _ = partitioner.load_partition(0) + + def test_too_big_shard_size(self) -> None: + """Test if it is impossible to create an empty partition.""" + partition_by = "label" + num_rows = 20 + num_partitions = 3 + num_shards_per_partition = None + shard_size = 10 + keep_incomplete_shard = False + _, partitioner = _dummy_setup( + num_rows, + partition_by, + num_partitions, + num_shards_per_partition, + shard_size, + keep_incomplete_shard, ) + with self.assertRaises(ValueError): + _ = partitioner.load_partition(2).num_rows if __name__ == "__main__": diff --git a/datasets/flwr_datasets/partitioner/square_partitioner.py b/datasets/flwr_datasets/partitioner/square_partitioner.py index 0fa0a0803a0e..d48af247e5cb 100644 --- a/datasets/flwr_datasets/partitioner/square_partitioner.py +++ b/datasets/flwr_datasets/partitioner/square_partitioner.py @@ -17,10 +17,10 @@ import numpy as np -from flwr_datasets.partitioner.size_partitioner import SizePartitioner +from flwr_datasets.partitioner.id_to_size_fnc_partitioner import IdToSizeFncPartitioner -class SquarePartitioner(SizePartitioner): +class 
SquarePartitioner(IdToSizeFncPartitioner): """Partitioner creates partitions of size that are correlated with squared id. The amount of data each client gets is correlated with the squared partition ID. diff --git a/datasets/flwr_datasets/preprocessor/divider_test.py b/datasets/flwr_datasets/preprocessor/divider_test.py index ed282fbc18be..bb92d72c1c4a 100644 --- a/datasets/flwr_datasets/preprocessor/divider_test.py +++ b/datasets/flwr_datasets/preprocessor/divider_test.py @@ -15,7 +15,7 @@ """Divider tests.""" import unittest -from typing import Dict, Union +from typing import Union from parameterized import parameterized_class @@ -84,14 +84,14 @@ class TestDivider(unittest.TestCase): """Divider tests.""" divide_config: Union[ - Dict[str, float], - Dict[str, int], - Dict[str, Dict[str, float]], - Dict[str, Dict[str, int]], + dict[str, float], + dict[str, int], + dict[str, dict[str, float]], + dict[str, dict[str, int]], ] divide_split: str drop_remaining_splits: bool - split_name_to_size: Dict[str, int] + split_name_to_size: dict[str, int] def setUp(self) -> None: """Set up the dataset with 3 splits for tests.""" diff --git a/datasets/flwr_datasets/preprocessor/merger.py b/datasets/flwr_datasets/preprocessor/merger.py index 2b76dbbafe4b..e47993dd686e 100644 --- a/datasets/flwr_datasets/preprocessor/merger.py +++ b/datasets/flwr_datasets/preprocessor/merger.py @@ -18,7 +18,6 @@ import collections import warnings from functools import reduce -from typing import Dict, List, Tuple import datasets from datasets import Dataset, DatasetDict @@ -56,9 +55,9 @@ class Merger: def __init__( self, - merge_config: Dict[str, Tuple[str, ...]], + merge_config: dict[str, tuple[str, ...]], ) -> None: - self._merge_config: Dict[str, Tuple[str, ...]] = merge_config + self._merge_config: dict[str, tuple[str, ...]] = merge_config self._check_duplicate_merge_splits() def __call__(self, dataset: DatasetDict) -> DatasetDict: @@ -70,7 +69,7 @@ def resplit(self, dataset: DatasetDict) -> 
DatasetDict: """Resplit the dataset according to the `merge_config`.""" resplit_dataset = {} for divide_to, divided_from__list in self._merge_config.items(): - datasets_from_list: List[Dataset] = [] + datasets_from_list: list[Dataset] = [] for divide_from in divided_from__list: datasets_from_list.append(dataset[divide_from]) if len(datasets_from_list) > 1: diff --git a/datasets/flwr_datasets/preprocessor/merger_test.py b/datasets/flwr_datasets/preprocessor/merger_test.py index 137b0dd1a660..0dd534229eb0 100644 --- a/datasets/flwr_datasets/preprocessor/merger_test.py +++ b/datasets/flwr_datasets/preprocessor/merger_test.py @@ -16,7 +16,6 @@ import unittest -from typing import Dict, Tuple import pytest @@ -39,28 +38,28 @@ def setUp(self) -> None: def test_resplitting_train_size(self) -> None: """Test if resplitting for just renaming keeps the lengths correct.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_train": ("train",)} + strategy: dict[str, tuple[str, ...]] = {"new_train": ("train",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertEqual(len(new_dataset["new_train"]), 3) def test_resplitting_valid_size(self) -> None: """Test if resplitting for just renaming keeps the lengths correct.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_valid": ("valid",)} + strategy: dict[str, tuple[str, ...]] = {"new_valid": ("valid",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertEqual(len(new_dataset["new_valid"]), 2) def test_resplitting_test_size(self) -> None: """Test if resplitting for just renaming keeps the lengths correct.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_test": ("test",)} + strategy: dict[str, tuple[str, ...]] = {"new_test": ("test",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertEqual(len(new_dataset["new_test"]), 1) def test_resplitting_train_the_same(self) -> None: """Test if resplitting for just renaming keeps the dataset the same.""" - strategy: 
Dict[str, Tuple[str, ...]] = {"new_train": ("train",)} + strategy: dict[str, tuple[str, ...]] = {"new_train": ("train",)} merger = Merger(strategy) new_dataset = merger(self.dataset_dict) self.assertTrue( @@ -69,7 +68,7 @@ def test_resplitting_train_the_same(self) -> None: def test_combined_train_valid_size(self) -> None: """Test if the resplitting that combines the datasets has correct size.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "train_valid_combined": ("train", "valid") } merger = Merger(strategy) @@ -78,7 +77,7 @@ def test_combined_train_valid_size(self) -> None: def test_resplitting_test_with_combined_strategy_size(self) -> None: """Test if the resplitting that combines the datasets has correct size.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "train_valid_combined": ("train", "valid"), "test": ("test",), } @@ -88,7 +87,7 @@ def test_resplitting_test_with_combined_strategy_size(self) -> None: def test_invalid_resplit_strategy_exception_message(self) -> None: """Test if the resplitting raises error when non-existing split is given.""" - strategy: Dict[str, Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "new_train": ("invalid_split",), "new_test": ("test",), } @@ -100,7 +99,7 @@ def test_invalid_resplit_strategy_exception_message(self) -> None: def test_nonexistent_split_in_strategy(self) -> None: """Test if the exception is raised when the nonexistent split name is given.""" - strategy: Dict[str, Tuple[str, ...]] = {"new_split": ("nonexistent_split",)} + strategy: dict[str, tuple[str, ...]] = {"new_split": ("nonexistent_split",)} merger = Merger(strategy) with self.assertRaisesRegex( ValueError, "The given dataset key 'nonexistent_split' is not present" @@ -109,7 +108,7 @@ def test_nonexistent_split_in_strategy(self) -> None: def test_duplicate_merge_split_name(self) -> None: """Test that the new split names are not the same.""" - strategy: Dict[str, 
Tuple[str, ...]] = { + strategy: dict[str, tuple[str, ...]] = { "new_train": ("train", "valid"), "test": ("train",), } @@ -119,7 +118,7 @@ def test_duplicate_merge_split_name(self) -> None: def test_empty_dataset_dict(self) -> None: """Test that the error is raised when the empty DatasetDict is given.""" empty_dataset = DatasetDict({}) - strategy: Dict[str, Tuple[str, ...]] = {"new_train": ("train",)} + strategy: dict[str, tuple[str, ...]] = {"new_train": ("train",)} merger = Merger(strategy) with self.assertRaisesRegex( ValueError, "The given dataset key 'train' is not present" diff --git a/datasets/flwr_datasets/utils.py b/datasets/flwr_datasets/utils.py index 32904ded2861..1657c2a0ebd3 100644 --- a/datasets/flwr_datasets/utils.py +++ b/datasets/flwr_datasets/utils.py @@ -16,7 +16,7 @@ import warnings -from typing import Dict, List, Optional, Tuple, Union, cast +from typing import Optional, Union, cast from datasets import Dataset, DatasetDict, concatenate_datasets from flwr_datasets.partitioner import IidPartitioner, Partitioner @@ -48,12 +48,17 @@ "Mike0307/MNIST-M", "flwrlabs/usps", "scikit-learn/iris", + "flwrlabs/pacs", + "flwrlabs/cinic10", + "flwrlabs/caltech101", + "flwrlabs/office-home", + "flwrlabs/fed-isic2019", ] def _instantiate_partitioners( - partitioners: Dict[str, Union[Partitioner, int]] -) -> Dict[str, Partitioner]: + partitioners: dict[str, Union[Partitioner, int]] +) -> dict[str, Partitioner]: """Transform the partitioners from the initial format to instantiated objects. Parameters @@ -66,8 +71,8 @@ def _instantiate_partitioners( partitioners : Dict[str, Partitioner] Partitioners specified as split to Partitioner object. 
""" - instantiated_partitioners: Dict[str, Partitioner] = {} - if isinstance(partitioners, Dict): + instantiated_partitioners: dict[str, Partitioner] = {} + if isinstance(partitioners, dict): for split, partitioner in partitioners.items(): if isinstance(partitioner, Partitioner): instantiated_partitioners[split] = partitioner @@ -90,10 +95,10 @@ def _instantiate_partitioners( def _instantiate_merger_if_needed( - merger: Optional[Union[Preprocessor, Dict[str, Tuple[str, ...]]]] + merger: Optional[Union[Preprocessor, dict[str, tuple[str, ...]]]] ) -> Optional[Preprocessor]: """Instantiate `Merger` if preprocessor is merge_config.""" - if merger and isinstance(merger, Dict): + if merger and isinstance(merger, dict): merger = Merger(merge_config=merger) return cast(Optional[Preprocessor], merger) @@ -108,8 +113,8 @@ def _check_if_dataset_tested(dataset: str) -> None: def divide_dataset( - dataset: Dataset, division: Union[List[float], Tuple[float, ...], Dict[str, float]] -) -> Union[List[Dataset], DatasetDict]: + dataset: Dataset, division: Union[list[float], tuple[float, ...], dict[str, float]] +) -> Union[list[Dataset], DatasetDict]: """Divide the dataset according to the `division`. The division support varying number of splits, which you can name. The splits are @@ -141,7 +146,8 @@ def divide_dataset( >>> division = [0.8, 0.2] >>> train, test = divide_dataset(dataset=partition, division=division) - Use `divide_dataset` with division specified as a dict. + Use `divide_dataset` with division specified as a dict + (this accomplishes the same goal as the example with a list above). 
>>> from flwr_datasets import FederatedDataset >>> from flwr_datasets.utils import divide_dataset @@ -156,12 +162,12 @@ def divide_dataset( dataset_length = len(dataset) ranges = _create_division_indices_ranges(dataset_length, division) if isinstance(division, (list, tuple)): - split_partition: List[Dataset] = [] + split_partition: list[Dataset] = [] for single_range in ranges: split_partition.append(dataset.select(single_range)) return split_partition if isinstance(division, dict): - split_partition_dict: Dict[str, Dataset] = {} + split_partition_dict: dict[str, Dataset] = {} for split_name, single_range in zip(division.keys(), ranges): split_partition_dict[split_name] = dataset.select(single_range) return DatasetDict(split_partition_dict) @@ -173,8 +179,8 @@ def divide_dataset( def _create_division_indices_ranges( dataset_length: int, - division: Union[List[float], Tuple[float, ...], Dict[str, float]], -) -> List[range]: + division: Union[list[float], tuple[float, ...], dict[str, float]], +) -> list[range]: ranges = [] if isinstance(division, (list, tuple)): start_idx = 0 @@ -200,7 +206,7 @@ def _create_division_indices_ranges( def _check_division_config_types_correctness( - division: Union[List[float], Tuple[float, ...], Dict[str, float]] + division: Union[list[float], tuple[float, ...], dict[str, float]] ) -> None: if isinstance(division, (list, tuple)): if not all(isinstance(x, float) for x in division): @@ -219,7 +225,7 @@ def _check_division_config_types_correctness( def _check_division_config_values_correctness( - division: Union[List[float], Tuple[float, ...], Dict[str, float]] + division: Union[list[float], tuple[float, ...], dict[str, float]] ) -> None: if isinstance(division, (list, tuple)): if not all(0 < x <= 1 for x in division): @@ -257,7 +263,7 @@ def _check_division_config_values_correctness( def _check_division_config_correctness( - division: Union[List[float], Tuple[float, ...], Dict[str, float]] + division: Union[list[float], tuple[float, ...], 
 dict[str, float]] ) -> None: _check_division_config_types_correctness(division) _check_division_config_values_correctness(division) @@ -265,14 +271,14 @@ def _check_division_config_correctness( def concatenate_divisions( partitioner: Partitioner, - partition_division: Union[List[float], Tuple[float, ...], Dict[str, float]], + partition_division: Union[list[float], tuple[float, ...], dict[str, float]], division_id: Union[int, str], ) -> Dataset: - """Create a dataset by concatenation of all partitions in the same division. + """Create a dataset by concatenation of divisions from all partitions. The divisions are created based on the `partition_division` and accessed based - on the `division_id`. It can be used to create e.g. centralized dataset from - federated on-edge test sets. + on the `division_id`. This function can be used to create e.g. centralized dataset + from federated on-edge test sets. Parameters ---------- @@ -293,6 +299,35 @@ def concatenate_divisions( ------- concatenated_divisions : Dataset A dataset created as concatenation of the divisions from all partitions. + + Examples + -------- + Use `concatenate_divisions` with division specified as a list. + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.utils import concatenate_divisions + >>> + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + >>> concatenated_divisions = concatenate_divisions( + ... partitioner=fds.partitioners["train"], + ... partition_division=[0.8, 0.2], + ... division_id=1 + ... ) + >>> print(concatenated_divisions) + + Use `concatenate_divisions` with division specified as a dict. + This accomplishes the same goal as the example with a list above. + + >>> from flwr_datasets import FederatedDataset + >>> from flwr_datasets.utils import concatenate_divisions + >>> + >>> fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + >>> concatenated_divisions = concatenate_divisions( + ... partitioner=fds["train"], + ... 
partition_division={"train": 0.8, "test": 0.2}, + ... division_id="test" + ... ) + >>> print(concatenated_divisions) """ _check_division_config_correctness(partition_division) divisions = [] @@ -307,7 +342,7 @@ def concatenate_divisions( ) partition = divide_dataset(partition, partition_division) division = partition[division_id] - elif isinstance(partition_division, Dict): + elif isinstance(partition_division, dict): partition = divide_dataset(partition, partition_division) division = partition[division_id] else: diff --git a/datasets/flwr_datasets/utils_test.py b/datasets/flwr_datasets/utils_test.py index 4add9f88eeb5..3c94570471ac 100644 --- a/datasets/flwr_datasets/utils_test.py +++ b/datasets/flwr_datasets/utils_test.py @@ -14,7 +14,7 @@ # ============================================================================== """Utils tests.""" import unittest -from typing import Dict, List, Tuple, Union +from typing import Union from parameterized import parameterized_class @@ -62,8 +62,8 @@ class UtilsTests(unittest.TestCase): """Utils for tests.""" - partition_division: Union[List[float], Tuple[float, ...], Dict[str, float]] - sizes: Tuple[int] + partition_division: Union[list[float], tuple[float, ...], dict[str, float]] + sizes: tuple[int] division_id: Union[int, str] expected_concatenation_size: int diff --git a/datasets/flwr_datasets/visualization/bar_plot.py b/datasets/flwr_datasets/visualization/bar_plot.py index 352c99a572f5..0f6936976fc0 100644 --- a/datasets/flwr_datasets/visualization/bar_plot.py +++ b/datasets/flwr_datasets/visualization/bar_plot.py @@ -15,28 +15,29 @@ """Label distribution bar plotting.""" -from typing import Any, Dict, Optional, Tuple, Union +from typing import Any, Optional, Union import numpy as np import pandas as pd from matplotlib import colors as mcolors from matplotlib import pyplot as plt from matplotlib.axes import Axes +from matplotlib.figure import Figure # pylint: disable=too-many-arguments,too-many-locals,too-many-branches 
def _plot_bar( dataframe: pd.DataFrame, axis: Optional[Axes], - figsize: Optional[Tuple[float, float]], + figsize: Optional[tuple[float, float]], title: str, colormap: Optional[Union[str, mcolors.Colormap]], partition_id_axis: str, size_unit: str, legend: bool, legend_title: Optional[str], - plot_kwargs: Optional[Dict[str, Any]], - legend_kwargs: Optional[Dict[str, Any]], + plot_kwargs: Optional[dict[str, Any]], + legend_kwargs: Optional[dict[str, Any]], ) -> Axes: if axis is None: if figsize is None: @@ -82,10 +83,11 @@ def _plot_bar( if "stacked" not in plot_kwargs: plot_kwargs["stacked"] = True - axis = dataframe.plot( + axis_df: Axes = dataframe.plot( ax=axis, **plot_kwargs, ) + assert axis_df is not None, "axis is None after plotting using DataFrame.plot()" if legend: if legend_kwargs is None: @@ -104,26 +106,28 @@ def _plot_bar( shift = min(0.05 + max_len_label_str / 100, 0.15) legend_kwargs["bbox_to_anchor"] = (1.0 + shift, 0.5) - handles, legend_labels = axis.get_legend_handles_labels() - _ = axis.figure.legend( + handles, legend_labels = axis_df.get_legend_handles_labels() + figure = axis_df.figure + assert isinstance(figure, Figure), "figure extraction from axes is not a Figure" + _ = figure.legend( handles=handles[::-1], labels=legend_labels[::-1], **legend_kwargs ) # Heuristic to make the partition id on xticks non-overlapping if partition_id_axis == "x": - xticklabels = axis.get_xticklabels() + xticklabels = axis_df.get_xticklabels() if len(xticklabels) > 20: # Make every other xtick label not visible for i, label in enumerate(xticklabels): if i % 2 == 1: label.set_visible(False) - return axis + return axis_df def _initialize_figsize( partition_id_axis: str, num_partitions: int, -) -> Tuple[float, float]: +) -> tuple[float, float]: figsize = (0.0, 0.0) if partition_id_axis == "x": figsize = (6.4, 4.8) @@ -132,7 +136,7 @@ def _initialize_figsize( return figsize -def _initialize_xy_labels(size_unit: str, partition_id_axis: str) -> Tuple[str, str]: +def 
_initialize_xy_labels(size_unit: str, partition_id_axis: str) -> tuple[str, str]: xlabel = "Partition ID" ylabel = "Count" if size_unit == "absolute" else "Percent %" diff --git a/datasets/flwr_datasets/visualization/comparison_label_distribution.py b/datasets/flwr_datasets/visualization/comparison_label_distribution.py index 554f6d78d59a..c741ddee219e 100644 --- a/datasets/flwr_datasets/visualization/comparison_label_distribution.py +++ b/datasets/flwr_datasets/visualization/comparison_label_distribution.py @@ -15,7 +15,7 @@ """Comparison of label distribution plotting.""" -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Literal, Optional, Union import matplotlib.colors as mcolors import matplotlib.pyplot as plt @@ -30,23 +30,24 @@ # pylint: disable=too-many-arguments,too-many-locals +# mypy: disable-error-code="call-overload" def plot_comparison_label_distribution( - partitioner_list: List[Partitioner], - label_name: Union[str, List[str]], - plot_type: str = "bar", - size_unit: str = "percent", - max_num_partitions: Optional[Union[int]] = 30, - partition_id_axis: str = "y", - figsize: Optional[Tuple[float, float]] = None, + partitioner_list: list[Partitioner], + label_name: Union[str, list[str]], + plot_type: Literal["bar", "heatmap"] = "bar", + size_unit: Literal["percent", "absolute"] = "percent", + max_num_partitions: Optional[int] = 30, + partition_id_axis: Literal["x", "y"] = "y", + figsize: Optional[tuple[float, float]] = None, subtitle: str = "Comparison of Per Partition Label Distribution", - titles: Optional[List[str]] = None, + titles: Optional[list[str]] = None, cmap: Optional[Union[str, mcolors.Colormap]] = None, legend: bool = False, legend_title: Optional[str] = None, verbose_labels: bool = True, - plot_kwargs_list: Optional[List[Optional[Dict[str, Any]]]] = None, - legend_kwargs: Optional[Dict[str, Any]] = None, -) -> Tuple[Figure, List[Axes], List[pd.DataFrame]]: + plot_kwargs_list: 
Optional[list[Optional[dict[str, Any]]]] = None, + legend_kwargs: Optional[dict[str, Any]] = None, +) -> tuple[Figure, list[Axes], list[pd.DataFrame]]: """Compare the label distribution across multiple partitioners. Parameters @@ -55,14 +56,14 @@ def plot_comparison_label_distribution( List of partitioners to be compared. label_name : Union[str, List[str]] Column name or list of column names identifying labels for each partitioner. - plot_type : str + plot_type : Literal["bar", "heatmap"] Type of plot, either "bar" or "heatmap". - size_unit : str + size_unit : Literal["percent", "absolute"] "absolute" for raw counts, or "percent" to normalize values to 100%. max_num_partitions : Optional[int] Maximum number of partitions to include in the plot. If None, all partitions are included. - partition_id_axis : str + partition_id_axis : Literal["x", "y"] Axis on which the partition IDs will be marked, either "x" or "y". figsize : Optional[Tuple[float, float]] Size of the figure. If None, a default size is calculated. 
@@ -143,7 +144,7 @@ def plot_comparison_label_distribution( num_partitioners = len(partitioner_list) if isinstance(label_name, str): label_name = [label_name] * num_partitioners - elif isinstance(label_name, List): + elif isinstance(label_name, list): pass else: raise TypeError( @@ -151,7 +152,14 @@ def plot_comparison_label_distribution( f"{type(label_name)}" ) figsize = _initialize_comparison_figsize(figsize, num_partitioners) - fig, axes = plt.subplots(1, num_partitioners, layout="constrained", figsize=figsize) + axes_sharing = _initialize_axis_sharing(size_unit, plot_type, partition_id_axis) + fig, axes = plt.subplots( + nrows=1, + ncols=num_partitioners, + figsize=figsize, + layout="constrained", + **axes_sharing, + ) if titles is None: titles = ["" for _ in range(num_partitioners)] @@ -201,11 +209,12 @@ def plot_comparison_label_distribution( axis.set_xlabel("") axis.set_ylabel("") axis.set_title(titles[idx]) - for axis in axes[1:]: - axis.set_yticks([]) + _set_tick_on_value_axes(axes, partition_id_axis, size_unit) # Set up figure xlabel and ylabel - xlabel, ylabel = _initialize_comparison_xy_labels(plot_type, partition_id_axis) + xlabel, ylabel = _initialize_comparison_xy_labels( + plot_type, size_unit, partition_id_axis + ) fig.supxlabel(xlabel) fig.supylabel(ylabel) fig.suptitle(subtitle) @@ -215,8 +224,8 @@ def plot_comparison_label_distribution( def _initialize_comparison_figsize( - figsize: Optional[Tuple[float, float]], num_partitioners: int -) -> Tuple[float, float]: + figsize: Optional[tuple[float, float]], num_partitioners: int +) -> tuple[float, float]: if figsize is not None: return figsize x_value = 4 + (num_partitioners - 1) * 2 @@ -226,11 +235,13 @@ def _initialize_comparison_figsize( def _initialize_comparison_xy_labels( - plot_type: str, partition_id_axis: str -) -> Tuple[str, str]: + plot_type: Literal["bar", "heatmap"], + size_unit: Literal["percent", "absolute"], + partition_id_axis: Literal["x", "y"], +) -> tuple[str, str]: if plot_type 
== "bar": xlabel = "Partition ID" - ylabel = "Class distribution" + ylabel = "Class distribution" if size_unit == "percent" else "Class Count" elif plot_type == "heatmap": xlabel = "Partition ID" ylabel = "Label" @@ -243,3 +254,34 @@ def _initialize_comparison_xy_labels( xlabel, ylabel = ylabel, xlabel return xlabel, ylabel + + +def _initialize_axis_sharing( + size_unit: Literal["percent", "absolute"], + plot_type: Literal["bar", "heatmap"], + partition_id_axis: Literal["x", "y"], +) -> dict[str, bool]: + # Do not intervene when the size_unit is percent and plot_type is heatmap + if size_unit == "percent": + return {} + if plot_type == "heatmap": + return {} + if partition_id_axis == "x": + return {"sharey": True} + if partition_id_axis == "y": + return {"sharex": True} + return {"sharex": False, "sharey": False} + + +def _set_tick_on_value_axes( + axes: list[Axes], + partition_id_axis: Literal["x", "y"], + size_unit: Literal["percent", "absolute"], +) -> None: + if partition_id_axis == "x" and size_unit == "absolute": + # Exclude this case due to sharing of y-axis (and thus y-ticks) + # They must remain set and the number are displayed only on the first plot + pass + else: + for axis in axes[1:]: + axis.set_yticks([]) diff --git a/datasets/flwr_datasets/visualization/heatmap_plot.py b/datasets/flwr_datasets/visualization/heatmap_plot.py index 3c87de7693ae..b5a0e640eb1b 100644 --- a/datasets/flwr_datasets/visualization/heatmap_plot.py +++ b/datasets/flwr_datasets/visualization/heatmap_plot.py @@ -15,7 +15,7 @@ """Label distribution heatmap plotting.""" -from typing import Any, Dict, Optional, Tuple, Union +from typing import Any, Optional, Union import numpy as np import pandas as pd @@ -29,15 +29,15 @@ def _plot_heatmap( dataframe: pd.DataFrame, axis: Optional[Axes], - figsize: Optional[Tuple[float, float]], + figsize: Optional[tuple[float, float]], title: str, colormap: Optional[Union[str, mcolors.Colormap]], partition_id_axis: str, size_unit: str, legend: bool, 
legend_title: Optional[str], - plot_kwargs: Optional[Dict[str, Any]], - legend_kwargs: Optional[Dict[str, Any]], + plot_kwargs: Optional[dict[str, Any]], + legend_kwargs: Optional[dict[str, Any]], ) -> Axes: if axis is None: if figsize is None: @@ -90,7 +90,7 @@ def _initialize_figsize( partition_id_axis: str, num_partitions: int, num_labels: int, -) -> Tuple[float, float]: +) -> tuple[float, float]: figsize = (0.0, 0.0) if partition_id_axis == "x": figsize = (3 * np.sqrt(num_partitions), np.sqrt(num_labels)) diff --git a/datasets/flwr_datasets/visualization/label_distribution.py b/datasets/flwr_datasets/visualization/label_distribution.py index 0c47bd204a17..550a4ecae725 100644 --- a/datasets/flwr_datasets/visualization/label_distribution.py +++ b/datasets/flwr_datasets/visualization/label_distribution.py @@ -15,7 +15,7 @@ """Label distribution plotting.""" -from typing import Any, Dict, Optional, Tuple, Union +from typing import Any, Optional, Union import matplotlib.colors as mcolors import pandas as pd @@ -40,15 +40,15 @@ def plot_label_distributions( max_num_partitions: Optional[int] = None, partition_id_axis: str = "x", axis: Optional[Axes] = None, - figsize: Optional[Tuple[float, float]] = None, + figsize: Optional[tuple[float, float]] = None, title: str = "Per Partition Label Distribution", cmap: Optional[Union[str, mcolors.Colormap]] = None, legend: bool = False, legend_title: Optional[str] = None, verbose_labels: bool = True, - plot_kwargs: Optional[Dict[str, Any]] = None, - legend_kwargs: Optional[Dict[str, Any]] = None, -) -> Tuple[Figure, Axes, pd.DataFrame]: + plot_kwargs: Optional[dict[str, Any]] = None, + legend_kwargs: Optional[dict[str, Any]] = None, +) -> tuple[Figure, Axes, pd.DataFrame]: """Plot the label distribution of the partitions. 
Parameters @@ -245,5 +245,7 @@ def plot_label_distributions( plot_kwargs, legend_kwargs, ) - assert axis is not None - return axis.figure, axis, dataframe + assert axis is not None, "axis is None after plotting" + figure = axis.figure + assert isinstance(figure, Figure), "figure extraction from axes is not a Figure" + return figure, axis, dataframe diff --git a/datasets/pyproject.toml b/datasets/pyproject.toml index 46ecb56233d3..af7c1f1bde2a 100644 --- a/datasets/pyproject.toml +++ b/datasets/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr-datasets" -version = "0.3.0" +version = "0.4.0" description = "Flower Datasets" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -31,7 +31,6 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -52,9 +51,9 @@ exclude = [ ] [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" numpy = "^1.21.0" -datasets = ">=2.14.6 <2.20.0" +datasets = ">=2.14.6 <=3.1.0" pillow = { version = ">=6.2.1", optional = true } soundfile = { version = ">=0.12.1", optional = true } librosa = { version = ">=0.10.0.post2", optional = true } @@ -92,7 +91,7 @@ known_first_party = ["flwr_datasets"] [tool.black] line-length = 88 -target-version = ["py38", "py39", "py310", "py311"] +target-version = ["py39", "py310", "py311"] [tool.pylint."MESSAGES CONTROL"] disable = "duplicate-code,too-few-public-methods,useless-import-alias" @@ -130,7 +129,7 @@ wrap-summaries = 88 wrap-descriptions = 88 [tool.ruff] -target-version = "py38" +target-version = "py39" line-length = 88 select = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] fixable = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] diff --git a/dev/build-docker-image-matrix.py 
b/dev/build-docker-image-matrix.py index f9822574d0d5..52c96e3cca7a 100644 --- a/dev/build-docker-image-matrix.py +++ b/dev/build-docker-image-matrix.py @@ -22,7 +22,6 @@ class Distro: LATEST_SUPPORTED_PYTHON_VERSION = "3.11" SUPPORTED_PYTHON_VERSIONS = [ - "3.8", "3.9", "3.10", LATEST_SUPPORTED_PYTHON_VERSION, @@ -135,7 +134,7 @@ def tag_latest_ubuntu_with_flwr_version(image: BaseImage) -> List[str]: ubuntu_base_images = generate_base_images( flwr_version, SUPPORTED_PYTHON_VERSIONS, - [Distro(DistroName.UBUNTU, "22.04")], + [Distro(DistroName.UBUNTU, "24.04")], ) # alpine base images for the latest supported python version alpine_base_images = generate_base_images( diff --git a/dev/changelog_config.toml b/dev/changelog_config.toml index 05527e2b2cb3..3c155387ef93 100644 --- a/dev/changelog_config.toml +++ b/dev/changelog_config.toml @@ -3,972 +3,979 @@ type = ["ci", "docs", "feat", "fix", "refactor", "break"] -project = ["framework", "baselines", "datasets", "examples", "benchmarks"] +project = [ + "framework", + "baselines", + "datasets", + "examples", + "benchmarks", + "glossary", +] scope = "skip" pattern_template = "^({types})\\(({projects})(?::({scope}))?\\) ([A-Z][^\\n]*[^\\.\\n])$" -allowed_verbs=[ - "Abandon", - "Abort", - "Abstract", - "Accept", - "Accomodate", - "Accompany", - "Account", - "Accumulate", - "Accuse", - "Ache", - "Achieve", - "Acknowledge", - "Acquire", - "Act", - "Activate", - "Active", - "Adapt", - "Add", - "Address", - "Adhere", - "Adjust", - "Admit", - "Adopt", - "Advance", - "Advise", - "Advocate", - "Affect", - "Affirm", - "Afford", - "Agree", - "Aim", - "Align", - "Allow", - "Alter", - "Amend", - "Analyse", - "Analyze", - "Anchor", - "Annotate", - "Announce", - "Annoy", - "Annul", - "Answer", - "Appeal", - "Appear", - "Append", - "Applicate", - "Apply", - "Appoint", - "Appreciate", - "Approach", - "Approve", - "Argue", - "Arise", - "Arrange", - "Arrest", - "Arrive", - "Ask", - "Assert", - "Assess", - "Assign", - "Assist", - 
"Associate", - "Assume", - "Assure", - "Attach", - "Attack", - "Attempt", - "Attend", - "Attract", - "Augment", - "Avoid", - "Awake", - "Back", - "Backport", - "Backup", - "Bake", - "Base", - "Battle", - "Be", - "Bear", - "Beat", - "Become", - "Begin", - "Behave", - "Believe", - "Belong", - "Bend", - "Benefit", - "Better", - "Beware", - "Bind", - "Blacklist", - "Blame", - "Blend", - "Block", - "Blow", - "Blur", - "Bootstrap", - "Born", - "Borrow", - "Bother", - "Break", - "Bridge", - "Bring", - "Broadcast", - "Buffer", - "Build", - "Bump", - "Bundle", - "Burn", - "Busy", - "Buy", - "Bypass", - "Cache", - "Calculate", - "Call", - "Cancel", - "Capitalize", - "Capture", - "Care", - "Carry", - "Carryout", - "Cast", - "Catch", - "Categorize", - "Cause", - "Center", - "Centralize", - "Challenge", - "Change", - "Chant", - "Charge", - "Chase", - "Chat", - "Check", - "Choose", - "Circle", - "Claim", - "Clarify", - "Clean", - "Cleanse", - "Clear", - "Climb", - "Clip", - "Close", - "Clothe", - "Coalesce", - "Collapse", - "Collect", - "Combine", - "Come", - "Command", - "Comment", - "Commit", - "Compare", - "Compensate", - "Compile", - "Complain", - "Complement", - "Complete", - "Compose", - "Compress", - "Compute", - "Conceal", - "Concentrate", - "Conclude", - "Concur", - "Conduct", - "Configure", - "Confirm", - "Confront", - "Connect", - "Connote", - "Consider", - "Consist", - "Consolidate", - "Constitute", - "Construct", - "Consume", - "Contact", - "Contain", - "Contest", - "Continue", - "Contribute", - "Control", - "Convert", - "Convey", - "Cook", - "Coordinate", - "Cope", - "Copy", - "Correct", - "Cost", - "Counsel", - "Count", - "Cover", - "Create", - "Cross", - "Cry", - "Cut", - "Cycle", - "Damage", - "Dance", - "Deal", - "Debate", - "Decide", - "Declare", - "Decode", - "Deconstruct", - "Decouple", - "Decrease", - "Dedup", - "Duplicate", - "Deduplicate", - "Default", - "Defeat", - "Defend", - "Defer", - "Define", - "Delay", - "Delegate", - "Delete", - "Deliver", - 
"Demand", - "Demolish", - "Demonstrate", - "Deny", - "Depart", - "Depend", - "Depict", - "Deprecate", - "Derive", - "Describe", - "Deserialize", - "Design", - "Desire", - "Destroy", - "Detail", - "Detect", - "Determine", - "Develop", - "Devote", - "Die", - "Dim", - "Direct", - "Disable", - "Disallow", - "Disappear", - "Disconnect", - "Discontinue", - "Discourage", - "Discover", - "Discuss", - "Dislike", - "Dismiss", - "Dispatch", - "Displace", - "Display", - "Distinguish", - "Divide", - "Do", - "Document", - "Dominate", - "Downgrade", - "Download", - "Draw", - "Dread", - "Dress", - "Drink", - "Drive", - "Drop", - "Dry", - "Dump", - "Duplicate", - "Earn", - "Eat", - "Echo", - "Edit", - "Educate", - "Elaborate", - "Elect", - "Elevate", - "Eliminate", - "Embed", - "Emerge", - "Emit", - "Employ", - "Empty", - "Enable", - "Encapsulate", - "Encourage", - "End", - "Endorse", - "Endure", - "Enforce", - "Engage", - "Enhance", - "Enjoy", - "Enquire", - "Enroll", - "Ensure", - "Enter", - "Enumerate", - "Equal", - "Equate", - "Erase", - "Escape", - "Establish", - "Estimate", - "Evaluate", - "Examine", - "Except", - "Exclude", - "Excuse", - "Execute", - "Exempt", - "Exercise", - "Exert", - "Exist", - "Exit", - "Expand", - "Expect", - "Experience", - "Explain", - "Explore", - "Export", - "Expose", - "Express", - "Extend", - "Extract", - "Face", - "Factor", - "Fail", - "Fall", - "Fault", - "Favor", - "Fear", - "Feature", - "Feed", - "Feel", - "Fetch", - "Fight", - "Fill", - "Filter", - "Find", - "Finish", - "Fit", - "Fix", - "Flatten", - "Flee", - "Flip", - "Float", - "Flow", - "Flunk", - "Flush", - "Fly", - "Focus", - "Fold", - "Follow", - "Force", - "Foresee", - "Forget", - "Fork", - "Form", - "Formalize", - "Format", - "Forward", - "Found", - "Free", - "Freeze", - "Gain", - "Gather", - "Generalize", - "Generate", - "Get", - "Gitignore", - "Give", - "Giveup", - "Glance", - "Go", - "Going", - "Govern", - "Grant", - "Grin", - "Group", - "Grow", - "Guard", - "Guess", - "Guide", - 
"Hack", - "Halt", - "Hand", - "Handle", - "Hang", - "Happen", - "Hardcode", - "Harm", - "Hate", - "Have", - "Head", - "Hear", - "Help", - "Hide", - "Highlight", - "Hint", - "Hire", - "Hit", - "Hold", - "Hook", - "Hope", - "House", - "Hurt", - "Identify", - "Ignore", - "Illuminate", - "Illustrate", - "Imagine", - "Impersonate", - "Implement", - "Imply", - "Import", - "Importune", - "Impose", - "Improve", - "Include", - "Incorporate", - "Increase", - "Incur", - "Indent", - "Indicate", - "Infer", - "Influence", - "Inform", - "Inherit", - "Init", - "Initialize", - "Initiate", - "Injure", - "In-line", - "Inline", - "Insist", - "Install", - "Instantiate", - "Instruct", - "Integrate", - "Intend", - "Intercept", - "Internalize", - "Interpret", - "Introduce", - "Invalidate", - "Invert", - "Invest", - "Investigate", - "Invite", - "Invoke", - "Involve", - "Isolate", - "Issue", - "Join", - "Journey", - "Joy", - "Judge", - "Jump", - "Justify", - "Keep", - "Key", - "Kick", - "Kill", - "Kiss", - "Knock", - "Know", - "Label", - "Lack", - "Land", - "Last", - "Laugh", - "Launch", - "Lay", - "Lead", - "Lean", - "Leap", - "Learn", - "Leave", - "Let", - "Lie", - "Lift", - "Light", - "Like", - "Limit", - "Link", - "List", - "Listen", - "Live", - "Load", - "Localize", - "Locate", - "Lock", - "Log", - "Login", - "Look", - "Loop", - "Lose", - "Love", - "Lower", - "Maintain", - "Make", - "Manage", - "Map", - "Mark", - "Marry", - "Match", - "Materialize", - "Matter", - "Mean", - "Measure", - "Meet", - "Memoize", - "Menace", - "Mention", - "Merge", - "Migrate", - "Mind", - "Mirror", - "Misinform", - "Miss", - "Mix", - "Mock", - "Modernize", - "Modify", - "Monitor", - "Monomorphize", - "Move", - "Mutate", - "Name", - "Navigate", - "Near", - "Need", - "Nod", - "Normalize", - "Notarize", - "Note", - "Notice", - "Notify", - "Observe", - "Obtain", - "Occupy", - "Occur", - "Offer", - "Officiate", - "Omit", - "Open", - "Operate", - "Optimise", - "Optimize", - "Order", - "Organise", - "Organize", - 
"Output", - "Overhaul", - "Override", - "Overwrite", - "Owe", - "Own", - "Pack", - "Package", - "Paint", - "Panic", - "Parameterize", - "Parse", - "Partake", - "Pass", - "Patch", - "Pause", - "Pay", - "Perform", - "Permit", - "Persist", - "Persuade", - "Pick", - "Pin", - "Ping", - "Pipe", - "Place", - "Plan", - "Play", - "Plow", - "Point", - "Ponder", - "Populate", - "Port", - "Position", - "Possess", - "Pour", - "Predict", - "Prefer", - "Prefix", - "Prepare", - "Present", - "Preserve", - "Press", - "Presume", - "Prevent", - "Print", - "Prioritize", - "Privatize", - "Proceed", - "Process", - "Procure", - "Produce", - "Prolong", - "Promise", - "Promote", - "Prompt", - "Propagate", - "Propose", - "Prosecute", - "Protect", - "Protest", - "Prove", - "Provide", - "Prune", - "Publish", - "Pull", - "Purchase", - "Purge", - "Pursue", - "Push", - "Put", - "Puton", - "Qualify", - "Query", - "Question", - "Queue", - "Quit", - "Quote", - "Race", - "Raise", - "Randomize", - "Reach", - "React", - "Read", - "Realise", - "Realize", - "Reapply", - "Rearrange", - "Reason", - "Rebuild", - "Recall", - "Receive", - "Reckon", - "Recognise", - "Recognize", - "Recommend", - "Reconnect", - "Record", - "Recover", - "Recur", - "Redact", - "Re-define", - "Redefine", - "Re-design", - "Redesign", - "Redirect", - "Re-do", - "Redo", - "Reduce", - "Re-enable", - "Refactor", - "Refer", - "Reference", - "Refine", - "Reflect", - "Reformat", - "Refresh", - "Refuse", - "Regard", - "Regenerate", - "Register", - "Reimplement", - "Re-instate", - "Reinstate", - "Reject", - "Relate", - "Relax", - "Release", - "Reload", - "Rely", - "Remain", - "Remember", - "Remind", - "Remove", - "Rename", - "Render", - "Re-order", - "Reorder", - "Reorganise", - "Reorganize", - "Repair", - "Reparent", - "Repeat", - "Repel", - "Rephrase", - "Replace", - "Reply", - "Report", - "Reposition", - "Represent", - "Request", - "Require", - "Rerender", - "Rerun", - "Re-scale", - "Rescale", - "Research", - "Re-set", - "Reset", - 
"Reside", - "Resize", - "Resolve", - "Respect", - "Respond", - "Rest", - "Restart", - "Restore", - "Restrict", - "Restructure", - "Result", - "Resume", - "Resurface", - "Retain", - "Retire", - "Retreat", - "Retrieve", - "Retry", - "Return", - "Reuse", - "Revamp", - "Reveal", - "Reverse", - "Revert", - "Review", - "Revise", - "Revisit", - "Revoke", - "Reword", - "Re-wrap", - "Rewrap", - "Rewrite", - "Ride", - "Ring", - "Rise", - "Roll", - "Rotate", - "Round", - "Route", - "Rule", - "Run", - "Sale", - "Salute", - "Sample", - "Sanitize", - "Save", - "Say", - "Scale", - "Scope", - "Score", - "Scroll", - "Search", - "Secure", - "See", - "Seek", - "Seem", - "Select", - "Self-initialize", - "Sell", - "Send", - "Separate", - "Serialize", - "Serve", - "Set", - "Settle", - "Shake", - "Shape", - "Share", - "Shift", - "Shoot", - "Shorten", - "Shout", - "Show", - "Shrink", - "Shuffle", - "Shut", - "Sign", - "Signify", - "Silence", - "Simplify", - "Simulate", - "Sing", - "Sit", - "Size", - "Skip", - "Sleep", - "Slide", - "Slip", - "Smile", - "Solve", - "Sort", - "Sound", - "Source", - "Spawn", - "Speak", - "Specify", - "Spend", - "Split", - "Spread", - "Stand", - "Standardize", - "Stare", - "Start", - "State", - "Stay", - "Steal", - "Steer", - "Step", - "Stick", - "Stop", - "Store", - "Stress", - "Stretch", - "Strike", - "Stringify", - "Strip", - "Struggle", - "Stub", - "Study", - "Style", - "Subclass", - "Submit", - "Substitute", - "Subtract", - "Succeed", - "Suffer", - "Suggest", - "Suit", - "Supply", - "Support", - "Suppose", - "Suppress", - "Surround", - "Survive", - "Suspect", - "Swallow", - "Swap", - "Sway", - "Switch", - "Sync", - "Synchronise", - "Synchronize", - "Synthesize", - "Take", - "Talk", - "Talkover", - "Target", - "Teach", - "Tell", - "Tempt", - "Tend", - "Terminate", - "Test", - "Testify", - "Thank", - "Think", - "Threaten", - "Throw", - "Tie", - "Time", - "Toggle", - "Touch", - "Track", - "Trade", - "Train", - "Transfer", - "Transform", - "Translate", - 
"Transpile", - "Trash", - "Travel", - "Tread", - "Treat", - "Trigger", - "Trim", - "Truncate", - "Trust", - "Try", - "Tune", - "Turn", - "Tweak", - "Twist", - "Unblock", - "Uncomment", - "Uncover", - "Understand", - "Undertake", - "Undo", - "Undry", - "Unescape", - "Unfold", - "Unify", - "Unignore", - "Unite", - "Unload", - "Unlock", - "Unpack", - "Unregister", - "Unskip", - "Unsubscribe", - "Untrack", - "Unwrap", - "Update", - "Upgrade", - "Upload", - "Urge", - "Use", - "Utter", - "Validate", - "Value", - "Vanish", - "Vary", - "Verbosify", - "Verify", - "View", - "Visit", - "Vocalize", - "Voice", - "Vote", - "Wait", - "Wake", - "Walk", - "Want", - "Warn", - "Warrant", - "Wash", - "Watch", - "Wear", - "Weep", - "Weigh", - "Welcome", - "Whitelist", - "Win", - "Wipe", - "Wire", - "Wish", - "Withdraw", - "Wonder", - "Work", - "Workout", - "Worry", - "Wrap", - "Write" +allowed_verbs = [ + "Abandon", + "Abort", + "Abstract", + "Accept", + "Accomodate", + "Accompany", + "Account", + "Accumulate", + "Accuse", + "Ache", + "Achieve", + "Acknowledge", + "Acquire", + "Act", + "Activate", + "Active", + "Adapt", + "Add", + "Address", + "Adhere", + "Adjust", + "Admit", + "Adopt", + "Advance", + "Advise", + "Advocate", + "Affect", + "Affirm", + "Afford", + "Agree", + "Aim", + "Align", + "Allow", + "Alter", + "Amend", + "Analyse", + "Analyze", + "Anchor", + "Annotate", + "Announce", + "Annoy", + "Annul", + "Answer", + "Appeal", + "Appear", + "Append", + "Applicate", + "Apply", + "Appoint", + "Appreciate", + "Approach", + "Approve", + "Argue", + "Arise", + "Arrange", + "Arrest", + "Arrive", + "Ask", + "Assert", + "Assess", + "Assign", + "Assist", + "Associate", + "Assume", + "Assure", + "Attach", + "Attack", + "Attempt", + "Attend", + "Attract", + "Augment", + "Avoid", + "Awake", + "Back", + "Backport", + "Backup", + "Bake", + "Base", + "Battle", + "Be", + "Bear", + "Beat", + "Become", + "Begin", + "Behave", + "Believe", + "Belong", + "Bend", + "Benefit", + "Better", + "Beware", + 
"Bind", + "Blacklist", + "Blame", + "Blend", + "Block", + "Blow", + "Blur", + "Bootstrap", + "Born", + "Borrow", + "Bother", + "Break", + "Bridge", + "Bring", + "Broadcast", + "Buffer", + "Build", + "Bump", + "Bundle", + "Burn", + "Busy", + "Buy", + "Bypass", + "Cache", + "Calculate", + "Call", + "Cancel", + "Capitalize", + "Capture", + "Care", + "Carry", + "Carryout", + "Cast", + "Catch", + "Categorize", + "Cause", + "Center", + "Centralize", + "Challenge", + "Change", + "Chant", + "Charge", + "Chase", + "Chat", + "Check", + "Choose", + "Circle", + "Claim", + "Clarify", + "Clean", + "Cleanse", + "Clear", + "Climb", + "Clip", + "Close", + "Clothe", + "Coalesce", + "Collapse", + "Collect", + "Combine", + "Come", + "Command", + "Comment", + "Commit", + "Compare", + "Compensate", + "Compile", + "Complain", + "Complement", + "Complete", + "Compose", + "Compress", + "Compute", + "Conceal", + "Concentrate", + "Conclude", + "Concur", + "Conduct", + "Configure", + "Confirm", + "Confront", + "Connect", + "Connote", + "Consider", + "Consist", + "Consolidate", + "Constitute", + "Construct", + "Consume", + "Contact", + "Contain", + "Contest", + "Continue", + "Contribute", + "Control", + "Convert", + "Convey", + "Cook", + "Coordinate", + "Cope", + "Copy", + "Correct", + "Cost", + "Counsel", + "Count", + "Cover", + "Create", + "Cross", + "Cry", + "Cut", + "Cycle", + "Damage", + "Dance", + "Deal", + "Debate", + "Decide", + "Declare", + "Decode", + "Deconstruct", + "Decouple", + "Decrease", + "Dedup", + "Duplicate", + "Deduplicate", + "Default", + "Defeat", + "Defend", + "Defer", + "Define", + "Delay", + "Delegate", + "Delete", + "Deliver", + "Demand", + "Demolish", + "Demonstrate", + "Deny", + "Depart", + "Depend", + "Depict", + "Deprecate", + "Derive", + "Describe", + "Deserialize", + "Design", + "Desire", + "Destroy", + "Detail", + "Detect", + "Determine", + "Develop", + "Devote", + "Die", + "Dim", + "Direct", + "Disable", + "Disallow", + "Disappear", + "Disconnect", + 
"Discontinue", + "Discourage", + "Discover", + "Discuss", + "Dislike", + "Dismiss", + "Dispatch", + "Displace", + "Display", + "Distinguish", + "Divide", + "Do", + "Document", + "Dominate", + "Downgrade", + "Download", + "Draw", + "Dread", + "Dress", + "Drink", + "Drive", + "Drop", + "Dry", + "Dump", + "Duplicate", + "Earn", + "Eat", + "Echo", + "Edit", + "Educate", + "Elaborate", + "Elect", + "Elevate", + "Eliminate", + "Embed", + "Emerge", + "Emit", + "Employ", + "Empty", + "Enable", + "Encapsulate", + "Encourage", + "End", + "Endorse", + "Endure", + "Enforce", + "Engage", + "Enhance", + "Enjoy", + "Enquire", + "Enroll", + "Ensure", + "Enter", + "Enumerate", + "Equal", + "Equate", + "Erase", + "Escape", + "Establish", + "Estimate", + "Evaluate", + "Examine", + "Except", + "Exclude", + "Excuse", + "Execute", + "Exempt", + "Exercise", + "Exert", + "Exist", + "Exit", + "Expand", + "Expect", + "Experience", + "Explain", + "Explore", + "Export", + "Expose", + "Express", + "Extend", + "Extract", + "Face", + "Factor", + "Fail", + "Fall", + "Fault", + "Favor", + "Fear", + "Feature", + "Feed", + "Feel", + "Fetch", + "Fight", + "Fill", + "Filter", + "Find", + "Finish", + "Fit", + "Fix", + "Flatten", + "Flee", + "Flip", + "Float", + "Flow", + "Flunk", + "Flush", + "Fly", + "Focus", + "Fold", + "Follow", + "Force", + "Foresee", + "Forget", + "Fork", + "Form", + "Formalize", + "Format", + "Forward", + "Found", + "Free", + "Freeze", + "Gain", + "Gather", + "Generalize", + "Generate", + "Get", + "Gitignore", + "Give", + "Giveup", + "Glance", + "Go", + "Going", + "Govern", + "Grant", + "Grin", + "Group", + "Grow", + "Guard", + "Guess", + "Guide", + "Hack", + "Halt", + "Hand", + "Handle", + "Hang", + "Happen", + "Hardcode", + "Harm", + "Hate", + "Have", + "Head", + "Hear", + "Help", + "Hide", + "Highlight", + "Hint", + "Hire", + "Hit", + "Hold", + "Hook", + "Hope", + "House", + "Hurt", + "Identify", + "Ignore", + "Illuminate", + "Illustrate", + "Imagine", + "Impersonate", + 
"Implement", + "Imply", + "Import", + "Importune", + "Impose", + "Improve", + "Include", + "Incorporate", + "Increase", + "Incur", + "Indent", + "Indicate", + "Infer", + "Influence", + "Inform", + "Inherit", + "Init", + "Initialize", + "Initiate", + "Injure", + "In-line", + "Inline", + "Insist", + "Install", + "Instantiate", + "Instruct", + "Integrate", + "Intend", + "Intercept", + "Internalize", + "Interpret", + "Introduce", + "Invalidate", + "Invert", + "Invest", + "Investigate", + "Invite", + "Invoke", + "Involve", + "Isolate", + "Issue", + "Join", + "Journey", + "Joy", + "Judge", + "Jump", + "Justify", + "Keep", + "Key", + "Kick", + "Kill", + "Kiss", + "Knock", + "Know", + "Label", + "Lack", + "Land", + "Last", + "Laugh", + "Launch", + "Lay", + "Lead", + "Lean", + "Leap", + "Learn", + "Leave", + "Let", + "Lie", + "Lift", + "Light", + "Like", + "Limit", + "Link", + "List", + "Listen", + "Live", + "Load", + "Localize", + "Locate", + "Lock", + "Log", + "Login", + "Look", + "Loop", + "Lose", + "Love", + "Lower", + "Maintain", + "Make", + "Manage", + "Map", + "Mark", + "Marry", + "Match", + "Materialize", + "Matter", + "Mean", + "Measure", + "Meet", + "Memoize", + "Menace", + "Mention", + "Merge", + "Migrate", + "Mind", + "Mirror", + "Misinform", + "Miss", + "Mix", + "Mock", + "Modernize", + "Modify", + "Monitor", + "Monomorphize", + "Move", + "Mutate", + "Name", + "Navigate", + "Near", + "Need", + "Nod", + "Normalize", + "Notarize", + "Note", + "Notice", + "Notify", + "Observe", + "Obtain", + "Occupy", + "Occur", + "Offer", + "Officiate", + "Omit", + "Open", + "Operate", + "Optimise", + "Optimize", + "Order", + "Organise", + "Organize", + "Output", + "Overhaul", + "Override", + "Overwrite", + "Owe", + "Own", + "Pack", + "Package", + "Paint", + "Panic", + "Parameterize", + "Parse", + "Partake", + "Pass", + "Patch", + "Pause", + "Pay", + "Perform", + "Permit", + "Persist", + "Persuade", + "Pick", + "Pin", + "Ping", + "Pipe", + "Place", + "Plan", + "Play", + "Plow", + 
"Point", + "Ponder", + "Populate", + "Port", + "Position", + "Possess", + "Pour", + "Predict", + "Prefer", + "Prefix", + "Prepare", + "Present", + "Preserve", + "Press", + "Presume", + "Prevent", + "Print", + "Prioritize", + "Privatize", + "Proceed", + "Process", + "Procure", + "Produce", + "Prolong", + "Promise", + "Promote", + "Prompt", + "Propagate", + "Propose", + "Prosecute", + "Protect", + "Protest", + "Prove", + "Provide", + "Prune", + "Publish", + "Pull", + "Purchase", + "Purge", + "Pursue", + "Push", + "Put", + "Puton", + "Qualify", + "Query", + "Question", + "Queue", + "Quit", + "Quote", + "Race", + "Raise", + "Randomize", + "Reach", + "React", + "Read", + "Realise", + "Realize", + "Reapply", + "Rearrange", + "Reason", + "Rebuild", + "Recall", + "Receive", + "Reckon", + "Recognise", + "Recognize", + "Recommend", + "Reconnect", + "Record", + "Recover", + "Recur", + "Redact", + "Re-define", + "Redefine", + "Re-design", + "Redesign", + "Redirect", + "Re-do", + "Redo", + "Reduce", + "Re-enable", + "Refactor", + "Refer", + "Reference", + "Refine", + "Reflect", + "Reformat", + "Refresh", + "Refuse", + "Regard", + "Regenerate", + "Register", + "Reimplement", + "Re-instate", + "Reinstate", + "Reject", + "Relate", + "Relax", + "Release", + "Reload", + "Rely", + "Remain", + "Remember", + "Remind", + "Remove", + "Rename", + "Render", + "Re-order", + "Reorder", + "Reorganise", + "Reorganize", + "Repair", + "Reparent", + "Repeat", + "Repel", + "Rephrase", + "Replace", + "Reply", + "Report", + "Reposition", + "Represent", + "Request", + "Require", + "Rerender", + "Rerun", + "Re-scale", + "Rescale", + "Research", + "Re-set", + "Reset", + "Reside", + "Resize", + "Resolve", + "Respect", + "Respond", + "Rest", + "Restart", + "Restore", + "Restrict", + "Restructure", + "Result", + "Resume", + "Resurface", + "Retain", + "Retire", + "Retreat", + "Retrieve", + "Retry", + "Return", + "Reuse", + "Revamp", + "Reveal", + "Reverse", + "Revert", + "Review", + "Revise", + "Revisit", 
+ "Revoke", + "Reword", + "Re-wrap", + "Rewrap", + "Rewrite", + "Ride", + "Ring", + "Rise", + "Roll", + "Rotate", + "Round", + "Route", + "Rule", + "Run", + "Sale", + "Salute", + "Sample", + "Sanitize", + "Save", + "Say", + "Scale", + "Scope", + "Score", + "Scroll", + "Search", + "Secure", + "See", + "Seek", + "Seem", + "Select", + "Self-initialize", + "Sell", + "Send", + "Separate", + "Serialize", + "Serve", + "Set", + "Settle", + "Shake", + "Shape", + "Share", + "Shift", + "Shoot", + "Shorten", + "Shout", + "Show", + "Shrink", + "Shuffle", + "Shut", + "Sign", + "Signify", + "Silence", + "Simplify", + "Simulate", + "Sing", + "Sit", + "Size", + "Skip", + "Sleep", + "Slide", + "Slip", + "Smile", + "Solve", + "Sort", + "Sound", + "Source", + "Spawn", + "Speak", + "Specify", + "Spend", + "Split", + "Spread", + "Stand", + "Standardize", + "Stare", + "Start", + "State", + "Stay", + "Steal", + "Steer", + "Step", + "Stick", + "Stop", + "Store", + "Stress", + "Stretch", + "Strike", + "Stringify", + "Strip", + "Struggle", + "Stub", + "Study", + "Style", + "Subclass", + "Submit", + "Substitute", + "Subtract", + "Succeed", + "Suffer", + "Suggest", + "Suit", + "Supply", + "Support", + "Suppose", + "Suppress", + "Surround", + "Survive", + "Suspect", + "Swallow", + "Swap", + "Sway", + "Switch", + "Sync", + "Synchronise", + "Synchronize", + "Synthesize", + "Take", + "Talk", + "Talkover", + "Target", + "Teach", + "Tell", + "Tempt", + "Tend", + "Terminate", + "Test", + "Testify", + "Thank", + "Think", + "Threaten", + "Throw", + "Tie", + "Time", + "Toggle", + "Touch", + "Track", + "Trade", + "Train", + "Transfer", + "Transform", + "Translate", + "Transpile", + "Trash", + "Travel", + "Tread", + "Treat", + "Trigger", + "Trim", + "Truncate", + "Trust", + "Try", + "Tune", + "Turn", + "Tweak", + "Twist", + "Unblock", + "Uncomment", + "Uncover", + "Understand", + "Undertake", + "Undo", + "Undry", + "Unescape", + "Unfold", + "Unify", + "Unignore", + "Unite", + "Unload", + "Unlock", + 
"Unpack", + "Unregister", + "Unskip", + "Unsubscribe", + "Untrack", + "Unwrap", + "Update", + "Upgrade", + "Upload", + "Urge", + "Use", + "Utter", + "Validate", + "Value", + "Vanish", + "Vary", + "Verbosify", + "Verify", + "View", + "Visit", + "Vocalize", + "Voice", + "Vote", + "Wait", + "Wake", + "Walk", + "Want", + "Warn", + "Warrant", + "Wash", + "Watch", + "Wear", + "Weep", + "Weigh", + "Welcome", + "Whitelist", + "Win", + "Wipe", + "Wire", + "Wish", + "Withdraw", + "Wonder", + "Work", + "Workout", + "Worry", + "Wrap", + "Write", ] diff --git a/dev/format.sh b/dev/format.sh index e1e2abc307f1..a3129b932e5d 100755 --- a/dev/format.sh +++ b/dev/format.sh @@ -2,6 +2,8 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ +taplo fmt + # Python python -m flwr_tool.check_copyright src/py/flwr python -m flwr_tool.init_py_fix src/py/flwr @@ -36,3 +38,6 @@ python -m nbstripout examples/*/*.ipynb --extra-keys "$KEYS" # Markdown python -m mdformat --number doc/source examples + +# RST +docstrfmt doc/source diff --git a/dev/setup-defaults.sh b/dev/setup-defaults.sh index 36cbfe4df671..af5f0cb9d3ce 100755 --- a/dev/setup-defaults.sh +++ b/dev/setup-defaults.sh @@ -1,7 +1,7 @@ #!/bin/bash set -e -version=${1:-3.8.17} +version=${1:-3.9.20} # To install pyenv and virtualenv plugin function install_pyenv(){ diff --git a/dev/test.sh b/dev/test.sh index 58ac0b3d24cd..b8eeed14bc46 100755 --- a/dev/test.sh +++ b/dev/test.sh @@ -26,6 +26,10 @@ echo "- docformatter: start" python -m docformatter -c -r src/py/flwr e2e -e src/py/flwr/proto echo "- docformatter: done" +echo "- docsig: start" +docsig src/py/flwr +echo "- docsig: done" + echo "- ruff: start" python -m ruff check src/py/flwr echo "- ruff: done" @@ -56,6 +60,22 @@ echo "- mdformat: done" echo "- All Markdown checks passed" +echo "- Start TOML checks" + +echo "- taplo: start" +taplo fmt --check +echo "- taplo: done" + +echo "- All TOML checks passed" + +echo "- Start rST checks" + +echo "- 
docstrfmt: start" +docstrfmt --check doc/source +echo "- docstrfmt: done" + +echo "- All rST checks passed" + echo "- Start license checks" echo "- copyright: start" diff --git a/dev/update_python.py b/dev/update_python.py new file mode 100644 index 000000000000..5eea6af75488 --- /dev/null +++ b/dev/update_python.py @@ -0,0 +1,238 @@ +"""Script to update Python versions in the codebase.""" + +import argparse +import re +from pathlib import Path + + +def _compute_old_version(new_version): + """Compute the old version as the immediate previous minor version.""" + major_str, minor_str = new_version.split(".") + major = int(major_str) + minor = int(minor_str) + + if minor > 0: + old_minor = minor - 1 + old_version = f"{major}.{old_minor}" + else: + raise ValueError("Minor version is 0, can't infer previous version.") + return old_version + + +def _update_python_versions( + new_full_version, + patch_only=False, + dry_run=False, +): + """Update Python version strings in the specified files.""" + new_major_minor = ".".join(new_full_version.split(".")[:2]) + + if patch_only: + print(f"Updating patch version for {new_major_minor} to {new_full_version}") + + # Define the version pattern to match any full version with the same major.minor + version_pattern = re.escape(new_major_minor) + r"\.\d+" + + # Define the file patterns and corresponding replacement patterns + replacements = { + # Shell scripts + "dev/*.sh": [ + # Update version in scripts + ( + r"(version=\$\{1:-)" + version_pattern + r"(\})", + r"\g<1>" + new_full_version + r"\g<2>", + ), + # Update pyenv uninstall commands + ( + r"(pyenv uninstall -f flower-)" + version_pattern, + r"\g<1>" + new_full_version, + ), + ], + # Python files + "**/*.py": [ + # Update version assignments + ( + r'(["\'])' + version_pattern + r'(["\'])', + r"\g<1>" + new_full_version + r"\g<2>", + ), + ], + # Documentation files + "doc/source/conf.py": [ + # Update Python full version in conf.py + ( + 
r"(\.\.\s*\|python_full_version\|\s*replace::\s*)" + + version_pattern, + r"\g<1>" + new_full_version, + ), + ], + } + else: + # Compute old_version as immediate previous minor version + old_version = _compute_old_version(new_major_minor) + + print(f"Determined old version: {old_version}") + print( + f"Updating to new version: {new_major_minor} " + f"(full version: {new_full_version})" + ) + + # Define the file patterns and corresponding replacement patterns + replacements = { + # action.yml files + ".github/actions/bootstrap/action.yml": [ + # Update default Python version + ( + r"^(\s*default:\s*)" + re.escape(old_version) + r"(\s*)$", + r"\g<1>" + new_major_minor + r"\g<2>", + ), + ], + # YAML workflow files + ".github/workflows/*.yml": [ + # Update specific python-version entries + ( + r"^(\s*python-version:\s*)" + re.escape(old_version) + r"(\s*)$", + r"\g<1>" + new_major_minor + r"\g<2>", + ), + ( + r"(['\"]?)" + re.escape(old_version) + r"(['\"]?,?\s*)", + lambda m: ( + "" if m.group(2).strip() == "," else "" + ), # Handle the case where a comma follows + ), + ], + # Shell scripts + "dev/*.sh": [ + # Update version in scripts + ( + r"(version=\$\{1:-)" + re.escape(old_version) + r"(\.\d+)?(\})", + r"\g<1>" + new_full_version + r"\g<3>", + ), + # Update pyenv uninstall commands + ( + r"(pyenv uninstall -f flower-)" + + re.escape(old_version) + + r"(\.\d+)?", + r"\g<1>" + new_full_version, + ), + ], + # pyproject.toml files + "**/pyproject.toml": [ + # Update python version constraints + ( + r'(python\s*=\s*">=)' + + re.escape(old_version) + + r'(,\s*<\d+\.\d+")', + r"\g<1>" + new_major_minor + r"\g<2>", + ), + ], + "dev/*.py": [ + # Update version assignments + ( + r'(["\'])' + re.escape(old_version) + r'(\.\d+)?(["\'],?)\s*\n?', + lambda m: ( + "" if m.group(3) == "," else "" + ), # Remove version and handle comma if present + ), + ], + # Python files + "**/*.py": [ + # Update version assignments + ( + r'(["\'])' + re.escape(old_version) + 
r'(\.\d+)?(["\'])', + r"\g<1>" + new_full_version + r"\g<3>", + ), + ], + # Documentation files + "doc/source/conf.py": [ + # Update Python version in conf.py + ( + r"(\.\.\s*\|python_version\|\s*replace::\s*)" + + re.escape(old_version), + r"\g<1>" + new_major_minor, + ), + # Update Python full version in conf.py + ( + r"(\.\.\s*\|python_full_version\|\s*replace::\s*)" + + re.escape(old_version) + + r"\.\d+", + r"\g<1>" + new_full_version, + ), + ], + # ReStructuredText files + "doc/source/*.rst": [ + # Update Python version in rst files + ( + r"(`Python\s*" + + re.escape(old_version) + + r"\s*`_)", + r"`Python " + + new_major_minor + + " `_", + ), + ], + # PO files for localization + "doc/locales/*/LC_MESSAGES/framework-docs.po": [ + # Update Python version in localization files + ( + r"(`Python\s*" + + re.escape(old_version) + + r"\s*`_)", + r"`Python " + + new_major_minor + + " `_", + ), + ], + } + + # Process each file pattern + for file_pattern, patterns in replacements.items(): + for file_path in Path().rglob(file_pattern): + if not file_path.is_file(): + continue + content = file_path.read_text() + original_content = content + for pattern, repl in patterns: + if callable(repl): + content = re.sub(pattern, repl, content, flags=re.MULTILINE) + else: + content = re.sub(pattern, repl, content, flags=re.MULTILINE) + if content != original_content: + if dry_run: + print(f"Would update {file_path}") + else: + file_path.write_text(content) + print(f"Updated {file_path}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Script to update Python versions in the codebase." 
+ ) + parser.add_argument( + "new_full_version", help="New full Python version to use (e.g., 3.9.22)" + ) + parser.add_argument( + "--patch-only", + action="store_true", + help="Update only the patch version for matching major.minor versions.", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Show changes without modifying files.", + ) + args = parser.parse_args() + + _update_python_versions( + new_full_version=args.new_full_version, + patch_only=args.patch_only, + dry_run=args.dry_run, + ) diff --git a/dev/update_version.py b/dev/update_version.py new file mode 100644 index 000000000000..cbb4d8e138c2 --- /dev/null +++ b/dev/update_version.py @@ -0,0 +1,150 @@ +"""Utility used to bump the version of the package.""" + +import argparse +import re +import sys +from pathlib import Path + + +REPLACE_CURR_VERSION = { + "doc/source/conf.py": [ + ".. |stable_flwr_version| replace:: {version}", + ], + "src/py/flwr/cli/new/templates/app/pyproject.*.toml.tpl": [ + "flwr[simulation]>={version}", + ], + "src/docker/complete/compose.yml": ["FLWR_VERSION:-{version}"], + "src/docker/distributed/client/compose.yml": ["FLWR_VERSION:-{version}"], + "src/docker/distributed/server/compose.yml": ["FLWR_VERSION:-{version}"], +} + +REPLACE_NEXT_VERSION = { + "pyproject.toml": ['version = "{version}"'], + "doc/source/conf.py": [ + 'release = "{version}"', + ], + "examples/doc/source/conf.py": ['release = "{version}"'], + "baselines/doc/source/conf.py": ['release = "{version}"'], +} + +EXAMPLES = { + "examples/*/pyproject.toml": [ + "flwr[simulation]=={version}", + "flwr[simulation]>={version}", + ], +} + + +def _get_next_version(curr_version, increment): + """Calculate the next version based on the type of release.""" + major, minor, patch_version = map(int, curr_version.split(".")) + if increment == "patch": + patch_version += 1 + elif increment == "minor": + minor += 1 + patch_version = 0 + elif increment == "major": + major += 1 + minor = 0 + patch_version = 0 + 
else: + raise ValueError( + "Invalid increment type. Must be 'major', 'minor', or 'patch'." + ) + return f"{major}.{minor}.{patch_version}" + + +def _update_versions(file_patterns, replace_strings, new_version, check): + """Update the version strings in the specified files.""" + wrong = False + for pattern in file_patterns: + files = list(Path(__file__).parents[1].glob(pattern)) + for file_path in files: + if not file_path.is_file(): + continue + content = file_path.read_text() + original_content = content + for s in replace_strings: + # Construct regex pattern to match any version number in the string + escaped_s = re.escape(s).replace(r"\{version\}", r"(\d+\.\d+\.\d+)") + regex_pattern = re.compile(escaped_s) + content = regex_pattern.sub(s.format(version=new_version), content) + if content != original_content: + wrong = True + if check: + print(f"{file_path} would be updated") + else: + file_path.write_text(content) + print(f"Updated {file_path}") + + return wrong + + +if __name__ == "__main__": + conf_path = Path("doc/source/conf.py") + + if not conf_path.is_file(): + raise FileNotFoundError(f"{conf_path} not found!") + + content = conf_path.read_text() + + # Search for the current non-updated version + match = re.search(r"\.\.\s*\|stable_flwr_version\|\s*replace::\s*(\S+)", content) + + parser = argparse.ArgumentParser( + description="Utility used to bump the version of the package." + ) + parser.add_argument( + "--old_version", + help="Current (non-updated) version of the package, soon to be the old version.", + default=match.group(1) if match else None, + ) + parser.add_argument( + "--check", action="store_true", help="Fails if any file would be modified." + ) + parser.add_argument( + "--examples", action="store_true", help="Also modify flwr version in examples." + ) + + group = parser.add_mutually_exclusive_group() + group.add_argument( + "--patch", action="store_true", help="Increment the patch version." 
+ ) + group.add_argument( + "--major", action="store_true", help="Increment the major version." + ) + args = parser.parse_args() + + if not args.old_version: + raise ValueError("Version not found in conf.py, please provide current version") + + # Determine the type of version increment + if args.major: + increment = "major" + elif args.patch: + increment = "patch" + else: + increment = "minor" + + curr_version = _get_next_version(args.old_version, increment) + next_version = _get_next_version(curr_version, "minor") + + wrong = False + + # Update files with next version + for file_pattern, strings in REPLACE_NEXT_VERSION.items(): + if not _update_versions([file_pattern], strings, next_version, args.check): + wrong = True + + # Update files with current version + for file_pattern, strings in REPLACE_CURR_VERSION.items(): + if not _update_versions([file_pattern], strings, curr_version, args.check): + wrong = True + + if args.examples: + for file_pattern, strings in EXAMPLES.items(): + if not _update_versions([file_pattern], strings, curr_version, args.check): + wrong = True + + if wrong and args.check: + sys.exit("Some version haven't been updated.") diff --git a/dev/venv-create.sh b/dev/venv-create.sh index 63e82131d2fb..112f3a4b2917 100755 --- a/dev/venv-create.sh +++ b/dev/venv-create.sh @@ -2,7 +2,7 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ -version=${1:-3.8.17} +version=${1:-3.9.20} # Check if the directory for the Python version does not exist and if so, # install the right Python version through pyenv diff --git a/dev/venv-delete.sh b/dev/venv-delete.sh index 3a74d2fb8a4e..50bed76b203f 100755 --- a/dev/venv-delete.sh +++ b/dev/venv-delete.sh @@ -2,6 +2,6 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ -version=${1:-3.8.17} +version=${1:-3.9.20} pyenv uninstall -f flower-$version diff --git a/dev/venv-reset.sh b/dev/venv-reset.sh index 69713f7df62a..5ab05f29c137 100755 --- 
a/dev/venv-reset.sh +++ b/dev/venv-reset.sh @@ -2,7 +2,7 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../ -version=${1:-3.8.17} +version=${1:-3.9.20} # Delete caches, venv, and lock file ./dev/rm-caches.sh diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po index efa10a69531c..a11f44f6bd59 100644 --- a/doc/locales/fr/LC_MESSAGES/framework-docs.po +++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po @@ -3,7 +3,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower Docs\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-06-17 16:09+0200\n" +"POT-Creation-Date: 2024-10-10 00:29+0000\n" "PO-Revision-Date: 2023-09-05 17:54+0000\n" "Last-Translator: Charles Beauville \n" "Language: fr\n" @@ -13,48 +13,198 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Architecture florale" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "Moteur client Edge" +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" -msgstr "`Flower `_ architecture de base avec Edge Client Engine" +"Everything that is not part of the public API is part of the private API." +" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "Moteur de client virtuel" +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 +#, fuzzy +msgid "Flower public API" +msgstr "Flower ClientApp." + +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 +msgid "Flower has a well-defined public API. Let's look at this in more detail." 
+msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." 
+msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 +msgid "" +"What about components that are nested deeper in the hierarchy? Let's look" +" at Flower strategies to see another typical pattern. Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." 
+msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 +msgid "" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 +msgid "Flower public API of private packages" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" msgstr "" -"`Flower `_ architecture de base avec moteur de client " -"virtuel" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "Moteur client virtuel et moteur client Edge dans la même charge de travail" +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. 
Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." msgstr "" -"`Flower `_ architecture de base avec un moteur de " -"client virtuel et un moteur de client périphérique" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +msgid "How to Build Docker Flower Images Locally" msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:4 @@ -74,31 +224,23 @@ msgid "" "development environment." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:12 +#: ../../source/contributor-how-to-build-docker-images.rst:13 #, fuzzy -msgid "Clone the flower repository." +msgid "Clone the ``flower`` repository." msgstr "**Fourche le dépôt de Flower**" -#: ../../source/contributor-how-to-build-docker-images.rst:18 -#: ../../source/how-to-run-flower-using-docker.rst:165 +#: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "Verify the Docker daemon is running." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:20 -#: ../../source/how-to-run-flower-using-docker.rst:167 -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." -msgstr "" - -#: ../../source/contributor-how-to-build-docker-images.rst:25 +#: ../../source/contributor-how-to-build-docker-images.rst:21 msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " "``src/docker``." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:24 msgid "" "Flower Docker images are configured via build arguments. Through build " "arguments, we can make the creation of images more flexible. For example," @@ -109,203 +251,229 @@ msgid "" "below." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:35 +#: ../../source/contributor-how-to-build-docker-images.rst:32 #, fuzzy -msgid "Building the base image" +msgid "Building the Base Image" msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:41 -#: ../../source/contributor-how-to-build-docker-images.rst:98 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 #, fuzzy msgid "Build argument" msgstr "Amélioration de la documentation" -#: ../../source/contributor-how-to-build-docker-images.rst:42 -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 #, fuzzy msgid "Description" msgstr "Dépréciations" -#: ../../source/contributor-how-to-build-docker-images.rst:43 -#: ../../source/contributor-how-to-build-docker-images.rst:100 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: 
../../source/contributor-how-to-build-docker-images.rst:106 #, fuzzy msgid "Required" msgstr "Changements nécessaires" -#: ../../source/contributor-how-to-build-docker-images.rst:44 -#: ../../source/contributor-how-to-build-docker-images.rst:101 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 +#: ../../source/docker/set-environment-variables.rst:8 #, fuzzy msgid "Example" msgstr "Exemples de PyTorch" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:42 msgid "``DISTRO``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:43 #, fuzzy msgid "The Linux distribution to use as the base image." msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:51 -#: ../../source/contributor-how-to-build-docker-images.rst:55 -#: ../../source/contributor-how-to-build-docker-images.rst:71 -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 #, fuzzy msgid "No" msgstr "Aucun" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:45 msgid "``ubuntu``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:46 #, fuzzy msgid 
"``DISTRO_VERSION``" msgstr "Version Python" -#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:47 msgid "Version of the Linux distribution." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 -#, fuzzy -msgid "``22.04``" -msgstr "1.0.0rc1" +#: ../../source/contributor-how-to-build-docker-images.rst:49 +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:50 #, fuzzy msgid "``PYTHON_VERSION``" msgstr "Version Python" -#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "Version of ``python`` to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "``3.11`` or ``3.11.1``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:54 msgid "``PIP_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:55 msgid "Version of ``pip`` to be installed." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:59 -#: ../../source/contributor-how-to-build-docker-images.rst:63 -#: ../../source/contributor-how-to-build-docker-images.rst:67 -#: ../../source/contributor-how-to-build-docker-images.rst:108 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 #, fuzzy msgid "Yes" msgstr "Types" -#: ../../source/contributor-how-to-build-docker-images.rst:60 -#, fuzzy -msgid "``23.0.1``" -msgstr "1.0.0rc1" +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid ":substitution-code:`|pip_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:58 msgid "``SETUPTOOLS_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:62 +#: ../../source/contributor-how-to-build-docker-images.rst:59 msgid "Version of ``setuptools`` to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:64 -#, fuzzy -msgid "``69.0.2``" -msgstr "``1.0.0b0``" +#: ../../source/contributor-how-to-build-docker-images.rst:61 +msgid ":substitution-code:`|setuptools_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:62 msgid "``FLWR_VERSION``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:63 msgid "Version of Flower to be installed." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:68 -#, fuzzy -msgid "``1.8.0``" -msgstr "``1.0.0b0``" +#: ../../source/contributor-how-to-build-docker-images.rst:65 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:66 msgid "``FLWR_PACKAGE``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:70 +#: ../../source/contributor-how-to-build-docker-images.rst:67 msgid "The Flower package to be installed." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:72 +#: ../../source/contributor-how-to-build-docker-images.rst:69 msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:70 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "Version Python" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:76 +#, fuzzy +msgid "`Direct Reference Examples`_" +msgstr "Demande pour un nouveau Flower Example" + +#: ../../source/contributor-how-to-build-docker-images.rst:78 msgid "" "The following example creates a base Ubuntu/Alpine image with Python " -"3.11.0, pip 23.0.1, setuptools 69.0.2 and Flower 1.8.0:" +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:88 +#: ../../source/contributor-how-to-build-docker-images.rst:93 msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. 
Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:92 +#: ../../source/contributor-how-to-build-docker-images.rst:98 #, fuzzy -msgid "Building the SuperLink/SuperNode or ServerApp image" -msgstr "Démarrer le serveur" +msgid "Building a Flower Binary Image" +msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:102 +#: ../../source/contributor-how-to-build-docker-images.rst:108 msgid "``BASE_REPOSITORY``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:103 +#: ../../source/contributor-how-to-build-docker-images.rst:109 msgid "The repository name of the base image." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:105 +#: ../../source/contributor-how-to-build-docker-images.rst:111 msgid "``flwr/base``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:106 +#: ../../source/contributor-how-to-build-docker-images.rst:112 msgid "``BASE_IMAGE``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/contributor-how-to-build-docker-images.rst:113 #, fuzzy msgid "The Tag of the Flower base image." 
msgstr "Chargement des données" -#: ../../source/contributor-how-to-build-docker-images.rst:109 -msgid "``1.8.0-py3.10-ubuntu22.04``" +#: ../../source/contributor-how-to-build-docker-images.rst:115 +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:111 +#: ../../source/contributor-how-to-build-docker-images.rst:117 msgid "" -"The following example creates a SuperLink/SuperNode or ServerApp image " -"with the official Flower base image:" +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:128 msgid "" "If you want to use your own base image instead of the official Flower " "base image, all you need to do is set the ``BASE_REPOSITORY`` build " -"argument." +"argument to ``flwr_base`` (as we've specified above)." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:133 +#: ../../source/contributor-how-to-build-docker-images.rst:140 msgid "After creating the image, we can test whether the image is working:" msgstr "" +#: ../../source/contributor-how-to-build-docker-images.rst:147 +#, fuzzy +msgid "Direct Reference Examples" +msgstr "Demande pour un nouveau Flower Example" + #: ../../source/contributor-how-to-contribute-translations.rst:2 #, fuzzy msgid "Contribute translations" @@ -344,7 +512,7 @@ msgid "" "`_." msgstr "" -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_." 
msgstr "" -#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 msgid "Add new languages" msgstr "" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" @@ -430,17 +598,17 @@ msgstr "" "formater le code ou exécuter des tests. À cette fin, nous utilisons " "l'extension VSCode Remote Containers. Qu'est-ce que c'est ?" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +#, fuzzy msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." msgstr "" "L'extension Visual Studio Code Remote - Containers te permet d'utiliser " "un conteneur Docker comme environnement de développement complet. Elle te" @@ -453,7 +621,7 @@ msgstr "" " les outils, les bibliothèques ou les exécutions nécessaires pour " "travailler avec une base de code." 
-#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. Extensions are installed and run inside the " @@ -469,7 +637,7 @@ msgstr "" "environnement de développement simplement en te connectant à un autre " "conteneur." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 #, fuzzy msgid "" "Source: `Official VSCode documentation " @@ -478,19 +646,19 @@ msgstr "" "Source : `Documentation officielle de VSCode " "`_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "Pour commencer" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 #, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." msgstr "" "La configuration et le paramétrage du :code:`Dockerfile` ainsi que la " @@ -501,7 +669,7 @@ msgstr "" "`VSCode Containers Extension `_." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. 
When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -518,7 +686,7 @@ msgstr "" "inférieur gauche de ta fenêtre VSCode et sélectionner l'option " "*(Re)Ouvrir le dossier dans le conteneur*." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. For those cases consult " "the following sources:" @@ -526,7 +694,7 @@ msgstr "" "Dans certains cas, ton installation peut être plus complexe. Pour ces " "cas-là, consulte les sources suivantes :" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 #, fuzzy msgid "" "`Developing inside a Container " @@ -537,7 +705,7 @@ msgstr "" "`_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 #, fuzzy msgid "" "`Remote development in Containers " @@ -569,7 +737,7 @@ msgstr "" "supprimer ``poetry.lock`` (``rm poetry.lock``) avant d'exécuter ``poetry " "install``)." 
-#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" @@ -577,7 +745,7 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (sans " "extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" @@ -585,7 +753,7 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (avec extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" @@ -593,11 +761,11 @@ msgstr "" "Installez ``flwr`` à partir d'une copie locale du code source de Flower " "via ``pyproject.toml`` :" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "``flwr = { path = \"../../\", develop = true }`` (sans extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" @@ -605,11 +773,11 @@ msgstr "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (avec extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via 
``pyproject.toml``:" msgstr "Installez ``flwr`` à partir d'un fichier local via ``pyproject.toml`` :" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" @@ -618,7 +786,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` (sans " "extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " @@ -627,7 +795,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (avec extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "Utiliser pip (recommandé sur Colab)" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "Installe une pré-version de ``flwr`` depuis PyPI :" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "``pip install -U --pre flwr`` (sans les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:33 +#: ../../source/contributor-how-to-install-development-versions.rst:38 msgid "``pip install -U 
--pre 'flwr[simulation]'`` (with extras)" msgstr "``pip install -U --pre 'flwr[simulation]'`` (avec les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." @@ -662,11 +830,11 @@ msgstr "" "Utilise l'une des commandes suivantes pour installer Flower directement à" " partir de GitHub." -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "Installez ``flwr`` à partir de la branche GitHub par défaut (``main``) :" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" @@ -674,21 +842,21 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git`` (sans les " "extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(avec les extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (avec les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "" "Installez ``flwr`` à partir d'une branche 
GitHub spécifique (``nom-" "branche``) :" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" @@ -696,19 +864,20 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git@nom-branche`` " "(sans les extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 +#, fuzzy msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git``" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name'`` (with extras)" msgstr "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git``" -"@nom-de-la-branche'`` (avec des extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"``@nom-de-la-branche'`` (avec des extras)" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "Ouvre les carnets Jupyter sur Google Colab" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 #, fuzzy msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" @@ -717,7 +886,7 @@ msgstr "" "Ouvrir le notebook ``doc/source/tutorial/Flower-1-Intro-to-FL-" "PyTorch.ipynb`` :" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" @@ -726,7 +895,7 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-get-started-with-flower-pytorch.ipynb" 
-#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" @@ -735,7 +904,7 @@ msgstr "" "`nom-branche` en remplaçant `main` par `nom-branche` (juste après `blob`)" " :" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" @@ -744,21 +913,21 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " "matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" @@ -777,11 +946,11 @@ msgstr "" "Ce document décrit le processus de diffusion actuel, qui peut ou non " "changer à l'avenir." 
-#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "Lors de la sortie" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. To " "release a new version of Flower, the following things need to happen (in " @@ -791,14 +960,14 @@ msgstr "" "Pour publier une nouvelle version de Flower, les choses suivantes doivent" " se produire (dans cet ordre) :" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 msgid "" "Once the changelog has been updated with all the changes, run ``./dev" "/prepare-release-changelog.sh v``, where ```` " @@ -808,7 +977,7 @@ msgid "" "the contributors. Open a pull request with those changes." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "" "Once the pull request is merged, tag the release commit with the version " "number as soon as the PR is merged: ``git tag v`` (notice " @@ -817,83 +986,33 @@ msgid "" "artifacts and the relevant part of the changelog." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:15 -#, fuzzy -msgid "Trigger the CI for building the Docker images." 
-msgstr "Démarrer le serveur" - -#: ../../source/contributor-how-to-release-flower.rst:17 -msgid "" -"To trigger the workflow, a collaborator must create a " -"``workflow_dispatch`` event in the GitHub CI. This can be done either " -"through the UI or via the GitHub CLI. The event requires only one input, " -"the Flower version, to be released." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:21 -#, fuzzy -msgid "**Via the UI**" -msgstr "**Review the PR**" - -#: ../../source/contributor-how-to-release-flower.rst:23 -msgid "" -"Go to the ``Build docker images`` workflow `page " -"`_." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:24 -msgid "" -"Click on the ``Run workflow`` button and type the new version of Flower " -"in the ``Version of Flower`` input field." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:25 -msgid "Click on the **green** ``Run workflow`` button." -msgstr "" - #: ../../source/contributor-how-to-release-flower.rst:29 -msgid "**Via the GitHub CI**" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:31 -msgid "" -"Make sure you are logged in via ``gh auth login`` and that the current " -"working directory is the root of the Flower repository." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:32 -msgid "" -"Trigger the workflow via ``gh workflow run docker-images.yml -f flwr-" -"version=``." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:35 msgid "After the release" msgstr "Après la publication" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "Crée une demande de pull qui contient les modifications suivantes :" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." 
msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/contributor-how-to-release-flower.rst:40 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "" "Mets à jour tous les fichiers qui contiennent le numéro de version actuel" " si nécessaire." -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "Ajoute une nouvelle section ``Unreleased`` dans ``changelog.md``." -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." @@ -901,15 +1020,15 @@ msgstr "" "Fusionne la pull request le jour même (c'est-à-dire avant qu'une nouvelle" " version nightly ne soit publiée sur PyPI)." -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "Publier une pré-version" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "Nom de la pré-version" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" @@ -918,39 +1037,39 @@ msgstr "" "Les préversions DOIVENT utiliser l'un des modèles de dénomination " "suivants :" -#: ../../source/contributor-how-to-release-flower.rst:53 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "Alpha : ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "Bêta : ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "Candidat à la publication (RC) : ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "Voici quelques exemples :" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:62 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "1.0.0rc1" -#: ../../source/contributor-how-to-release-flower.rst:64 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" @@ -958,11 +1077,11 @@ msgstr "" "Ceci est conforme au PEP-440 et aux recommandations de l'Autorité de " "l'emballage Python (PyPA) :" -#: 
../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: ../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" -#: ../../source/contributor-how-to-release-flower.rst:70 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -1005,11 +1124,11 @@ msgstr "" "version stable) - si aucun problème n'apparaît, cette version deviendra " "la prochaine version stable" -#: ../../source/contributor-how-to-release-flower.rst:78 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "Bêta : fonctionnalité complète, autorisée à avoir des problèmes connus" -#: ../../source/contributor-how-to-release-flower.rst:79 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "" "Alpha : les fonctionnalités ne sont pas complètes, les problèmes connus " @@ -1032,22 +1151,22 @@ msgstr "" "Anaconda. Tu peux suivre les instructions ou choisir la configuration que" " tu préfères." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" msgstr "Version Python" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 #, fuzzy msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." 
msgstr "" -"Flower nécessite `Python 3.7 `_ ou plus, " -"nous recommandons `Python 3.8 `_." +"Flower nécessite `Python 3.9 `_ ou plus, " +"nous recommandons `Python 3.10 `_." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 msgid "" "Due to a known incompatibility with `ray " "`_, we currently recommend utilizing at " @@ -1055,12 +1174,12 @@ msgid "" "simulations." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 #, fuzzy msgid "Virtualenv with Pyenv/Virtualenv" msgstr "Virutualenv avec Pyenv/Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_ pour plus de " "détails." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 #, fuzzy msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " @@ -1082,19 +1201,19 @@ msgstr "" "Une fois Pyenv mis en place, tu peux l'utiliser pour installer `Python " "Version 3.7 `_ ou supérieure :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "Crée le virtualenv avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "Active la virtualenv en exécutant la commande suivante :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "Virtualenv et la poésie" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: 
../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " @@ -1104,7 +1223,7 @@ msgstr "" "poetry.org/docs/>`_ pour gérer les dépendances. Après l'installation de " "Poetry, il te suffit de créer un environnement virtuel avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" @@ -1112,15 +1231,16 @@ msgstr "" "Si tu ouvres un nouveau terminal, tu peux activer l'environnement virtuel" " précédemment créé avec la commande suivante :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "Virtualenv avec Anaconda" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#, fuzzy msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" "Si tu préfères utiliser Anaconda pour ton environnement virtuel, installe" @@ -1129,15 +1249,15 @@ msgstr "" "guide/install/index.html>`_. Après l'avoir configuré, tu peux créer un " "environnement virtuel avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "et active l'environnement virtuel avec :" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "Et ensuite ?" 
-#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." @@ -1149,11 +1269,11 @@ msgstr "" msgid "Write documentation" msgstr "Rédiger de la documentation" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "Schéma du projet" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1164,7 +1284,7 @@ msgstr "" "reStructuredText (fichiers `.rst`) et Markdown (fichiers `.md`)." #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196 #, fuzzy msgid "" "Note that, in order to build the documentation locally (with ``poetry run" @@ -1176,20 +1296,20 @@ msgstr "" "make html``, comme décrit plus bas), `Pandoc " "_` doit être installé sur le système." 
-#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "Modifier une page existante" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "Modifier un fichier ``.rst`` (ou ``.md``) existant sous ``doc/source/``" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "Compilez les documents : ``cd doc``, puis ``poetry run make html``" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" msgstr "" @@ -1228,11 +1348,11 @@ msgstr "" "quelques recommandations sur les points de départ pour augmenter tes " "chances de voir ton PR accepté dans la base de code de Flower." -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "Par où commencer" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " @@ -1243,25 +1363,25 @@ msgstr "" " non essentielles de la base de code. Les bons candidats pour commencer " "sont :" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? 
What could be expressed more clearly?" msgstr "" "Documentation : Qu'est-ce qui manque ? Qu'est-ce qui pourrait être " "exprimé plus clairement ?" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." msgstr "Références : voir ci-dessous." -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." msgstr "Exemples : voir ci-dessous." -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "Demande pour une nouvelle Flower Baseline" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 #, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" @@ -1272,7 +1392,7 @@ msgstr "" "probablement consulter notre `guide de contribution pour les baselines " "`_." -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 #, fuzzy msgid "" "You should then check out the open `issues " @@ -1287,7 +1407,7 @@ msgstr "" " laquelle tu aimerais travailler et qui n'a pas d'assignés, n'hésite pas " "à te l'attribuer et à commencer à travailler dessus !" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" @@ -1340,12 +1460,13 @@ msgstr "" "protocole SecAgg peut être considéré comme un cas particulier du " "protocole SecAgg+." 
-#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +#, fuzzy +msgid "The ``SecAgg+`` abstraction" msgstr "L'abstraction :code:`SecAgg+`" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" @@ -1356,8 +1477,8 @@ msgstr "" "dictionnaires python utilisés ont des clés de type int plutôt que de type" " ClientProxy." -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" @@ -1365,11 +1486,12 @@ msgstr "" "Le serveur Flower exécutera et traitera les résultats reçus dans l'ordre " "suivant :" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#, fuzzy +msgid "The ``LightSecAgg`` abstraction" msgstr "L'abstraction :code:`LightSecAgg`" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "Types" @@ -1386,7 +1508,7 @@ msgstr "" "de Flower mais qui n'ont pas l'habitude de contribuer à des projets " "GitHub." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 #, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " @@ -1399,15 +1521,15 @@ msgstr "" "contributors.html>`_ et des exemples de `bonnes premières contributions " "`_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "Mise en place du référentiel" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "**Créer un compte GitHub et configurer Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 #, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " @@ -1422,7 +1544,7 @@ msgstr "" "locale, tu peux suivre ce `guide `_ pour le mettre en place." -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " @@ -1432,7 +1554,7 @@ msgstr "" "contrôle des versions et la collaboration. Il permet à chacun de " "collaborer et de travailler de n'importe où sur des dépôts à distance." -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." @@ -1440,7 +1562,7 @@ msgstr "" "Si ce n'est pas déjà fait, tu devras créer un compte sur `GitHub " "`_." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1452,15 +1574,15 @@ msgstr "" " des modifications localement et tu en gardes une trace à l'aide de Git, " "puis tu télécharges ton nouvel historique à nouveau sur GitHub." -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "**Fourche le dépôt de Flower**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 #, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" @@ -1469,7 +1591,7 @@ msgstr "" "étant connecté à ton compte GitHub) et cliquer sur le bouton ``Fork`` " "situé en haut à droite de la page." -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1482,11 +1604,11 @@ msgstr "" " devrais voir dans le coin supérieur gauche que tu es en train de " "regarder ta propre version de Flower." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "**Clonage de ton dépôt forké**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1498,7 +1620,7 @@ msgstr "" "forké, tu dois d'abord cliquer sur le bouton ``Code`` à droite, ce qui te" " permettra de copier le lien HTTPS du dépôt." -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" @@ -1507,7 +1629,7 @@ msgstr "" "machine, naviguer jusqu'à l'endroit où tu veux télécharger le référentiel" " et taper :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " @@ -1516,15 +1638,15 @@ msgstr "" "Cela créera un dossier `flower/` (ou le nom de ta fourche si tu l'as " "renommée) dans le répertoire de travail actuel." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "**Ajouter l'origine**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "Tu peux ensuite aller dans le dossier du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1536,7 +1658,7 @@ msgstr "" "indiqué précédemment en allant sur notre dépôt fork sur notre compte " "GitHub et en copiant le lien." -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" @@ -1544,11 +1666,11 @@ msgstr "" "Une fois que le \\ est copié, nous pouvons taper la commande " "suivante dans notre terminal :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "**Ajouter en amont**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 #, fuzzy msgid "" "Now we will add an upstream address to our repository. 
Still in the same " @@ -1558,13 +1680,13 @@ msgstr "" "Toujours dans le même directroy, nous devons exécuter la commande " "suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" "Le schéma suivant explique visuellement ce que nous avons fait dans les " "étapes précédentes :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1578,7 +1700,7 @@ msgstr "" "simplement l'adresse distante GitHub du dépôt forké que nous avons créé, " "c'est-à-dire la copie (fork) dans notre propre compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" @@ -1587,11 +1709,11 @@ msgstr "" "dernières modifications du dépôt Flower, nous pouvons exécuter la " "commande suivante :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "Mise en place de l'environnement de codage" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 #, fuzzy msgid "" "This can be achieved by following this :doc:`getting started guide for " @@ -1604,11 +1726,11 @@ msgstr "" "fois que tu es capable d'écrire du code et de le tester, tu peux enfin " "commencer à faire des changements !" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "Apporter des changements" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" @@ -1616,15 +1738,15 @@ msgstr "" "Avant de faire des changements, assure-toi que tu es à jour avec ton " "référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "Et avec le référentiel de Flower :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "**Créer une nouvelle branche**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " @@ -1634,7 +1756,7 @@ msgstr "" "une bonne pratique de créer une nouvelle branche pour chaque " "fonctionnalité/projet qui doit être mis en œuvre." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" @@ -1642,21 +1764,21 @@ msgstr "" "Pour ce faire, il suffit d'exécuter la commande suivante dans le " "répertoire du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "**Apporter des modifications**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" "Écris du bon code et crée de merveilleuses modifications à l'aide de ton " "éditeur préféré !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "**Teste et mets en forme ton code**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " @@ -1666,15 +1788,15 @@ msgstr "" "pourra pas être fusionné dans le dépôt Flower, et ce, afin que la base de" " code reste cohérente et facile à comprendre." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "Pour ce faire, nous avons écrit quelques scripts que tu peux exécuter :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "**Changements de scène**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." @@ -1682,48 +1804,51 @@ msgstr "" "Avant de créer un commit qui mettra à jour ton historique, tu dois " "spécifier à Git les fichiers qu'il doit prendre en compte." -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "Cela peut se faire avec :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#, fuzzy msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." msgstr "" "Pour vérifier quels fichiers ont été modifiés par rapport à la dernière " "version (last commit) et pour voir quels fichiers sont mis à disposition " "pour le commit, tu peux utiliser la commande :code:`git status`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "**Commit changes**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#, fuzzy msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr "" "Une fois que tu as ajouté tous les fichiers que tu voulais livrer à " "l'aide de :code:`git add`, tu peux enfin créer ta livraison à l'aide de " "cette commande :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#, fuzzy msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." msgstr "" "Le ``commit_message`` est là pour expliquer aux autres ce que fait le " "commit. Il doit être écrit dans un style impératif et être concis. Un " "exemple serait :code:`git commit -m \"Ajouter des images au README\"`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "**Pousser les changements vers la fourche**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " @@ -1734,7 +1859,7 @@ msgstr "" "moyen de le savoir à moins que nous ne poussions nos modifications vers " "l'adresse distante de notre origine :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." @@ -1742,15 +1867,15 @@ msgstr "" "Une fois que c'est fait, tu verras sur GitHub que ton repo forké a été " "mis à jour avec les modifications que tu as apportées." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "Créer et fusionner une pull request (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "**Créer le PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" @@ -1758,12 +1883,12 @@ msgstr "" "Une fois que tu as poussé les modifications, sur la page web GitHub de " "ton dépôt, tu devrais voir le message suivant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." msgstr "Sinon, tu peux toujours trouver cette option dans la page `Branches`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " @@ -1772,13 +1897,13 @@ msgstr "" "Une fois que tu as cliqué sur le bouton `Compare & pull request`, tu " "devrais voir quelque chose de similaire à ceci :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" "En haut, tu as une explication de quelle branche sera fusionnée à quel " "endroit :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " @@ -1788,14 +1913,14 @@ msgstr "" "branche ``doc-fixes`` de mon dépôt forké à la branche ``main`` du dépôt " "Flower." -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " "guidelines, otherwise it won't be possible to merge the PR. So in this " "case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1807,11 +1932,11 @@ msgstr "" "commentaires (qui ne seront pas rendus une fois le PR ouvert) pour te " "guider tout au long du processus." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "It is important to follow the instructions described in comments." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" @@ -1821,7 +1946,7 @@ msgstr "" "qui informera les réviseurs qu'un nouveau PR a été ouvert et qu'ils " "doivent le consulter pour le fusionner ou demander des modifications." -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" @@ -1830,11 +1955,11 @@ msgstr "" " personne, tu as la possibilité de créer un brouillon de demande de " "traction :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "**Faire de nouveaux changements**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" @@ -1844,11 +1969,11 @@ msgstr "" "toujours y pousser de nouveaux commits de la même manière qu'auparavant, " "en apportant des modifications à la branche associée au PR." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "**Review the PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" @@ -1857,7 +1982,7 @@ msgstr "" " étant prêt, une révision des propriétaires de code sera automatiquement " "demandée :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." @@ -1865,11 +1990,11 @@ msgstr "" "Les propriétaires du code vont alors se pencher sur le code, poser des " "questions, demander des modifications ou valider le RP." -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "La fusion sera bloquée s'il y a des changements demandés en cours." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" @@ -1877,11 +2002,11 @@ msgstr "" "Pour les résoudre, il suffit de pousser les changements nécessaires vers " "la branche associée au PR :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "Et résous la conversation :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." @@ -1889,11 +2014,11 @@ msgstr "" "Une fois que toutes les conversations ont été résolues, tu peux " "redemander un examen." -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "**Une fois que le PR est fusionné**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." @@ -1902,7 +2027,7 @@ msgstr "" " de modifications à demander, ils peuvent approuver le PR et le " "fusionner." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" @@ -1911,19 +2036,19 @@ msgstr "" "(un bouton devrait apparaître pour le faire) et aussi la supprimer " "localement en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "Ensuite, tu dois mettre à jour ton dépôt forké en faisant :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "Exemple de première contribution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "Problème" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 #, fuzzy msgid "" "For our documentation, we've started to use the `Diàtaxis framework " @@ -1932,7 +2057,7 @@ msgstr "" "Pour notre documentation, nous avons commencé à utiliser le cadre " "`Diàtaxis `_." -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 #, fuzzy msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" @@ -1942,7 +2067,7 @@ msgstr "" "la phrase \"Comment faire pour...\", par exemple, \"Comment passer à " "Flower 1.0\"." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." @@ -1951,7 +2076,7 @@ msgstr "" "changer leur titre est (malheureusement) plus compliqué qu'on ne le " "pense." -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 #, fuzzy msgid "" "This issue is about changing the title of a doc from present continuous " @@ -1960,7 +2085,7 @@ msgstr "" "Cette question porte sur le changement du titre d'un document du présent " "continu au présent simple." -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 #, fuzzy msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " @@ -1970,21 +2095,21 @@ msgstr "" "remplacé par \"Sauvegarder la progression\". Est-ce que cela passe notre " "contrôle ?" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "Before: \"How to saving progress\" ❌" msgstr "Avant : \"Comment sauvegarder les progrès\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 #, fuzzy msgid "After: \"How to save progress\" ✅" msgstr "Après : \"Comment sauvegarder la progression\" ✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "Solution" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 #, fuzzy msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. 
" @@ -1994,12 +2119,12 @@ msgstr "" "configuration de bout en bout. Après avoir cloné et configuré le repo " "Flower, voici ce que tu dois faire :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "Trouve le fichier source dans `doc/source`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " @@ -2008,7 +2133,7 @@ msgstr "" "Effectue la modification dans le fichier `.rst` (attention, les tirets " "sous le titre doivent être de la même longueur que le titre lui-même)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 #, fuzzy msgid "" "Build the docs and `check the result `_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "Renommer le fichier" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -2034,22 +2159,22 @@ msgstr "" "important** d'éviter cela, car briser des liens peut nuire à notre " "classement dans les moteurs de recherche." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 #, fuzzy msgid "Here's how to change the file name:" msgstr "Voici comment changer le nom du fichier :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "Change le nom du fichier en `save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "Ajouter une règle de redirection à `doc/source/conf.py`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" @@ -2058,11 +2183,11 @@ msgstr "" "Cela entraînera une redirection de `saving-progress.html` vers `save-" "progress.html`, les anciens liens continueront à fonctionner." -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "Applique les changements dans le fichier d'index" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -2073,16 +2198,16 @@ msgstr "" "très important de mettre également à jour le fichier `index.rst`. C'est " "là que nous définissons toute l'arborescence de la barre de navigation." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "Trouve et modifie le nom du fichier dans `index.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "Open PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 #, fuzzy msgid "" "Commit the changes (commit messages are always imperative: \"Do " @@ -2091,36 +2216,36 @@ msgstr "" "Valide les modifications (les messages de validation sont toujours " "impératifs : \"Fais quelque chose\", dans ce cas \"Modifie...\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "Transmets les changements à ta fourchette" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "Attends qu'elle soit approuvée !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "" "Félicitations 🥳 Tu es désormais officiellement une contributrice de " "Flower !" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "Prochaines étapes" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" @@ -2128,37 +2253,37 @@ msgstr "" "Une fois que tu auras fait ton premier RP, et que tu voudras contribuer " "davantage, ne manque pas de consulter les sites suivants :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 #, fuzzy msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." msgstr "" "`Bonnes premières contributions `_, où vous devriez " "particulièrement regarder les contributions :code:`baselines`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "Annexe" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 msgid "PR title format" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 msgid "We enforce the following PR title format:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " "break}``, ```` should be in ``{framework, baselines, datasets, " @@ -2167,51 +2292,51 @@ msgid "" "verb in the imperative mood." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 #, fuzzy msgid "Valid examples:" msgstr "Exemples de PyTorch" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 msgid "``feat(framework) Add flwr build CLI command``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 msgid "``ci(*:skip) Enforce PR title format``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 #, fuzzy msgid "Invalid examples:" msgstr "Exemples de PyTorch" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:389 msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "" @@ -2220,14 +2345,18 @@ msgid "Get started as a contributor" msgstr "Devenez un·e contributeur·ice" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:153 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "Prérequis" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 #, fuzzy -msgid "`Python 3.8 `_ or above" -msgstr "`Python 3.7 `_ ou plus" +msgid "`Python 3.9 `_ or above" +msgstr "`Python 3.10 `_ ou plus" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 msgid "`Poetry 1.3 `_ or above" @@ -2246,7 +2375,7 @@ msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 #, fuzzy msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" @@ -2255,13 +2384,14 @@ msgstr "" "le supportent). Poetry est un outil qui support `PEP 517 " "`_." 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "Setup de la machine" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 -msgid "Preliminarities" -msgstr "" +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 +#, fuzzy +msgid "Preliminaries" +msgstr "Principes" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 msgid "Some system-wide dependencies are needed." @@ -2277,113 +2407,113 @@ msgid "" "installation actions to add `brew` to your PATH." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 msgid "For Ubuntu" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 #, fuzzy msgid "Create Flower Dev Environment" msgstr "Créer/Supprimer l'environment virtuel" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 #, fuzzy msgid "" -"1. 
Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "" "Pour commencer, cloner la `repo Flower `_" " depuis GitHub::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 #, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" "Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script" " suivant qui l'installera, le configurera et créera l'environnement " -"virtuel (avec :code:`Python 3.8.17` par défaut)::" +"virtuel (avec :code:`Python 3.9.20` par défaut)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 #, fuzzy msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following 
convenience script (with " -":code:`Python 3.8.17` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" "Si vous n'avez pas :code:`pyenv` installé, vous pouvez utiliser le script" " suivant qui l'installera, le configurera et créera l'environnement " -"virtuel (avec :code:`Python 3.8.17` par défaut)::" +"virtuel (avec :code:`Python 3.9.20` par défaut)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 #, fuzzy msgid "" -"3. Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "" "Troisièmement, installez le paquet Flower en mode de développement ( " ":code :`pip install -e`) avec toutes les dépendances nécessaires :" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "Convenience Scripts" msgstr "Scripts pratiques" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87 +#, fuzzy msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. 
The following scripts are amongst the most" +" important ones:" msgstr "" "La repo de Flower contient un certain nombre de scripts de commodité pour" " rendre les tâches de développement récurrentes plus faciles et moins " "problématiques. Voir le sous-répertoire :code :`/dev` pour une liste " "complète. Les scripts suivants sont parmis les plus importants :" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 msgid "Create/Delete Virtual Environment" msgstr "Créer/Supprimer l'environment virtuel" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101 msgid "Compile ProtoBuf Definitions" msgstr "Compiler les définitions ProtoBuf" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "Auto-Format Code" msgstr "Formatter le code" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "Run Linters and Tests" msgstr "Vérifier le format et tester le code" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 msgid "Add a pre-commit hook" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " "the `pre-commit `_ library. The pre-" @@ -2391,50 +2521,50 @@ msgid "" "``./dev/format.sh`` and ``./dev/test.sh`` scripts." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128 msgid "There are multiple ways developers can use this:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136 msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138 msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" +"commit`` command." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145 msgid "" "For developers who prefer not to install the hook permanently, it is " "possible to execute a one-time check prior to committing changes by using" " the following command:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156 msgid "Run Github Actions (CI) locally" msgstr "Exécuter les GitHub Actions (CI) localement" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158 #, fuzzy msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" "Il est possible d'exécuter l'ensemble des Github Actions sous leur " "environnement local en utilisant `Act _`." @@ -2442,7 +2572,7 @@ msgstr "" "fois installé, exécuter la commande suivante dans le dossier principale " "de Flower :" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." @@ -2450,17186 +2580,15319 @@ msgstr "" "Le workflow par défaut de Flower sera exécuté en configurant les machines" " Docker requises en arrière plan." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171 #, fuzzy msgid "Build Release" msgstr "Inédit" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173 +#, fuzzy msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "" "Flower utilise Poetry pour construire les nouvelles versions. 
La commande" " nécessaire est comprise dans un script simple ::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180 +#, fuzzy msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." msgstr "" "Les versions résultantes :code:`.whl` et :code:`.tar.gz` seront stockées " "dans le sous-répertoire:code:`/dist`." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184 #, fuzzy msgid "Build Documentation" msgstr "Amélioration de la documentation" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186 msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194 msgid "This will generate HTML documentation in ``doc/build/html``." 
msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "Exemple : FedBN dans PyTorch - De la centralisation à la fédération" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:2 #, fuzzy +msgid "Enable TLS for Secure Connections" +msgstr "Collecte centralisée des données" + +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail d'apprentissage automatique " -"existante avec `FedBN `_, une stratégie" -" de formation fédérée conçue pour les données non-identifiées. Nous " -"utilisons PyTorch pour former un réseau neuronal convolutif (avec des " -"couches de normalisation par lots) sur l'ensemble de données CIFAR-10. " -"Lors de l'application de FedBN, seules quelques modifications sont " -"nécessaires par rapport à `Exemple : PyTorch - De la centralisation à la " -"fédération `_." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" -msgstr "Formation centralisée" +#: ../../source/docker/enable-tls.rst:7 +msgid "" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 -#, fuzzy +#: ../../source/docker/enable-tls.rst:12 msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -"Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " -"Centralized To Federated `_. La seule chose à faire est de modifier " -"le fichier appelé :code:`cifar.py`, la partie révisée est montrée ci-" -"dessous :" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:17 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." msgstr "" -"L'architecture du modèle définie dans la classe Net() est ajoutée avec " -"les couches de normalisation par lots en conséquence." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" +#: ../../source/docker/enable-tls.rst:20 +msgid "" +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" -"Tu peux maintenant exécuter ta charge de travail d'apprentissage " -"automatique :" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 -#, fuzzy +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:15 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." msgstr "" -"Jusqu'à présent, tout ceci devrait te sembler assez familier si tu as " -"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" -" avons construit pour créer un système d'apprentissage fédéré au sein de " -"FedBN, le système se compose d'un serveur et de deux clients." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" -msgstr "Formation fédérée" +#: ../../source/docker/enable-tls.rst:27 +#, fuzzy +msgid "SuperLink" +msgstr "flower-superlink" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:29 +msgid "" +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" +msgstr "" + +#: ../../source/docker/enable-tls.rst #, fuzzy +msgid "Understanding the command" +msgstr "Entraîne le modèle" + +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +msgid "``docker run``: This tells Docker to run a container from an image." +msgstr "" + +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 +msgid "``--rm``: Remove the container once it is stopped or the command exits." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. 
If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -"Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " -"`_, les parties suivantes sont faciles à suivre, seules " -"les fonctions :code:`get_parameters` et :code:`set_parameters` dans " -":code:`client.py` ont besoin d'être révisées. Si ce n'est pas le cas, " -"veuillez lire `Exemple : PyTorch - From Centralized To Federated " -"`. d'abord." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +"the current working directory of the host machine as a read-only volume " +"at the" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients*. Dans FedBN, " -":code:`server.py` reste inchangé, nous pouvons démarrer le serveur " -"directement." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." 
+"This allows the container to access the TLS certificates that are stored " +"in the certificates" msgstr "" -"Enfin, nous allons réviser notre logique *client* en modifiant " -":code:`get_parameters` et :code:`set_parameters` dans :code:`client.py`, " -"nous allons exclure les paramètres de normalisation des lots de la liste " -"des paramètres du modèle lors de l'envoi ou de la réception depuis le " -"serveur." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "Tu peux maintenant ouvrir deux autres fenêtres de terminal et lancer" +#: ../../source/docker/enable-tls.rst +msgid "directory." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " -"d'exécution avant de le faire) et tu verras ton projet PyTorch " -"(auparavant centralisé) exécuter l'apprentissage fédéré avec la stratégie" -" FedBN sur deux clients. Félicitations !" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" -msgstr "Prochaines étapes" +#: ../../source/docker/enable-tls.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." 
+msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 -#, fuzzy +#: ../../source/docker/enable-tls.rst msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -"Le code source complet de cet exemple se trouve ici " -"`_. Notre exemple est bien sûr un peu trop " -"simplifié parce que les deux clients chargent exactement le même ensemble" -" de données, ce qui n'est pas réaliste. Tu es maintenant prêt à " -"approfondir ce sujet. Pourquoi ne pas utiliser différents sous-ensembles " -"de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter d'autres clients " -"?" -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" -msgstr "Exemple : JAX - Exécuter JAX Federated" - -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/docker/enable-tls.rst #, fuzzy +msgid "inside the container." +msgstr "Utiliser les conteneurs VS Code Remote" + +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." 
+"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail JAX existante. Nous utilisons JAX" -" pour entraîner un modèle de régression linéaire sur un ensemble de " -"données scikit-learn. Nous structurerons l'exemple de la même manière que" -" notre présentation `PyTorch - De la centralisation à la fédération " -"`_. Tout d'abord, nous construisons une approche" -" d'entraînement centralisée basée sur le tutoriel `Régression linéaire " -"avec JAX " -"`_." -" Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " -"exécuter l'entraînement de manière fédérée." -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "SuperLink." +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -"Avant de commencer à construire notre exemple JAX, nous devons installer " -"les paquets :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, et " -":code:`flwr` :" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" -msgstr "Régression linéaire avec JAX" +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. 
If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" msgstr "" -"Nous commençons par une brève description du code d'entraînement " -"centralisé basé sur un modèle :code:`Régression linéaire`. Si tu veux une" -" explication plus approfondie de ce qui se passe, jette un coup d'œil à " -"la documentation officielle `JAX `_." -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 -msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." msgstr "" -"Créons un nouveau fichier appelé :code:`jax_training.py` avec tous les " -"composants nécessaires pour un apprentissage traditionnel (centralisé) de" -" la régression linéaire. Tout d'abord, les paquets JAX :code:`jax` et " -":code:`jaxlib` doivent être importés. En outre, nous devons importer " -":code:`sklearn` puisque nous utilisons :code:`make_regression` pour le " -"jeu de données et :code:`train_test_split` pour diviser le jeu de données" -" en un jeu d'entraînement et un jeu de test. Tu peux voir que nous " -"n'avons pas encore importé le paquet :code:`flwr` pour l'apprentissage " -"fédéré, ce qui sera fait plus tard." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 +#: ../../source/docker/enable-tls.rst msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" msgstr "" -"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" -" test mentionnés." -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 -msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." msgstr "" -"L'architecture du modèle (un modèle :code:`Régression linéaire` très " -"simple) est définie dans :code:`load_model()`." -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/docker/enable-tls.rst msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "the network." msgstr "" -"Nous devons maintenant définir l'entraînement (fonction :code:`train()`)," -" qui boucle sur l'ensemble d'entraînement et mesure la perte (fonction " -":code:`loss_fn()`) pour chaque lot d'exemples d'entraînement. 
La fonction" -" de perte est séparée puisque JAX prend des dérivés avec une fonction " -":code:`grad()` (définie dans la fonction :code:`main()` et appelée dans " -":code:`train()`)." -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst:72 +#, fuzzy +msgid "SuperNode" +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst:74 msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." msgstr "" -"L'évaluation du modèle est définie dans la fonction :code:`evaluation()`." -" La fonction prend tous les exemples de test et mesure la perte du modèle" -" de régression linéaire." -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst:79 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." msgstr "" -"Après avoir défini le chargement des données, l'architecture du modèle, " -"l'entraînement et l'évaluation, nous pouvons tout assembler et entraîner " -"notre modèle à l'aide de JAX. Comme nous l'avons déjà mentionné, la " -"fonction :code:`jax.grad()` est définie dans :code:`main()` et transmise " -"à :code:`train()`." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" msgstr "" -"Tu peux maintenant exécuter ta charge de travail (centralisée) de " -"régression linéaire JAX :" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." +"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " -"déjà utilisé JAX. Passons à l'étape suivante et utilisons ce que nous " -"avons construit pour créer un simple système d'apprentissage fédéré " -"composé d'un serveur et de deux clients." -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" -msgstr "JAX rencontre Flower" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "Utiliser les conteneurs VS Code Remote" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. 
The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"Le concept de fédération d'une charge de travail existante est toujours " -"le même et facile à comprendre. Nous devons démarrer un *serveur*, puis " -"utiliser le code dans :code:`jax_training.py` pour les *clients* qui sont" -" connectés au *serveur*.Le *serveur* envoie les paramètres du modèle aux " -"clients.Les *clients* exécutent la formation et mettent à jour les " -"paramètres.Les paramètres mis à jour sont renvoyés au *serveur*, qui fait" -" la moyenne de toutes les mises à jour de paramètres reçues.Ceci décrit " -"un tour du processus d'apprentissage fédéré, et nous répétons cette " -"opération pour plusieurs tours." -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients*. Commençons par " -"configurer :code:`server.py`. Le *serveur* doit importer le paquet Flower" -" :code:`flwr`. Ensuite, nous utilisons la fonction :code:`start_server` " -"pour démarrer un serveur et lui demander d'effectuer trois cycles " -"d'apprentissage fédéré." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" -msgstr "Nous pouvons déjà démarrer le *serveur* :" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" +msgstr "" + +#: ../../source/docker/enable-tls.rst:107 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -"Enfin, nous allons définir la logique de notre *client* dans " -":code:`client.py` et nous appuyer sur la formation JAX définie " -"précédemment dans :code:`jax_training.py`. Notre *client* doit importer " -":code:`flwr`, mais aussi :code:`jax` et :code:`jaxlib` pour mettre à jour" -" les paramètres de notre modèle JAX :" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. 
:code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. :code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"L'implémentation d'un *client* Flower signifie essentiellement " -"l'implémentation d'une sous-classe de :code:`flwr.client.Client` ou " -":code:`flwr.client.NumPyClient`. Notre implémentation sera basée sur " -":code:`flwr.client.NumPyClient` et nous l'appellerons " -":code:`FlowerClient`. :code:`NumPyClient` est légèrement plus facile à " -"implémenter que :code:`Client` si vous utilisez un framework avec une " -"bonne interopérabilité NumPy (comme JAX) parce qu'il évite une partie du " -"boilerplate qui serait autrement nécessaire. :code:`FlowerClient` doit " -"implémenter quatre méthodes, deux méthodes pour obtenir/régler les " -"paramètres du modèle, une méthode pour former le modèle, et une méthode " -"pour tester le modèle :" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" -msgstr ":code:`set_parameters (optional)`" +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/enable-tls.rst msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "règle les paramètres du modèle local reçus du serveur" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" -msgstr "transforme les paramètres en NumPy :code:`ndarray`'s" +#: ../../source/docker/enable-tls.rst +msgid "" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" msgstr "" -"boucle sur la liste des paramètres du modèle reçus sous forme de NumPy " -":code:`ndarray`'s (pensez à la liste des couches du réseau neuronal)" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" -msgstr ":code:`get_parameters`" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify 
the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/docker/enable-tls.rst msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"location of the CA certificate file inside the container that the " +"SuperExec executor" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." +msgstr "" + +#: ../../source/docker/index.rst:2 +msgid "Run Flower using Docker" msgstr "" -"récupère les paramètres du modèle et les renvoie sous forme de liste de " -":code:`ndarray` NumPy (ce qui correspond à ce que " -":code:`flwr.client.NumPyClient` attend)" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" - -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/index.rst:4 msgid "" -"update the parameters of the local model with the parameters received " -"from the server" +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." 
msgstr "" -"mettre à jour les paramètres du modèle local avec les paramètres reçus du" -" serveur" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" -msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" +#: ../../source/docker/index.rst:7 +msgid "" +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" +#: ../../source/docker/index.rst:11 +#, fuzzy +msgid "Getting Started" +msgstr "Pour commencer" + +#: ../../source/docker/index.rst:19 +msgid "Running in Production" msgstr "" -"récupère les paramètres du modèle local mis à jour et les renvoie au " -"serveur" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" -msgstr ":code:`évaluer`" +#: ../../source/docker/index.rst:28 +#, fuzzy +msgid "Advanced Options" +msgstr "Options d'installation avancées" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" -msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" +#: ../../source/docker/index.rst:40 +#, fuzzy +msgid "Run Flower using Docker Compose" +msgstr "Serveur de Flower" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: 
../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" -msgstr "renvoie la perte locale au serveur" +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/persist-superlink-state.rst:4 msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." msgstr "" -"La partie la plus difficile consiste à transformer les paramètres du " -"modèle JAX de :code:`DeviceArray` en :code:`NumPy ndarray` pour les " -"rendre compatibles avec `NumPyClient`." -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." 
msgstr "" -"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " -"utilisent les fonctions :code:`train()` et :code:`evaluate()` définies " -"précédemment dans :code:`jax_training.py`. Ce que nous faisons vraiment " -"ici, c'est que nous indiquons à Flower, par le biais de notre sous-classe" -" :code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " -"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " -"annotations de type pour te donner une meilleure compréhension des types " -"de données qui sont transmis." -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." -msgstr "Après avoir défini le processus de fédération, nous pouvons l'exécuter." +#: ../../source/docker/persist-superlink-state.rst:11 +msgid "" +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" +#: ../../source/docker/persist-superlink-state.rst:21 +msgid "" +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." 
msgstr "" -"Tu peux maintenant ouvrir deux autres fenêtres de terminal et exécuter " -"les commandes suivantes" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/persist-superlink-state.rst:36 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." msgstr "" -"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " -"d'exécution avant de le faire) et tu verras que ton projet JAX exécute " -"l'apprentissage fédéré sur deux clients. Félicitations !" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 -#, fuzzy +#: ../../source/docker/pin-version.rst:2 +msgid "Pin a Docker Image to a Specific Version" +msgstr "" + +#: ../../source/docker/pin-version.rst:4 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." 
msgstr "" -"Le code source de cet exemple a été amélioré au fil du temps et peut être" -" trouvé ici : `Quickstart JAX " -"`_. " -"Notre exemple est quelque peu simplifié à l'extrême car les deux clients " -"chargent le même jeu de données." -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/pin-version.rst:14 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" msgstr "" -"Tu es maintenant prêt à approfondir ce sujet. Pourquoi ne pas utiliser un" -" modèle plus sophistiqué ou un ensemble de données différent ? Pourquoi " -"ne pas ajouter d'autres clients ?" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "Exemple : PyTorch - De la centralisation à la fédération" +#: ../../source/docker/pin-version.rst:23 +msgid "This will output" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 -msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." 
+#: ../../source/docker/pin-version.rst:30 +msgid "Next, we can pin the digest when running a new SuperLink container:" msgstr "" -"Ce tutoriel te montrera comment utiliser Flower pour construire une " -"version fédérée d'une charge de travail d'apprentissage automatique " -"existante. Nous utilisons PyTorch pour entraîner un réseau neuronal " -"convolutif sur l'ensemble de données CIFAR-10. Tout d'abord, nous " -"présentons cette tâche d'apprentissage automatique avec une approche " -"d'entraînement centralisée basée sur le tutoriel `Deep Learning with " -"PyTorch " -"`_. " -"Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " -"exécuter l'entraînement de manière fédérée." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 -msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +#: ../../source/docker/run-as-root-user.rst:2 +msgid "Run with Root User Privileges" msgstr "" -"Nous commençons par une brève description du code d'entraînement CNN " -"centralisé. Si tu veux une explication plus approfondie de ce qui se " -"passe, jette un coup d'œil au tutoriel officiel `PyTorch " -"`_." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/docker/run-as-root-user.rst:4 msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). 
Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" -"Créons un nouveau fichier appelé :code:`cifar.py` avec tous les " -"composants requis pour une formation traditionnelle (centralisée) sur le " -"CIFAR-10. Tout d'abord, tous les paquets requis (tels que :code:`torch` " -"et :code:`torchvision`) doivent être importés. Tu peux voir que nous " -"n'importons aucun paquet pour l'apprentissage fédéré. Tu peux conserver " -"toutes ces importations telles quelles même lorsque nous ajouterons les " -"composants d'apprentissage fédéré à un moment ultérieur." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -"Comme nous l'avons déjà mentionné, nous utiliserons l'ensemble de données" -" CIFAR-10 pour cette charge de travail d'apprentissage automatique. " -"L'architecture du modèle (un réseau neuronal convolutif très simple) est " -"définie dans :code:`class Net()`." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 -msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +#: ../../source/docker/run-as-root-user.rst:12 +msgid "Run a Container with Root User Privileges" msgstr "" -"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" -" test CIFAR-10. La fonction :code:`transform` normalise les données après" -" leur chargement." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" msgstr "" -"Nous devons maintenant définir la formation (fonction :code:`train()`) " -"qui passe en boucle sur l'ensemble de la formation, mesure la perte, la " -"rétropropage, puis effectue une étape d'optimisation pour chaque lot " -"d'exemples de formation." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 -msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:24 +msgid "Run the Build Process with Root User Privileges" msgstr "" -"L'évaluation du modèle est définie dans la fonction :code:`test()`. La " -"fonction boucle sur tous les échantillons de test et mesure la perte du " -"modèle en fonction de l'ensemble des données de test." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." 
msgstr "" -"Après avoir défini le chargement des données, l'architecture du modèle, " -"la formation et l'évaluation, nous pouvons tout mettre ensemble et former" -" notre CNN sur CIFAR-10." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/docker/run-as-root-user.rst:30 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "Démarrer le serveur" + +#: ../../source/docker/run-as-subprocess.rst:2 +#, fuzzy +msgid "Run ClientApp as a Subprocess" +msgstr "Vérifier le format et tester le code" + +#: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." msgstr "" -"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " -"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" -" avons construit pour créer un simple système d'apprentissage fédéré " -"composé d'un serveur et de deux clients." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. 
Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" msgstr "" -"Le projet simple d'apprentissage automatique discuté dans la section " -"précédente entraîne le modèle sur un seul ensemble de données (CIFAR-10)," -" nous appelons cela l'apprentissage centralisé. Ce concept " -"d'apprentissage centralisé, comme le montre la section précédente, est " -"probablement connu de la plupart d'entre vous, et beaucoup d'entre vous " -"l'ont déjà utilisé. Normalement, si tu veux exécuter des charges de " -"travail d'apprentissage automatique de manière fédérée, tu dois alors " -"changer la plupart de ton code et tout mettre en place à partir de zéro, " -"ce qui peut représenter un effort considérable." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/docker/run-as-subprocess.rst:17 +#, fuzzy +msgid "Dockerfile.supernode" +msgstr "Serveur de Flower" + +#: ../../source/docker/run-as-subprocess.rst:31 msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" msgstr "" -"Cependant, avec Flower, tu peux faire évoluer ton code préexistant vers " -"une configuration d'apprentissage fédéré sans avoir besoin d'une " -"réécriture majeure." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"The concept is easy to understand. 
We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -"Le concept est facile à comprendre. Nous devons démarrer un *serveur* et " -"utiliser le code dans :code:`cifar.py` pour les *clients* qui sont " -"connectés au *serveur*. Le *serveur* envoie les paramètres du modèle aux " -"clients. Les *clients* exécutent la formation et mettent à jour les " -"paramètres. Les paramètres mis à jour sont renvoyés au *serveur* qui fait" -" la moyenne de toutes les mises à jour de paramètres reçues. Ceci décrit " -"un tour du processus d'apprentissage fédéré et nous répétons cette " -"opération pour plusieurs tours." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. 
This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -"Enfin, nous allons définir notre logique *client* dans :code:`client.py` " -"et nous appuyer sur la formation centralisée définie précédemment dans " -":code:`cifar.py`. Notre *client* doit importer :code:`flwr`, mais aussi " -":code:`torch` pour mettre à jour les paramètres de notre modèle PyTorch :" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see Limitations_." msgstr "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. 
" -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr ":code:`set_parameters`" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +msgid "Before you start, make sure that:" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" -msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" -msgstr "renvoie la perte locale et la précision au serveur" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +msgid "The Docker daemon is running." +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 -msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. 
So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 +msgid "Docker Compose is `installed `_." msgstr "" -"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " -"utilisent les fonctions :code:`train()` et :code:`test()` définies " -"précédemment dans :code:`cifar.py`. Ce que nous faisons vraiment ici, " -"c'est que nous indiquons à Flower, par le biais de notre sous-classe " -":code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " -"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " -"annotations de type pour te donner une meilleure compréhension des types " -"de données qui sont transmis." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 #, fuzzy -msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" -msgstr "" -"Il ne reste plus qu'à définir une fonction qui charge le modèle et les " -"données, crée un :code:`CifarClient` et démarre ce client. Tu charges tes" -" données et ton modèle en utilisant :code:`cifar.py`. 
Démarre " -":code:`CifarClient` avec la fonction :code:`fl.client.start_client()` en " -"la faisant pointer sur la même adresse IP que celle que nous avons " -"utilisée dans :code:`server.py` :" +msgid "Run the Quickstart Example" +msgstr "Demande pour un nouveau Flower Example" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" msgstr "" -"dans chaque fenêtre (assure-toi que le serveur fonctionne avant de le " -"faire) et tu verras ton projet PyTorch (auparavant centralisé) exécuter " -"l'apprentissage fédéré sur deux clients. Félicitations !" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" +"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -"Le code source complet de cet exemple : `PyTorch : From Centralized To " -"Federated (Code) `_. Notre exemple est, bien sûr, " -"un peu trop simplifié parce que les deux clients chargent exactement le " -"même ensemble de données, ce qui n'est pas réaliste. Tu es maintenant " -"prêt à explorer davantage ce sujet. Pourquoi ne pas utiliser différents " -"sous-ensembles de CIFAR-10 sur chaque client ? 
Pourquoi ne pas ajouter " -"d'autres clients ?" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 #, fuzzy -msgid "Differential Privacy" -msgstr "Confidentialité différentielle" +msgid "Build and start the services using the following command:" +msgstr "Active la virtualenv en exécutant la commande suivante :" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 +#, fuzzy msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 +msgid "pyproject.toml" msgstr "" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 +#, fuzzy +msgid "Run the example:" +msgstr "Fédérer l'exemple" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 +msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:22 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:25 -#, fuzzy -msgid "Formal Definition" -msgstr "Compiler les définitions ProtoBuf" - -#: ../../source/explanation-differential-privacy.rst:26 -msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 -msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +msgid "After that, you can repeat the steps above." msgstr "" -#: ../../source/explanation-differential-privacy.rst:45 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #, fuzzy -msgid "Differential Privacy in Machine Learning" -msgstr "Confidentialité différentielle" +msgid "Limitations" +msgstr "Simulation de moniteur" -#: ../../source/explanation-differential-privacy.rst:46 -msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." 
-msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#, fuzzy +msgid "Quickstart Example" +msgstr "Démarrage rapide de JAX" -#: ../../source/explanation-differential-privacy.rst:53 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy -msgid "Differential Privacy in Federated Learning" -msgstr "Mise à l'échelle de l'apprentissage fédéré" +msgid "quickstart-fastai" +msgstr "Démarrage rapide fastai" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "Aucun" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#, fuzzy +msgid "quickstart-huggingface" +msgstr "Quickstart tutorials" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-jax" +msgstr "Démarrage rapide de JAX" -#: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 +#, fuzzy msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. 
The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." +"The example has not yet been updated to work with the latest ``flwr`` " +"version." msgstr "" +"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " +"mis à jour pour fonctionner avec la dernière version de Flower." -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#, fuzzy +msgid "quickstart-mlx" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." +"`Requires to run on macOS with Apple Silicon `_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:60 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#, fuzzy +msgid "quickstart-monai" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#, fuzzy +msgid "quickstart-pandas" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#, fuzzy +msgid "quickstart-pytorch-lightning" +msgstr "Démarrage rapide de PyTorch Lightning" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. 
In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." +"Requires an older pip version that is not supported by the Flower Docker " +"images." msgstr "" -#: ../../source/explanation-differential-privacy.rst:63 -msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#, fuzzy +msgid "quickstart-pytorch" +msgstr "Démarrage rapide de PyTorch" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +#, fuzzy +msgid "quickstart-sklearn-tabular" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +#, fuzzy +msgid "quickstart-tabnet" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +#, fuzzy +msgid "quickstart-tensorflow" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 +msgid "Only runs on AMD64." msgstr "" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/set-environment-variables.rst:2 +#, fuzzy +msgid "Set Environment Variables" +msgstr "Mise en place de l'environnement de codage" + +#: ../../source/docker/set-environment-variables.rst:4 msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 #, fuzzy -msgid "Central Differential Privacy" -msgstr "Confidentialité différentielle" +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "Démarrage rapide XGBoost" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." msgstr "" -#: ../../source/explanation-differential-privacy.rst:76 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." 
+"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "clipping" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 +msgid "" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." msgstr "" -#: ../../source/explanation-differential-privacy.rst:89 -msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:94 -msgid "Clipping" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:96 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 -msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. Any update exceeding this threshold is clipped back " -"to the threshold value." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." msgstr "" -#: ../../source/explanation-differential-privacy.rst:102 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " -"others." +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 -#, fuzzy -msgid "Local Differential Privacy" -msgstr "Confidentialité différentielle" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:107 -msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:116 -msgid "In this explainer, we focus on two forms of achieving Local DP:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." msgstr "" -#: ../../source/explanation-differential-privacy.rst:118 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:120 -msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." msgstr "" -#: ../../source/explanation-differential-privacy.rst:125 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." +"For production environments, you may have to use dedicated services to " +"obtain your certificates." msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. For " +"example, if the IP is ``192.168.2.33``, execute:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 -#, fuzzy -msgid "**References:**" -msgstr "Référence" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:133 -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." 
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +msgid "" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 #, fuzzy +msgid "Step 3: Start the Flower Server Components" +msgstr "Démarrer le serveur" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" msgstr "" -"McMahan, H. Brendan, et al. \"Learning differentially private recurrent " -"language models\", arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:137 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 -#, fuzzy -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." msgstr "" -"Andrew, Galen, et al. \"Differentially private learning with adaptive " -"clipping\" Advances in Neural Information Processing Systems 34 (2021) : " -"17455-17466." 
-#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" -msgstr "Évaluation fédérée" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#, fuzzy +msgid "Step 4: Start the Flower Client Components" +msgstr "Démarrer le serveur" -#: ../../source/explanation-federated-evaluation.rst:4 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." +"On your local machine, run the following command to start the client " +"components:" msgstr "" -"Il existe deux approches principales pour évaluer les modèles dans les " -"systèmes d'apprentissage fédérés : l'évaluation centralisée (ou côté " -"serveur) et l'évaluation fédérée (ou côté client)." -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" -msgstr "Évaluation centralisée" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" -msgstr "Stratégies intégrées" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#, fuzzy +msgid "Step 5: Run Your Flower Project" +msgstr "Serveur de Flower" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. 
An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" msgstr "" -"Toutes les stratégies intégrées prennent en charge l'évaluation " -"centralisée en fournissant une fonction d'évaluation lors de " -"l'initialisation. Une fonction d'évaluation est une fonction qui peut " -"prendre les paramètres du modèle global actuel comme entrée et renvoyer " -"les résultats de l'évaluation :" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" -msgstr "Stratégies personnalisées" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#, fuzzy +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" +msgstr "Démarrage rapide de scikit-learn" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." msgstr "" -"L'abstraction :code:`Strategy` fournit une méthode appelée " -":code:`evaluate` qui peut être directement utilisée pour évaluer les " -"paramètres du modèle global actuel. L'implémentation actuelle du serveur " -"appelle :code:`evaluate` après l'agrégation des paramètres et avant " -"l'évaluation fédérée (voir le paragraphe suivant)." 
- -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" -msgstr "Évaluation fédérée" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" -msgstr "Mise en œuvre de l'évaluation fédérée" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." msgstr "" -"L'évaluation côté client se fait dans la méthode :code:`Client.evaluate` " -"et peut être configurée côté serveur." -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" -msgstr "Configuration de l'évaluation fédérée" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 -msgid "" -"Federated evaluation can be configured from the server side. Built-in " -"strategies support the following arguments:" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#, fuzzy +msgid "Shut down the Flower client components:" +msgstr "Client de Flower" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" msgstr "" -"L'évaluation fédérée peut être configurée du côté du serveur. 
Les " -"stratégies intégrées prennent en charge les arguments suivants :" -#: ../../source/explanation-federated-evaluation.rst:105 +#: ../../source/docker/tutorial-quickstart-docker.rst:2 +#, fuzzy +msgid "Quickstart with Docker" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/tutorial-quickstart-docker.rst:4 msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." msgstr "" -":code:`fraction_evaluate` : un :code:`float` définissant la fraction de " -"clients qui sera sélectionnée pour l'évaluation. Si " -":code:`fraction_evaluate` est défini à :code:`0.1` et que :code:`100` " -"clients sont connectés au serveur, alors :code:`10` sera sélectionné " -"aléatoirement pour l'évaluation. Si :code:`fraction_evaluate` est défini " -"à :code:`0.0`, l'évaluation fédérée sera désactivée." -#: ../../source/explanation-federated-evaluation.rst:106 +#: ../../source/docker/tutorial-quickstart-docker.rst:7 msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." 
msgstr "" -"si :code:`fraction_evaluate` est réglé sur :code:`0.1`, " -":code:`min_evaluate_clients` est réglé sur 20, et que :code:`100` clients" -" sont connectés au serveur, alors :code:`20` clients seront sélectionnés " -"pour l'évaluation." -#: ../../source/explanation-federated-evaluation.rst:107 -msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" msgstr "" -":code:`min_available_clients` : un :code:`int` qui définit le nombre " -"minimum de clients qui doivent être connectés au serveur avant qu'un " -"cycle d'évaluation fédérée puisse commencer. Si moins de " -":code:`min_available_clients` sont connectés au serveur, le serveur " -"attendra que d'autres clients soient connectés avant de continuer à " -"échantillonner des clients pour l'évaluation." -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:45 msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." 
+"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." msgstr "" -":code:`on_evaluate_config_fn` : une fonction qui renvoie un dictionnaire " -"de configuration qui sera envoyé aux clients sélectionnés. Cette fonction" -" sera appelée à chaque tour et offre un moyen pratique de personnaliser " -"l'évaluation côté client depuis le côté serveur, par exemple pour " -"configurer le nombre d'étapes de validation effectuées." -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" -msgstr "Évaluer les mises à jour du modèle local pendant la formation" +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#, fuzzy +msgid "Step 2: Start the SuperLink" +msgstr "Démarrer le serveur" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +#, fuzzy +msgid "Open your terminal and run:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" -#: ../../source/explanation-federated-evaluation.rst:137 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" msgstr "" -"Les paramètres du modèle peuvent également être évalués pendant la " -"formation. 
:code:`Client.fit` peut renvoyer des résultats d'évaluation " -"arbitraires sous forme de dictionnaire :" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" -msgstr "Exemple de code complet" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:179 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." msgstr "" -"Pour un exemple de code complet qui utilise à la fois l'évaluation " -"centralisée et fédérée, voir l'*Exemple TensorFlow avancé* (la même " -"approche peut être appliquée aux charges de travail mises en œuvre dans " -"n'importe quel autre framework) : " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "Modèle FED" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "Table des matières" +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 +msgid "" +"``--detach``: Run the container in the background, freeing up the " +"terminal." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[Table des matières](#table-of-contents)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[Résumé](#résumé)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[Motivation](#motivation)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[Buts](#buts)" +#: ../../source/docker/tutorial-quickstart-docker.rst:80 +#, fuzzy +msgid "Step 3: Start the SuperNode" +msgstr "Démarrer le serveur" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" -msgstr "[Non-objectifs](#non-objectifs)" +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[Proposition](#proposition)" +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[Inconvénients](#inconvénients)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[Alternatives envisagées](#alternatives-considered)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[Annexe](#appendix)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "Résumé" +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -#, fuzzy -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "[TODO - phrase 1 : résumé du problème]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -#, fuzzy -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "[TODO - phrase 2 : résumé de la solution]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "Motivation" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -#, fuzzy -msgid "\\[TODO\\]" -msgstr "[TODO]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "Objectifs" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "Non-objectifs" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "Proposition" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "Inconvénients" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "Alternatives envisagées" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -#, fuzzy -msgid "\\[Alternative 1\\]" -msgstr "[Alternative 1]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 +#: ../../source/docker/tutorial-quickstart-docker.rst:124 #, fuzzy -msgid "\\[Alternative 2\\]" -msgstr "[Alternative 2]" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Doc sur l'amélioration des fleurs" +msgid "Start the second container:" +msgstr "Démarrer le serveur" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[Modèle de document d'amélioration](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[Métadonnées](#métadonnées)" +#: ../../source/docker/tutorial-quickstart-docker.rst:144 +msgid "" +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[Workflow](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker.rst:149 +msgid "" +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub Issues](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst:152 +#, fuzzy +msgid "Dockerfile.clientapp" +msgstr "Flower ClientApp." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[Google Docs](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the Dockerfile" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -"Une amélioration de la fleur est un processus de développement " -"standardisé pour" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -"fournir une structure commune pour proposer des changements plus " -"importants" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "s'assurer que la motivation du changement est claire" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"Any subsequent commands that reference a directory will be relative to " +"this directory." msgstr "" -"conserver les informations sur le projet dans un système de contrôle des " -"versions" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" msgstr "" -"documenter la motivation des changements qui ont un impact sur " -"l'utilisateur" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "réserve les problèmes GitHub pour le suivi du travail en vol" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"from the current working directory into the container's ``/app`` " +"directory." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" msgstr "" -"s'assurer que les participants de la communauté peuvent mener à bien les " -"changements dans le cadre d'une ou plusieurs versions et que les parties " -"prenantes sont représentées de manière adéquate tout au long du processus" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "Par conséquent, un document d'amélioration combine des aspects de" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "from the ``pyproject.toml``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "une caractéristique, et un document de suivi des efforts" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "un document sur les exigences du produit" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "un document de conception" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." 
+"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" msgstr "" -"en un seul fichier, qui est créé progressivement en collaboration avec la" -" communauté." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." msgstr "" -"Pour les changements lointains ou les fonctionnalités proposées à Flower," -" une abstraction au-delà d'une simple question GitHub ou d'une demande de" -" tirage est nécessaire pour comprendre et communiquer les changements à " -"venir dans le projet." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -"L'objectif de ce processus est de réduire la quantité de \"connaissances " -"tribales\" dans notre communauté. En déplaçant les décisions des fils de " -"discussion Slack, des appels vidéo et des conversations de couloir vers " -"un artefact bien suivi, ce processus vise à améliorer la communication et" -" la découvrabilité." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. 
If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." msgstr "" -"Si une amélioration doit être décrite par écrit ou verbalement à " -"quelqu'un d'autre que l'auteur ou le développeur, il faut envisager de " -"créer un document d'amélioration." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/docker/tutorial-quickstart-docker.rst:186 msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." msgstr "" -"De même, tout effort technique (refactorisation, changement architectural" -" majeur) qui aura un impact sur une grande partie de la communauté de " -"développement doit également être communiqué à grande échelle. Le " -"processus d'amélioration est adapté à cela, même s'il n'aura aucun impact" -" sur l'utilisateur ou l'opérateur type." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. 
This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" -"Pour les petits changements et ajouts, passer par le processus " -"d'amélioration prendrait beaucoup de temps et serait inutile. Cela " -"inclut, par exemple, l'ajout de nouveaux algorithmes d'apprentissage " -"fédéré, car ceux-ci ne font qu'ajouter des fonctionnalités sans changer " -"le fonctionnement ou l'utilisation de Flower." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." msgstr "" -"Les améliorations sont différentes des demandes de fonctionnalités, car " -"elles fournissent déjà un chemin tracé pour la mise en œuvre et sont " -"défendues par les membres de la communauté." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:205 +#, fuzzy +msgid "Start the first ClientApp container:" +msgstr "Utilisation du moteur du client virtuel" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." 
+"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -"Une amélioration est capturée dans un fichier Markdown qui suit un modèle" -" défini et un flux de travail pour examiner et stocker les documents " -"d'amélioration pour référence - le Doc d'amélioration." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "Modèle de document d'amélioration" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" msgstr "" -"Chaque document d'amélioration est fourni sous la forme d'un fichier " -"Markdown ayant la structure suivante" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." 
msgstr "" -"Métadonnées (comme [décrit ci-dessous](#metadata) sous la forme d'un " -"préambule YAML)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "Titre (le même que dans les métadonnées)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "Table des matières (si nécessaire)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "Notes/Contraintes/Cavats (facultatif)" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "Détails de la conception (facultatif)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "Critères d'obtention du diplôme" +#: ../../source/docker/tutorial-quickstart-docker.rst:226 +msgid "Start the second ClientApp container:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "Stratégie de mise à niveau/rétrogradation (le cas échéant)" +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#, fuzzy +msgid "Step 5: Start the SuperExec" +msgstr "Démarrer le serveur" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "À titre de référence, ce document suit la structure ci-dessus." +#: ../../source/docker/tutorial-quickstart-docker.rst:239 +msgid "" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "Métadonnées" +#: ../../source/docker/tutorial-quickstart-docker.rst:242 +msgid "" +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -"**numérofed** (Obligatoire) Le `numérofed` du dernier document " -"d'amélioration de la fleur + 1. Avec ce numéro, il devient facile de " -"faire référence à d'autres propositions." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." -msgstr "**titre** (obligatoire) Le titre de la proposition en langage clair." +#: ../../source/docker/tutorial-quickstart-docker.rst:248 +msgid "Dockerfile.superexec" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -"**status** (obligatoire) L'état actuel de la proposition. Voir " -"[workflow](#workflow) pour les états possibles." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**authors** (Required) A list of authors of the proposal. 
This is simply " -"the GitHub ID." +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -"**authors** (Obligatoire) Une liste des auteurs de la proposition, il " -"s'agit simplement de l'identifiant GitHub." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" msgstr "" -"**creation-date** (Obligatoire) Date à laquelle la proposition a été " -"soumise pour la première fois dans un RP." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" msgstr "" -"**dernière mise à jour** (Facultatif) La date à laquelle la proposition a" -" été modifiée de manière significative pour la dernière fois." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" msgstr "" -"**see-also** (Facultatif) Une liste d'autres propositions qui sont " -"pertinentes par rapport à celle-ci." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**replaces** (Facultatif) Une liste de propositions que celle-ci remplace." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:290 +#, fuzzy +msgid "Start the SuperExec container:" +msgstr "Démarrer le serveur" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" msgstr "" -"**superseded-by** (Facultatif) Une liste de propositions que celle-ci " -"remplace." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "Flux de travail" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." msgstr "" -"L'idée à l'origine de l'amélioration doit déjà avoir fait l'objet d'une " -"discussion ou d'une présentation au sein de la communauté. À ce titre, " -"elle a besoin d'un champion, généralement l'auteur, qui se charge de " -"l'amélioration. Cette personne doit également trouver des committers to " -"Flower prêts à examiner la proposition." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:310 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." msgstr "" -"Les nouvelles améliorations sont enregistrées avec un nom de fichier de " -"la forme `NNN-YYYMMDD-enhancement-title.md`, `NNNN` étant le numéro du " -"document d'amélioration de la fleur, dans `enhancements`. Toutes les " -"améliorations commencent à l'état `provisionnel` dans le cadre d'une " -"demande d'extraction. Les discussions sont effectuées dans le cadre de " -"l'examen de la demande d'extraction." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -"Une fois qu'une amélioration a été examinée et approuvée, son statut " -"passe à `implémentable`. L'implémentation réelle est alors réalisée dans " -"des demandes d'extension séparées. Ces demandes d'extension doivent " -"mentionner l'amélioration concernée dans leur description. Une fois " -"l'implémentation réalisée, le statut de la proposition passe à " -"`implémented`." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" msgstr "" -"Sous certaines conditions, d'autres états sont possibles. 
Une " -"amélioration a les états suivants :" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 -msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." msgstr "" -"`provisoire` : L'amélioration a été proposée et est en cours de " -"définition. C'est l'état de départ pendant que la proposition est étoffée" -" et activement définie et discutée." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "`implementable` : L'amélioration a été examinée et approuvée." +#: ../../source/docker/tutorial-quickstart-docker.rst:320 +msgid "Step 6: Run the Quickstart Project" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 -msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." +#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#, fuzzy +msgid "Add the following lines to the ``pyproject.toml``:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." + +#: ../../source/docker/tutorial-quickstart-docker.rst:331 +msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -"`implemented` : L'amélioration a été mise en œuvre et n'est plus " -"activement modifiée." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." +#: ../../source/docker/tutorial-quickstart-docker.rst:337 +msgid "Follow the SuperExec logs to track the execution of the run:" msgstr "" -"`deferred` : L'amélioration est proposée mais n'est pas activement " -"travaillée." 
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +#, fuzzy +msgid "Step 7: Update the Application" +msgstr "Étape 3 : Sérialisation personnalisée" + +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." +"Change the application code. For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -"`rejeté` : Les auteurs et les réviseurs ont décidé que cette amélioration" -" n'allait pas de l'avant." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`withdrawn` : Les auteurs ont retiré l'amélioration." +#: ../../source/docker/tutorial-quickstart-docker.rst:349 +#, fuzzy +msgid "quickstart_docker/task.py" +msgstr "Démarrage rapide des Pandas" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "`replaced` : L'amélioration a été remplacée par une nouvelle amélioration." +#: ../../source/docker/tutorial-quickstart-docker.rst:356 +msgid "Stop the current ClientApp containers:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 -msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "Chargement des données" + +#: ../../source/docker/tutorial-quickstart-docker.rst:368 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -"L'ajout d'un processus supplémentaire à ceux déjà fournis par GitHub " -"(Issues et Pull Requests) ajoute plus de complexité et peut constituer un" -" obstacle pour les éventuels nouveaux contributeurs." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." +#: ../../source/docker/tutorial-quickstart-docker.rst:383 +msgid "Run the updated project:" msgstr "" -"Élargir le modèle de proposition au-delà de la description d'une seule " -"phrase actuellement requise dans le modèle de questions sur les " -"caractéristiques peut constituer une lourde charge pour les personnes " -"dont l'anglais n'est pas la langue maternelle." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "Questions sur GitHub" +#: ../../source/docker/tutorial-quickstart-docker.rst:390 +msgid "Step 8: Clean Up" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 -msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:392 +msgid "Remove the containers and the bridge network:" msgstr "" -"Il est possible d'utiliser GitHub Issues pour ce type d'améliorations. On" -" pourrait utiliser, par exemple, des balises pour les différencier et les" -" filtrer par rapport aux autres problèmes. Le principal problème concerne" -" la discussion et la révision d'une amélioration : les GitHub Issues " -"n'ont qu'un seul fil de discussion pour les commentaires. Les " -"améliorations ont généralement plusieurs fils de discussion en même temps" -" pour différentes parties de la documentation. La gestion de ces " -"multiples discussions peut être déroutante lorsque l'on utilise GitHub " -"Issues." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "Google Docs" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 +#, fuzzy +msgid "Where to Go Next" +msgstr "Par où commencer" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 -msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." +#: ../../source/docker/tutorial-quickstart-docker.rst:406 +msgid ":doc:`enable-tls`" msgstr "" -"Les Google Docs permettent de multiplier les fils de discussion. Mais " -"comme les Google Docs sont hébergés en dehors du projet, il faut veiller " -"à ce que la communauté puisse les découvrir. Une liste de liens vers " -"toutes les propositions doit être gérée et mise à la disposition de la " -"communauté. 
Par rapport à l'envoi de propositions dans le cadre du " -"référentiel de Flower, le risque de liens manquants est beaucoup plus " -"élevé." -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Doc pour l'amélioration des fleurs" +#: ../../source/docker/tutorial-quickstart-docker.rst:407 +msgid ":doc:`persist-superlink-state`" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 +#: ../../source/docker/tutorial-quickstart-docker.rst:408 +msgid ":doc:`tutorial-quickstart-docker-compose`" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 #, fuzzy -msgid "Aggregate evaluation results" -msgstr "Résultats globaux de l'évaluation." +msgid "Quickstart with Docker Compose" +msgstr "Démarrage rapide XGBoost" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "Agréger les résultats de l'évaluation personnalisée" - -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. Clients can " -"return custom metrics to the server by returning a dictionary:" +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." 
msgstr "" -"La même approche de personnalisation :code:`Stratégie` peut être utilisée" -" pour agréger les résultats d'évaluation personnalisés provenant de " -"clients individuels. Les clients peuvent renvoyer des mesures " -"personnalisées au serveur en renvoyant un dictionnaire :" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 -msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 +msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -"Le serveur peut alors utiliser une stratégie personnalisée pour agréger " -"les mesures fournies dans ces dictionnaires :" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 +msgid "" +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. " -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +#, fuzzy +msgid "Step 2: Run Flower in Insecure Mode" +msgstr "Serveur de Flower" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 +msgid "" +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +msgid "``docker compose``: The Docker command to run the Docker Compose tool." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." 
msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." -#: ../../source/how-to-authenticate-supernodes.rst:15 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." +"``--build``: Rebuild the images for each service if they don't already " +"exist." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 -msgid "Let's break down the authentication flags:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 +msgid "" +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 -msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 -msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:44 -msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 +msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:47 -msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "Enable node authentication in :code:`SuperNode`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#, fuzzy +msgid "Step 4: Update the Application" +msgstr "Étape 3 : Sérialisation personnalisée" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +msgid "In the next step, change the application code." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:55 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). 
Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:66 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#, fuzzy +msgid "Rebuild and restart the services." +msgstr "Nous pouvons déjà démarrer le *serveur* :" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:70 -msgid "Security notice" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +msgid "If you haven't made any changes, you can skip this step." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:72 -msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" -msgstr "Conclusion" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +msgid "Run the updated quickstart example:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:79 -msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/how-to-configure-clients.rst:2 -#, fuzzy -msgid "Configure clients" -msgstr "Configurer les clients" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +msgid "Step 5: Persisting the SuperLink State" +msgstr "" -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -"En plus des paramètres du modèle, Flower peut envoyer des valeurs de " -"configuration aux clients. Les valeurs de configuration peuvent être " -"utilisées à diverses fins. 
Elles constituent, par exemple, un moyen " -"populaire de contrôler les hyperparamètres côté client à partir du " -"serveur." - -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" -msgstr "Valeurs de configuration" -#: ../../source/how-to-configure-clients.rst:9 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." msgstr "" -"Les valeurs de configuration sont représentées sous forme de dictionnaire" -" avec des clés `str`` et des valeurs de type `bool`, `bytes`, `double` " -"(float de précision 64 bits), `int`, ou `str` (ou des types équivalents " -"dans d'autres langages). Voici un exemple de dictionnaire de " -"configuration en Python :" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -"Flower sérialise ces dictionnaires de configuration (ou *config dict* en " -"abrégé) dans leur représentation ProtoBuf, les transporte vers le client " -"à l'aide de gRPC, puis les désérialise à nouveau en dictionnaires Python." 
-#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Run the command:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." msgstr "" -"Actuellement, il n'est pas possible d'envoyer directement des types de " -"collections (par exemple, ``Set``, ``List``, ``Map``) en tant que valeurs" -" dans les dictionnaires de configuration. Il existe plusieurs solutions " -"pour envoyer des collections en tant que valeurs en les convertissant en " -"l'un des types de valeurs pris en charge (et en les reconvertissant du " -"côté client)." -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." +"Docker merges Compose files according to `merging rules " +"`_." msgstr "" -"On peut, par exemple, convertir une liste de nombres à virgule flottante " -"en une chaîne JSON, puis envoyer la chaîne JSON à l'aide du dictionnaire " -"de configuration, et enfin reconvertir la chaîne JSON en une liste de " -"nombres à virgule flottante sur le client." 
-#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" -msgstr "Configuration par le biais de stratégies intégrées" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +msgid "Rerun the ``quickstart-compose`` project:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +msgid "Check the content of the ``state`` directory:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." msgstr "" -"La façon la plus simple d'envoyer des valeurs de configuration aux " -"clients est d'utiliser une stratégie intégrée comme :code:`FedAvg`. Les " -"stratégies intégrées prennent en charge ce que l'on appelle les fonctions" -" de configuration. Une fonction de configuration est une fonction que la " -"stratégie intégrée appelle pour obtenir le dictionnaire de configuration " -"pour le tour en cours. Elle transmet ensuite le dictionnaire de " -"configuration à tous les clients sélectionnés au cours de ce tour." 
-#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +msgid "Step 6: Run Flower with TLS" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." msgstr "" -"Commençons par un exemple simple. Imaginons que nous voulions envoyer (a)" -" la taille du lot que le client doit utiliser, (b) le cycle global actuel" -" de l'apprentissage fédéré et (c) le nombre d'époques à former du côté " -"client. Notre fonction de configuration pourrait ressembler à ceci :" -#: ../../source/how-to-configure-clients.rst:47 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." 
msgstr "" -"Pour que les stratégies intégrées utilisent cette fonction, nous pouvons " -"la passer à ``FedAvg`` lors de l'initialisation en utilisant le paramètre" -" :code:`on_fit_config_fn` :" -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "Côté client, nous recevons le dictionnaire de configuration dans ``fit`` :" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +msgid "Restart the services with TLS enabled:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 +msgid "Step 7: Add another SuperNode" +msgstr "" -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." msgstr "" -"Il existe également une fonction `on_evaluate_config_fn` pour configurer " -"l'évaluation, qui fonctionne de la même manière. Ce sont des fonctions " -"séparées car on peut vouloir envoyer différentes valeurs de configuration" -" à `evaluate` (par exemple, pour utiliser une taille de lot différente)." -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. 
If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." msgstr "" -"Les stratégies intégrées appellent cette fonction à chaque tour " -"(c'est-à-dire à chaque fois que `Strategy.configure_fit` ou " -"`Strategy.configure_evaluate` s'exécute). Appeler `on_evaluate_config_fn`" -" à chaque tour nous permet de varier/changer le dict de config au cours " -"de tours consécutifs. Si nous voulions mettre en place un calendrier " -"d'hyperparamètres, par exemple, pour augmenter le nombre d'époques " -"locales au cours des derniers tours, nous pourrions faire ce qui suit :" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." -msgstr "La stratégie :code:`FedAvg` appellera cette fonction *à chaque tour*." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 +msgid "In ``compose.yml``, add the following:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" -msgstr "Configuration des clients individuels" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +msgid "compose.yml" +msgstr "" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -"Dans certains cas, il est nécessaire d'envoyer des valeurs de " -"configuration différentes à des clients différents." 
-#: ../../source/how-to-configure-clients.rst:89 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." msgstr "" -"Ceci peut être réalisé en personnalisant une stratégie existante ou en " -"`mettant en œuvre une stratégie personnalisée à partir de zéro " -"`_. " -"Voici un exemple absurde qui personnalise :code:`FedAvg` en ajoutant une " -"paire clé/valeur de configuration personnalisée ``\"hello\" : \"world\"``" -" au config dict d'un *seul client* (uniquement le premier client de la " -"liste, les autres clients de cette série ne recevant pas cette valeur de " -"configuration \"spéciale\") :" - -#: ../../source/how-to-configure-logging.rst:2 -#, fuzzy -msgid "Configure logging" -msgstr "Configurer les clients" -#: ../../source/how-to-configure-logging.rst:4 -msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 +msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/how-to-configure-logging.rst:13 -msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. 
In this way, the " -"logger would typically display information on your terminal as follows:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +msgid "with-tls.yml" msgstr "" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/how-to-configure-logging.rst:53 -msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -"Avec ce qui précède, Flower enregistrera le log que vous voyez sur votre " -"terminal dans :code:`log.txt`. Ce fichier sera créé dans le répertoire " -"depuis lequel le code est exécuté. 
Si nous inspectons nous voyons que le " -"log ci-dessous est également enregistré mais préfixé avec " -":code:`identifier` sur chaque ligne :" -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" -msgstr "Loggez vos propres messages" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +msgid "with-state.yml" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#, fuzzy +msgid "Restart the services:" +msgstr "Démarrer le serveur" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +msgid "Step 9: Merge Multiple Compose Files" +msgstr "" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." +"You can merge multiple Compose files into a single file. For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." msgstr "" -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +msgid "Step 10: Clean Up" msgstr "" -#: ../../source/how-to-configure-logging.rst:130 -msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. 
This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +msgid "Remove all services and volumes:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:2 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 #, fuzzy -msgid "Enable SSL connections" -msgstr "Collecte centralisée des données" +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "Démarrage rapide XGBoost" -#: ../../source/how-to-enable-ssl-connections.rst:4 -#, fuzzy -msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." +#: ../../source/docker/use-a-different-version.rst:2 +msgid "Use a Different Flower Version" msgstr "" -"Ce guide décrit comment démarrer un serveur Flower sécurisé par SSL et " -"comment un client Flower peut établir une connexion sécurisée avec lui." -#: ../../source/how-to-enable-ssl-connections.rst:7 -#, fuzzy +#: ../../source/docker/use-a-different-version.rst:4 msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." msgstr "" -"Un exemple de code complet démontrant une connexion sécurisée peut être " -"trouvé ici `_." 
-#: ../../source/how-to-enable-ssl-connections.rst:10 -#, fuzzy +#: ../../source/docker/use-a-different-version.rst:10 msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -"L'exemple de code est accompagné d'un fichier README.md qui t'expliquera " -"comment le démarrer. Bien qu'il soit déjà activé par SSL, il peut être " -"moins descriptif sur la façon de procéder. Tiens-toi en à ce guide pour " -"une introduction plus approfondie sur le sujet." -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" -msgstr "Certificats" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "Exemple : FedBN dans PyTorch - De la centralisation à la fédération" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 #, fuzzy msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. 
We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" -"L'utilisation de connexions compatibles avec le protocole SSL nécessite " -"que des certificats soient transmis au serveur et au client. Pour les " -"besoins de ce guide, nous allons générer des certificats auto-signés. " -"Comme cela peut devenir assez complexe, nous allons te demander " -"d'exécuter le script dans :code:`examples/advanced-" -"tensorflow/certificates/generate.sh`" +"Ce tutoriel te montrera comment utiliser Flower pour construire une " +"version fédérée d'une charge de travail d'apprentissage automatique " +"existante avec `FedBN `_, une stratégie" +" de formation fédérée conçue pour les données non-identifiées. Nous " +"utilisons PyTorch pour former un réseau neuronal convolutif (avec des " +"couches de normalisation par lots) sur l'ensemble de données CIFAR-10. " +"Lors de l'application de FedBN, seules quelques modifications sont " +"nécessaires par rapport à `Exemple : PyTorch - De la centralisation à la " +"fédération `_." -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "Centralized Training" +msgstr "Formation centralisée" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 #, fuzzy msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" -"Cela générera les certificats dans :code:`examples/advanced-" -"tensorflow/.cache/certificates`." 
+"Tous les fichiers sont révisés sur la base de `Exemple : PyTorch - From " +"Centralized To Federated `_. La seule chose à faire est de modifier " +"le fichier appelé :code:`cifar.py`, la partie révisée est montrée ci-" +"dessous :" -#: ../../source/how-to-enable-ssl-connections.rst:31 -#, fuzzy +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." msgstr "" -"L'approche de la génération des certificats SSL dans cet exemple peut " -"servir d'inspiration et de point de départ, mais ne doit pas être " -"considérée comme complète pour les environnements de production." - -#: ../../source/how-to-enable-ssl-connections.rst:39 -#, fuzzy -msgid "Server (SuperLink)" -msgstr "flower-superlink" +"L'architecture du modèle définie dans la classe Net() est ajoutée avec " +"les couches de normalisation par lots en conséquence." 
-#: ../../source/how-to-enable-ssl-connections.rst:41 -#, fuzzy -msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 +msgid "You can now run your machine learning workload:" msgstr "" -"Nous allons maintenant montrer comment écrire un client qui utilise les " -"scripts générés précédemment :" +"Tu peux maintenant exécuter ta charge de travail d'apprentissage " +"automatique :" -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#, fuzzy msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" +"Jusqu'à présent, tout ceci devrait te sembler assez familier si tu as " +"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" +" avons construit pour créer un système d'apprentissage fédéré au sein de " +"FedBN, le système se compose d'un serveur et de deux clients." -#: ../../source/how-to-enable-ssl-connections.rst:54 -#, fuzzy -msgid "Client (SuperNode)" -msgstr "Codes d'état du client." 
+#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 +msgid "Federated Training" +msgstr "Formation fédérée" -#: ../../source/how-to-enable-ssl-connections.rst:56 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 #, fuzzy msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" -"Nous allons maintenant montrer comment écrire un client qui utilise les " -"scripts générés précédemment :" +"Si vous avez lu `Exemple : PyTorch - From Centralized To Federated " +"`_, les parties suivantes sont faciles à suivre, seules " +"les fonctions :code:`get_parameters` et :code:`set_parameters` dans " +":code:`client.py` ont besoin d'être révisées. Si ce n'est pas le cas, " +"veuillez lire `Exemple : PyTorch - From Centralized To Federated " +"`. d'abord." -#: ../../source/how-to-enable-ssl-connections.rst:64 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 #, fuzzy msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"Our example consists of one *server* and two *clients*. In FedBN, " +"``server.py`` keeps unchanged, we can start the server directly." msgstr "" -"En définissant :code:`root_certificates`, le client s'attend à recevoir " -"les certificats racine codés en PEM sous forme de chaîne d'octets. Nous " -"utilisons à nouveau :code:`Path` pour simplifier la lecture de ces " -"certificats sous forme de chaînes d'octets." 
+"Notre exemple consiste en un *serveur* et deux *clients*. Dans FedBN, " +":code:`server.py` reste inchangé, nous pouvons démarrer le serveur " +"directement." -#: ../../source/how-to-enable-ssl-connections.rst:70 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 #, fuzzy msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." msgstr "" -"Tu devrais maintenant avoir appris à générer des certificats auto-signés " -"à l'aide du script donné, à démarrer un serveur compatible SSL et à " -"demander à un client d'établir une connexion sécurisée avec lui." +"Enfin, nous allons réviser notre logique *client* en modifiant " +":code:`get_parameters` et :code:`set_parameters` dans :code:`client.py`, " +"nous allons exclure les paramètres de normalisation des lots de la liste " +"des paramètres du modèle lors de l'envoi ou de la réception depuis le " +"serveur." 
-#: ../../source/how-to-enable-ssl-connections.rst:75 -#, fuzzy -msgid "Additional resources" -msgstr "Ressources supplémentaires" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "Tu peux maintenant ouvrir deux autres fenêtres de terminal et lancer" -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" msgstr "" -"Ces sources supplémentaires peuvent être pertinentes si tu souhaites " -"approfondir le sujet des certificats :" +"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " +"d'exécution avant de le faire) et tu verras ton projet PyTorch " +"(auparavant centralisé) exécuter l'apprentissage fédéré avec la stratégie" +" FedBN sur deux clients. Félicitations !" 
-#: ../../source/how-to-enable-ssl-connections.rst:79 -msgid "`Let's Encrypt `_" -msgstr "`Let's Encrypt `_" - -#: ../../source/how-to-enable-ssl-connections.rst:80 -msgid "`certbot `_" -msgstr "`certbot `_" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 +msgid "Next Steps" +msgstr "Prochaines étapes" -#: ../../source/how-to-implement-strategies.rst:2 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 #, fuzzy -msgid "Implement strategies" -msgstr "Mettre en place des stratégies" - -#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" msgstr "" -"L'abstraction de la stratégie permet de mettre en œuvre des stratégies " -"entièrement personnalisées. Une stratégie est essentiellement " -"l'algorithme d'apprentissage fédéré qui s'exécute sur le serveur. Les " -"stratégies décident comment échantillonner les clients, comment " -"configurer les clients pour la formation, comment agréger les mises à " -"jour et comment évaluer les modèles. Flower fournit quelques stratégies " -"intégrées qui sont basées sur la même API que celle décrite ci-dessous." 
+"Le code source complet de cet exemple se trouve ici " +"`_. Notre exemple est bien sûr un peu trop " +"simplifié parce que les deux clients chargent exactement le même ensemble" +" de données, ce qui n'est pas réaliste. Tu es maintenant prêt à " +"approfondir ce sujet. Pourquoi ne pas utiliser différents sous-ensembles " +"de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter d'autres clients " +"?" -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" -msgstr "L'abstraction :code:`Stratégie`" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "Exemple : PyTorch - De la centralisation à la fédération" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" -"Toutes les implémentations de stratégies sont dérivées de la classe de " -"base abstraite :code:`flwr.server.strategy.Strategy`, qu'il s'agisse " -"d'implémentations intégrées ou d'implémentations tierces. 
Cela signifie " -"que les implémentations de stratégies personnalisées ont exactement les " -"mêmes capacités à leur disposition que les implémentations intégrées." +"Ce tutoriel te montrera comment utiliser Flower pour construire une " +"version fédérée d'une charge de travail d'apprentissage automatique " +"existante. Nous utilisons PyTorch pour entraîner un réseau neuronal " +"convolutif sur l'ensemble de données CIFAR-10. Tout d'abord, nous " +"présentons cette tâche d'apprentissage automatique avec une approche " +"d'entraînement centralisée basée sur le tutoriel `Deep Learning with " +"PyTorch " +"`_. " +"Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " +"exécuter l'entraînement de manière fédérée." -#: ../../source/how-to-implement-strategies.rst:18 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." msgstr "" -"L'abstraction de la stratégie définit quelques méthodes abstraites qui " -"doivent être mises en œuvre :" +"Nous commençons par une brève description du code d'entraînement CNN " +"centralisé. Si tu veux une explication plus approfondie de ce qui se " +"passe, jette un coup d'œil au tutoriel officiel `PyTorch " +"`_." -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 +#, fuzzy msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"Let's create a new file called ``cifar.py`` with all the components " +"required for a traditional (centralized) training on CIFAR-10. 
First, all" +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" -"La création d'une nouvelle stratégie implique la mise en œuvre d'une " -"nouvelle :code:`classe` (dérivée de la classe de base abstraite " -":code:`Stratégie`) qui met en œuvre les méthodes abstraites présentées " -"précédemment :" - -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" -msgstr "Le serveur Flower appelle ces méthodes dans l'ordre suivant :" - -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." -msgstr "Les sections suivantes décrivent chacune de ces méthodes plus en détail." - -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" -msgstr "La méthode :code:`initialize_parameters` (initialisation des paramètres)" +"Créons un nouveau fichier appelé :code:`cifar.py` avec tous les " +"composants requis pour une formation traditionnelle (centralisée) sur le " +"CIFAR-10. Tout d'abord, tous les paquets requis (tels que :code:`torch` " +"et :code:`torchvision`) doivent être importés. Tu peux voir que nous " +"n'importons aucun paquet pour l'apprentissage fédéré. Tu peux conserver " +"toutes ces importations telles quelles même lorsque nous ajouterons les " +"composants d'apprentissage fédéré à un moment ultérieur." -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +#, fuzzy msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. 
It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in ``class Net()``." msgstr "" -":code:`initialize_parameters` n'est appelé qu'une seule fois, au tout " -"début d'une exécution. Il est chargé de fournir les paramètres initiaux " -"du modèle global sous une forme sérialisée (c'est-à-dire sous la forme " -"d'un objet :code:`Parameters`)." +"Comme nous l'avons déjà mentionné, nous utiliserons l'ensemble de données" +" CIFAR-10 pour cette charge de travail d'apprentissage automatique. " +"L'architecture du modèle (un réseau neuronal convolutif très simple) est " +"définie dans :code:`class Net()`." -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 +#, fuzzy msgid "" -"Built-in strategies return user-provided initial parameters. The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." msgstr "" -"Les stratégies intégrées renvoient les paramètres initiaux fournis par " -"l'utilisateur. L'exemple suivant montre comment les paramètres initiaux " -"peuvent être transmis à :code:`FedAvg` :" +"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" +" test CIFAR-10. La fonction :code:`transform` normalise les données après" +" leur chargement." -#: ../../source/how-to-implement-strategies.rst:209 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 +#, fuzzy msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. 
If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." msgstr "" -"Le serveur Flower appelle :code:`initialize_parameters`, qui renvoie les " -"paramètres passés à :code:`initial_parameters`, ou :code:`None`. Si aucun" -" paramètre n'est renvoyé par :code:`initialize_parameters` (c'est-à-dire " -":code:`None`), le serveur sélectionne au hasard un client et lui demande " -"de fournir ses paramètres. Il s'agit d'une fonction de commodité qui " -"n'est pas recommandée dans la pratique, mais qui peut être utile pour le " -"prototypage. Dans la pratique, il est recommandé de toujours utiliser " -"l'initialisation des paramètres du côté du serveur." +"Nous devons maintenant définir la formation (fonction :code:`train()`) " +"qui passe en boucle sur l'ensemble de la formation, mesure la perte, la " +"rétropropage, puis effectue une étape d'optimisation pour chaque lot " +"d'exemples de formation." -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 +#, fuzzy msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." -" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." +"The evaluation of the model is defined in the function ``test()``. 
The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." msgstr "" -"L'initialisation des paramètres côté serveur est un mécanisme puissant. " -"Elle peut être utilisée, par exemple, pour reprendre l'entraînement à " -"partir d'un point de contrôle précédemment sauvegardé. C'est également la" -" capacité fondamentale nécessaire pour mettre en œuvre des approches " -"hybrides, par exemple, pour affiner un modèle pré-entraîné à l'aide de " -"l'apprentissage fédéré." - -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" -msgstr "La méthode :code:`configure_fit`" +"L'évaluation du modèle est définie dans la fonction :code:`test()`. La " +"fonction boucle sur tous les échantillons de test et mesure la perte du " +"modèle en fonction de l'ensemble des données de test." -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." msgstr "" -":code:`configure_fit` est chargé de configurer le prochain tour de " -"formation. Que signifie *configurer* dans ce contexte ? Configurer un " -"tour signifie sélectionner des clients et décider des instructions à leur" -" envoyer. La signature de :code:`configure_fit` l'indique clairement :" +"Après avoir défini le chargement des données, l'architecture du modèle, " +"la formation et l'évaluation, nous pouvons tout mettre ensemble et former" +" notre CNN sur CIFAR-10." 
-#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" -"La valeur de retour est une liste de tuples, chacun représentant les " -"instructions qui seront envoyées à un client particulier. Les " -"implémentations de stratégies effectuent généralement les étapes " -"suivantes dans :code:`configure_fit` :" +"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " +"déjà utilisé PyTorch. Passons à l'étape suivante et utilisons ce que nous" +" avons construit pour créer un simple système d'apprentissage fédéré " +"composé d'un serveur et de deux clients." -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." 
msgstr "" -"Utilise le :code:`client_manager` pour échantillonner au hasard tous les " -"clients disponibles (ou un sous-ensemble d'entre eux) (chacun représenté " -"par un objet :code:`ClientProxy`)" +"Le projet simple d'apprentissage automatique discuté dans la section " +"précédente entraîne le modèle sur un seul ensemble de données (CIFAR-10)," +" nous appelons cela l'apprentissage centralisé. Ce concept " +"d'apprentissage centralisé, comme le montre la section précédente, est " +"probablement connu de la plupart d'entre vous, et beaucoup d'entre vous " +"l'ont déjà utilisé. Normalement, si tu veux exécuter des charges de " +"travail d'apprentissage automatique de manière fédérée, tu dois alors " +"changer la plupart de ton code et tout mettre en place à partir de zéro, " +"ce qui peut représenter un effort considérable." -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." msgstr "" -"Associe chaque :code:`ClientProxy` au même :code:`FitIns` contenant le " -"modèle global actuel :code:`parameters` et :code:`config` dict" +"Cependant, avec Flower, tu peux faire évoluer ton code préexistant vers " +"une configuration d'apprentissage fédéré sans avoir besoin d'une " +"réécriture majeure." -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." 
+"The concept is easy to understand. We have to start a *server* and then " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." msgstr "" -"Les implémentations plus sophistiquées peuvent utiliser " -":code:`configure_fit` pour mettre en œuvre une logique de sélection des " -"clients personnalisée. Un client ne participera à un tour que si le " -":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " -":code:`configure_fit`." +"Le concept est facile à comprendre. Nous devons démarrer un *serveur* et " +"utiliser le code dans :code:`cifar.py` pour les *clients* qui sont " +"connectés au *serveur*. Le *serveur* envoie les paramètres du modèle aux " +"clients. Les *clients* exécutent la formation et mettent à jour les " +"paramètres. Les paramètres mis à jour sont renvoyés au *serveur* qui fait" +" la moyenne de toutes les mises à jour de paramètres reçues. Ceci décrit " +"un tour du processus d'apprentissage fédéré et nous répétons cette " +"opération pour plusieurs tours." -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 +#, fuzzy msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." 
+"Our example consists of one *server* and two *clients*. Let's set up " +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." msgstr "" -"La structure de cette valeur de retour offre beaucoup de souplesse à " -"l'utilisateur. Comme les instructions sont définies par client, des " -"instructions différentes peuvent être envoyées à chaque client, ce qui " -"permet d'élaborer des stratégies personnalisées pour former, par exemple," -" différents modèles sur différents clients, ou utiliser différents " -"hyperparamètres sur différents clients (via le dict :code:`config`)." +"Notre exemple consiste en un *serveur* et deux *clients*. Commençons par " +"configurer :code:`server.py`. Le *serveur* doit importer le paquet Flower" +" :code:`flwr`. Ensuite, nous utilisons la fonction :code:`start_server` " +"pour démarrer un serveur et lui demander d'effectuer trois cycles " +"d'apprentissage fédéré." -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" -msgstr "La méthode :code:`aggregate_fit` (agrégation)" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 +msgid "We can already start the *server*:" +msgstr "Nous pouvons déjà démarrer le *serveur* :" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#, fuzzy msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. 
Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" -":code:`aggregate_fit` est chargé d'agréger les résultats renvoyés par les" -" clients qui ont été sélectionnés et à qui on a demandé de s'entraîner " -"dans :code:`configure_fit`." +"Enfin, nous allons définir notre logique *client* dans :code:`client.py` " +"et nous appuyer sur la formation centralisée définie précédemment dans " +":code:`cifar.py`. Notre *client* doit importer :code:`flwr`, mais aussi " +":code:`torch` pour mettre à jour les paramètres de notre modèle PyTorch :" -#: ../../source/how-to-implement-strategies.rst:258 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 +#, fuzzy msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. ``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" msgstr "" -"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " -"que le serveur obtienne des résultats de tous les clients auxquels il a " -"envoyé des instructions (via :code:`configure_fit`). 
" -":code:`aggregate_fit` reçoit donc une liste de :code:`résultats`, mais " -"aussi une liste de :code:`échecs`." +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" +" to implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " +"some of the boilerplate that would otherwise be necessary. " +":code:`CifarClient` needs to implement four methods, two methods for " +"getting/setting model parameters, one method for training the model, and " +"one method for testing the model:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" +msgstr ":code:`set_parameters`" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"set the model parameters on the local model that are received from the " +"server" +msgstr "règle les paramètres du modèle local reçus du serveur" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" msgstr "" -":code:`aggregate_fit` renvoie un objet :code:`Parameters` facultatif et " -"un dictionnaire de métriques agrégées. 
La valeur de retour " -":code:`Parameters` est facultative car :code:`aggregate_fit` peut décider" -" que les résultats fournis ne sont pas suffisants pour l'agrégation (par " -"exemple, trop d'échecs)." +"boucle sur la liste des paramètres du modèle reçus sous forme de NumPy " +":code:`ndarray`'s (pensez à la liste des couches du réseau neuronal)" -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" -msgstr "La méthode :code:`configure_evaluate` (en anglais)" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +#, fuzzy +msgid "``get_parameters``" +msgstr ":code:`get_parameters`" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#, fuzzy msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" -":code:`configure_evaluate` est chargé de configurer le prochain tour " -"d'évaluation. Que signifie *configurer* dans ce contexte ? Configurer un " -"tour signifie sélectionner des clients et décider des instructions à leur" -" envoyer. 
La signature de :code:`configure_evaluate` l'indique clairement" -" :" +"récupère les paramètres du modèle et les renvoie sous forme de liste de " +":code:`ndarray` NumPy (ce qui correspond à ce que " +":code:`flwr.client.NumPyClient` attend)" -#: ../../source/how-to-implement-strategies.rst:278 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "``fit``" msgstr "" -"La valeur de retour est une liste de tuples, chacun représentant les " -"instructions qui seront envoyées à un client particulier. Les " -"implémentations de stratégies effectuent généralement les étapes " -"suivantes dans :code:`configure_evaluate` :" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"update the parameters of the local model with the parameters received " +"from the server" msgstr "" -"Associe chaque :code:`ClientProxy` au même :code:`EvaluateIns` contenant " -"le modèle global actuel :code:`parameters` et :code:`config` dict" +"mettre à jour les paramètres du modèle local avec les paramètres reçus du" +" serveur" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +msgid "train the model on the local training set" +msgstr "entraîne le modèle sur l'ensemble 
d'apprentissage local" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 +msgid "get the updated local model weights and return them to the server" +msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 #, fuzzy -msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." -msgstr "" -"Les implémentations plus sophistiquées peuvent utiliser " -":code:`configure_evaluate` pour mettre en œuvre une logique de sélection " -"des clients personnalisée. Un client ne participera à un tour que si le " -":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " -":code:`configure_evaluate`." +msgid "``evaluate``" +msgstr ":code:`évaluer`" -#: ../../source/how-to-implement-strategies.rst:287 -msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." -msgstr "" -"La structure de cette valeur de retour offre beaucoup de souplesse à " -"l'utilisateur. Comme les instructions sont définies par client, des " -"instructions différentes peuvent être envoyées à chaque client. 
Cela " -"permet aux stratégies personnalisées d'évaluer, par exemple, différents " -"modèles sur différents clients, ou d'utiliser différents hyperparamètres " -"sur différents clients (via le dict :code:`config`)." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 +msgid "evaluate the updated model on the local test set" +msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" -msgstr "La méthode :code:`aggregate_evaluate` (agréger_évaluer)" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +msgid "return the local loss and accuracy to the server" +msgstr "renvoie la perte locale et la précision au serveur" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 +#, fuzzy msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" -":code:`aggregate_evaluate` est chargé d'agréger les résultats renvoyés " -"par les clients qui ont été sélectionnés et à qui l'on a demandé " -"d'évaluer dans :code:`configure_evaluate`." +"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " +"utilisent les fonctions :code:`train()` et :code:`test()` définies " +"précédemment dans :code:`cifar.py`. 
Ce que nous faisons vraiment ici, " +"c'est que nous indiquons à Flower, par le biais de notre sous-classe " +":code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " +"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " +"annotations de type pour te donner une meilleure compréhension des types " +"de données qui sont transmis." -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 +#, fuzzy msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"All that's left to do it to define a function that loads both model and " +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" msgstr "" -"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " -"que le serveur obtienne des résultats de tous les clients auxquels il a " -"envoyé des instructions (via :code:`configure_evaluate`). " -":code:`aggregate_evaluate` reçoit donc une liste de :code:`résultats`, " -"mais aussi une liste d' :code:`échecs`." +"Il ne reste plus qu'à définir une fonction qui charge le modèle et les " +"données, crée un :code:`CifarClient` et démarre ce client. Tu charges tes" +" données et ton modèle en utilisant :code:`cifar.py`. 
Démarre " +":code:`CifarClient` avec la fonction :code:`fl.client.start_client()` en " +"la faisant pointer sur la même adresse IP que celle que nous avons " +"utilisée dans :code:`server.py` :" -#: ../../source/how-to-implement-strategies.rst:308 -msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 +msgid "And that's it. You can now open two additional terminal windows and run" msgstr "" -":code:`aggregate_evaluate` renvoie un :code:`float` facultatif (perte) et" -" un dictionnaire de mesures agrégées. La valeur de retour :code:`float` " -"est facultative car :code:`aggregate_evaluate` peut décider que les " -"résultats fournis ne sont pas suffisants pour l'agrégation (par exemple, " -"trop d'échecs)." - -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" -msgstr "La méthode :code:`évaluer`" +"Tu peux maintenant ouvrir deux autres fenêtres de terminal et exécuter " +"les commandes suivantes" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" 
msgstr "" -"le fait d'avoir :code:`evaluate` en plus de " -":code:`configure_evaluate`/:code:`aggregate_evaluate` permet aux " -"stratégies d'effectuer des évaluations à la fois côté serveur et côté " -"client (fédéré)." +"dans chaque fenêtre (assure-toi que le serveur fonctionne avant de le " +"faire) et tu verras ton projet PyTorch (auparavant centralisé) exécuter " +"l'apprentissage fédéré sur deux clients. Félicitations !" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 +#, fuzzy msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" msgstr "" -"La valeur de retour est à nouveau facultative parce que la stratégie peut" -" ne pas avoir besoin de mettre en œuvre l'évaluation côté serveur ou " -"parce que la méthode :code:`evaluate` définie par l'utilisateur peut ne " -"pas se terminer avec succès (par exemple, elle peut échouer à charger les" -" données de l'évaluation côté serveur)." - -#: ../../source/how-to-install-flower.rst:2 -#, fuzzy -msgid "Install Flower" -msgstr "Installer Flower" +"Le code source complet de cet exemple : `PyTorch : From Centralized To " +"Federated (Code) `_. Notre exemple est, bien sûr, " +"un peu trop simplifié parce que les deux clients chargent exactement le " +"même ensemble de données, ce qui n'est pas réaliste. 
Tu es maintenant " +"prêt à explorer davantage ce sujet. Pourquoi ne pas utiliser différents " +"sous-ensembles de CIFAR-10 sur chaque client ? Pourquoi ne pas ajouter " +"d'autres clients ?" -#: ../../source/how-to-install-flower.rst:6 +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:14 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 #, fuzzy -msgid "Python version" -msgstr "Version Python" - -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" -msgstr "Installe la version stable" +msgid "Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -msgid "Using pip" +#: ../../source/explanation-differential-privacy.rst:4 +msgid "" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/explanation-differential-privacy.rst:9 msgid "" -"Stable releases are available on `PyPI " -"`_::" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." 
msgstr "" -"Les versions stables sont disponibles sur `PyPI " -"`_: :" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/explanation-differential-privacy.rst:16 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr`` " -"doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-install-flower.rst:27 -msgid "Using conda (or mamba)" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -#: ../../source/how-to-install-flower.rst:29 -msgid "Flower can also be installed from the ``conda-forge`` channel." +#: ../../source/explanation-differential-privacy.rst:27 +msgid "" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/explanation-differential-privacy.rst:32 +#, fuzzy +msgid "Formal Definition" +msgstr "Compiler les définitions ProtoBuf" + +#: ../../source/explanation-differential-privacy.rst:34 msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. 
It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/explanation-differential-privacy.rst:42 msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" msgstr "" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" +#: ../../source/explanation-differential-privacy.rst:47 +msgid "" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" -msgstr "Vérifie l'installation" - -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/explanation-differential-privacy.rst:56 #, fuzzy +msgid "Differential Privacy in Machine Learning" +msgstr "Confidentialité différentielle" + +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. 
If everything worked, it should print the version of Flower to" -" the command line::" -msgstr "" -"La commande suivante peut être utilisée pour vérifier si Flower a été " -"installé avec succès. Si tout a fonctionné, la version de Flower devrait " -"être imprimée sur la ligne de commande: :" - -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" -msgstr "Options d'installation avancées" +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." +msgstr "" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/explanation-differential-privacy.rst:69 #, fuzzy -msgid "Install via Docker" -msgstr "Installer Flower" +msgid "Differential Privacy in Federated Learning" +msgstr "Mise à l'échelle de l'apprentissage fédéré" -#: ../../source/how-to-install-flower.rst:60 -msgid ":doc:`How to run Flower using Docker `" +#: ../../source/explanation-differential-privacy.rst:71 +msgid "" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. 
These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" -msgstr "Installer la version pre-release" - -#: ../../source/how-to-install-flower.rst:65 +#: ../../source/explanation-differential-privacy.rst:78 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -"Les nouvelles versions (éventuellement instables) de Flower sont parfois " -"disponibles en tant que versions préliminaires (alpha, bêta, release " -"candidate) avant que la version stable n'arrive : :" -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/explanation-differential-privacy.rst:81 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." 
msgstr "" -"Pour les simulations qui utilisent le moteur de client virtuel, les " -"versions de ``flwr`` doivent être installées avec l'option " -"``simulation``: :" - -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "Installer la version nightly" -#: ../../source/how-to-install-flower.rst:76 +#: ../../source/explanation-differential-privacy.rst:86 msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -"Les dernières modifications (potentiellement instables) de Flower sont " -"disponibles sous forme de versions nocturnes: :" -#: ../../source/how-to-install-flower.rst:80 +#: ../../source/explanation-differential-privacy.rst:88 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." 
msgstr "" -"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr-" -"nightly`` doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-monitor-simulation.rst:2 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:93 +#: ../../source/how-to-use-differential-privacy.rst:15 #, fuzzy -msgid "Monitor simulation" -msgstr "Simulation de moniteur" +msgid "Central Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/explanation-differential-privacy.rst:95 msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -"Flower te permet de surveiller les ressources du système pendant " -"l'exécution de ta simulation. De plus, le moteur de simulation de Flower " -"est puissant et te permet de décider comment allouer les ressources par " -"manière de client et de limiter l'utilisation totale. Les informations " -"sur la consommation des ressources peuvent t'aider à prendre des " -"décisions plus intelligentes et à accélérer le temps d'exécution." -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/explanation-differential-privacy.rst:104 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." 
+"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -"Les instructions spécifiques supposent que tu utilises macOS et que le " -"gestionnaire de paquets `Homebrew `_ est installé." - -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" -msgstr "Téléchargements" -#: ../../source/how-to-monitor-simulation.rst:16 -msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -"`Prometheus `_ est utilisé pour la collecte de " -"données, tandis que `Grafana `_ te permettra de " -"visualiser les données collectées. Ils sont tous deux bien intégrés à " -"`Ray `_ que Flower utilise sous le capot." -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. 
The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -"Écrase les fichiers de configuration (selon ton appareil, il se peut " -"qu'il soit installé sur un chemin différent)." - -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" -msgstr "Si tu es sur un Mac M1, il devrait l'être :" -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" +#: ../../source/explanation-differential-privacy.rst:126 +msgid "Clipping" msgstr "" -"Sur les appareils Mac Intel de la génération précédente, ce devrait être " -"le cas :" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." msgstr "" -"Ouvre les fichiers de configuration respectifs et modifie-les. Selon ton " -"appareil, utilise l'une des deux commandes suivantes :" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/explanation-differential-privacy.rst:131 msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -"puis supprime tout le texte du fichier et colle une nouvelle " -"configuration Prometheus que tu vois ci-dessous. 
Tu peux adapter les " -"intervalles de temps à tes besoins :" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/explanation-differential-privacy.rst:133 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -"Maintenant, après avoir édité la configuration de Prometheus, fais de " -"même avec les fichiers de configuration de Grafana. Ouvre ces derniers à " -"l'aide de l'une des commandes suivantes, comme précédemment :" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/explanation-differential-privacy.rst:137 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -"Ton éditeur de terminal devrait s'ouvrir et te permettre d'appliquer la " -"configuration suivante comme précédemment." -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:141 +#: ../../source/how-to-use-differential-privacy.rst:113 +#, fuzzy +msgid "Local Differential Privacy" +msgstr "Confidentialité différentielle" + +#: ../../source/explanation-differential-privacy.rst:143 msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." +"In this approach, each client is responsible for performing DP. 
Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -"Félicitations, tu viens de télécharger tous les logiciels nécessaires au " -"suivi des métriques, maintenant, démarrons-le." -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" -msgstr "Suivi des mesures" +#: ../../source/explanation-differential-privacy.rst:152 +msgid "In this explainer, we focus on two forms of achieving Local DP:" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-differential-privacy.rst:154 msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -"Avant de lancer ta simulation Flower, tu dois démarrer les outils de " -"surveillance que tu viens d'installer et de configurer." -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-differential-privacy.rst:158 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}" msgstr "" -"Tu dois inclure l'argument suivant dans ton code Python lorsque tu " -"démarres une simulation." -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." -msgstr "Maintenant, tu es prêt à commencer ta charge de travail." +#: ../../source/explanation-differential-privacy.rst:163 +msgid "" +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). 
More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/explanation-differential-privacy.rst:167 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -"Peu de temps après le début de la simulation, tu devrais voir les " -"journaux suivants dans ton terminal :" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." -msgstr "Tu peux tout regarder sur ``_ ." +#: ../../source/explanation-differential-privacy.rst:169 +#, fuzzy +msgid "**References:**" +msgstr "Référence" -#: ../../source/how-to-monitor-simulation.rst:119 -msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." +#: ../../source/explanation-differential-privacy.rst:171 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -"Il s'agit d'un tableau de bord Ray. Tu peux naviguer vers Metrics (sur le" -" panneau de gauche, l'option la plus basse)." -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/explanation-differential-privacy.rst:173 +#, fuzzy msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -"Ou alors, tu peux simplement les voir dans Grafana en cliquant sur le " -"coin supérieur droit, \"View in Grafana\". 
Sache que le tableau de bord " -"Ray n'est accessible que pendant la simulation. Une fois la simulation " -"terminée, tu ne peux utiliser Grafana que pour explorer les métriques. Tu" -" peux démarrer Grafana en te rendant sur `http://localhost:3000/``." +"McMahan, H. Brendan, et al. \"Learning differentially private recurrent " +"language models\", arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-differential-privacy.rst:175 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." msgstr "" -"Après avoir terminé la visualisation, arrête Prometheus et Grafana. C'est" -" important car sinon ils bloqueront, par exemple, le port :code:`3000` " -"sur ta machine tant qu'ils seront en cours d'exécution." - -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" -msgstr "Allocation des ressources" -#: ../../source/how-to-monitor-simulation.rst:134 -msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." -msgstr "" -"Tu dois comprendre le fonctionnement de la bibliothèque Ray pour allouer " -"efficacement les ressources du système aux clients de simulation de ton " -"côté." - -#: ../../source/how-to-monitor-simulation.rst:136 -msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. 
You can " -"check the system resources by running the following:" +#: ../../source/explanation-differential-privacy.rst:177 +#, fuzzy +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" -"Au départ, la simulation (que Ray gère sous le capot) démarre par défaut " -"avec toutes les ressources disponibles sur le système, qu'elle partage " -"entre les clients. Cela ne signifie pas qu'elle les divise de manière " -"égale entre tous, ni que l'apprentissage du modèle se fait sur tous les " -"clients simultanément. Tu en apprendras plus à ce sujet dans la suite de " -"ce blog. Tu peux vérifier les ressources du système en exécutant ce qui " -"suit :" +"Andrew, Galen, et al. \"Differentially private learning with adaptive " +"clipping\" Advances in Neural Information Processing Systems 34 (2021) : " +"17455-17466." -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "Dans Google Colab, le résultat que tu obtiens peut ressembler à ceci :" +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "Évaluation fédérée" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." msgstr "" -"Cependant, tu peux écraser les valeurs par défaut. 
Lorsque tu démarres " -"une simulation, fais ce qui suit (tu n'as pas besoin de les écraser " -"toutes) :" +"Il existe deux approches principales pour évaluer les modèles dans les " +"systèmes d'apprentissage fédérés : l'évaluation centralisée (ou côté " +"serveur) et l'évaluation fédérée (ou côté client)." -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." -msgstr "Spécifions également la ressource pour un seul client." +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "Évaluation centralisée" + +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "Stratégies intégrées" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" msgstr "" -"Ray ne démarrera un nouveau client que lorsqu'il disposera de toutes les " -"ressources nécessaires (de manière à ce qu'ils fonctionnent en parallèle)" -" lorsque les ressources le permettront." +"Toutes les stratégies intégrées prennent en charge l'évaluation " +"centralisée en fournissant une fonction d'évaluation lors de " +"l'initialisation. 
Une fonction d'évaluation est une fonction qui peut " +"prendre les paramètres du modèle global actuel comme entrée et renvoyer " +"les résultats de l'évaluation :" + +#: ../../source/explanation-federated-evaluation.rst:61 +msgid "Custom Strategies" +msgstr "Stratégies personnalisées" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-federated-evaluation.rst:63 +#, fuzzy msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"The ``Strategy`` abstraction provides a method called ``evaluate`` that " +"can directly be used to evaluate the current global model parameters. The" +" current server implementation calls ``evaluate`` after parameter " +"aggregation and before federated evaluation (see next paragraph)." msgstr "" -"Dans l'exemple ci-dessus, un seul client sera exécuté, donc tes clients " -"ne fonctionneront pas simultanément. En définissant " -":code:`client_num_gpus = 0.5`, tu pourras exécuter deux clients et donc " -"les faire fonctionner simultanément. Fais attention à ne pas demander " -"plus de ressources que celles disponibles. Si tu as spécifié " -":code:`client_num_gpus = 2`, la simulation ne démarrera pas (même si tu " -"as 2 GPU mais que tu as décidé d'en définir 1 dans " -":code:`ray_init_args`)." +"L'abstraction :code:`Strategy` fournit une méthode appelée " +":code:`evaluate` qui peut être directement utilisée pour évaluer les " +"paramètres du modèle global actuel. L'implémentation actuelle du serveur " +"appelle :code:`evaluate` après l'agrégation des paramètres et avant " +"l'évaluation fédérée (voir le paragraphe suivant)." 
-#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "FAQ" +#: ../../source/explanation-federated-evaluation.rst:69 +msgid "Federated Evaluation" +msgstr "Évaluation fédérée" -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." -msgstr "Q : Je ne vois aucune mesure enregistrée." +#: ../../source/explanation-federated-evaluation.rst:72 +msgid "Implementing Federated Evaluation" +msgstr "Mise en œuvre de l'évaluation fédérée" -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/explanation-federated-evaluation.rst:74 +#, fuzzy msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." +"Client-side evaluation happens in the ``Client.evaluate`` method and can " +"be configured from the server side." msgstr "" -"R : Il se peut que le délai ne soit pas correctement défini. Le paramètre" -" se trouve dans le coin supérieur droit (\"Dernières 30 minutes\" par " -"défaut). Modifie le délai pour qu'il corresponde à la période pendant " -"laquelle la simulation s'est déroulée." +"L'évaluation côté client se fait dans la méthode :code:`Client.evaluate` " +"et peut être configurée côté serveur." -#: ../../source/how-to-monitor-simulation.rst:218 -msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." -msgstr "" -"Q : Je vois s'afficher \"Serveur Grafana non détecté. Vérifie que le " -"serveur Grafana fonctionne et actualise cette page\" après avoir accédé à" -" l'onglet Métriques dans Ray Dashboard." 
+#: ../../source/explanation-federated-evaluation.rst:108 +msgid "Configuring Federated Evaluation" +msgstr "Configuration de l'évaluation fédérée" -#: ../../source/how-to-monitor-simulation.rst:220 +#: ../../source/explanation-federated-evaluation.rst:110 msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" msgstr "" -"R : Grafana n'est probablement pas en cours d'exécution. Vérifie les " -"services en cours d'exécution" +"L'évaluation fédérée peut être configurée du côté du serveur. Les " +"stratégies intégrées prennent en charge les arguments suivants :" -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/explanation-federated-evaluation.rst:113 +#, fuzzy msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." +"``fraction_evaluate``: a ``float`` defining the fraction of clients that " +"will be selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.1`` and ``100`` clients are connected to the server, then ``10`` will" +" be randomly selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.0``, federated evaluation will be disabled." msgstr "" -"Q : Je vois \"This site can't be reached\" quand je vais sur " -"``_." +":code:`fraction_evaluate` : un :code:`float` définissant la fraction de " +"clients qui sera sélectionnée pour l'évaluation. Si " +":code:`fraction_evaluate` est défini à :code:`0.1` et que :code:`100` " +"clients sont connectés au serveur, alors :code:`10` sera sélectionné " +"aléatoirement pour l'évaluation. Si :code:`fraction_evaluate` est défini " +"à :code:`0.0`, l'évaluation fédérée sera désactivée." -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/explanation-federated-evaluation.rst:118 +#, fuzzy msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." 
+"``min_evaluate_clients``: an ``int``: the minimum number of clients to be" +" selected for evaluation. If ``fraction_evaluate`` is set to ``0.1``, " +"``min_evaluate_clients`` is set to 20, and ``100`` clients are connected " +"to the server, then ``20`` clients will be selected for evaluation." msgstr "" -"R : Soit la simulation est déjà terminée, soit tu dois encore démarrer " -"Prometheus." - -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" -msgstr "Ressources" +"si :code:`fraction_evaluate` est réglé sur :code:`0.1`, " +":code:`min_evaluate_clients` est réglé sur 20, et que :code:`100` clients" +" sont connectés au serveur, alors :code:`20` clients seront sélectionnés " +"pour l'évaluation." -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/explanation-federated-evaluation.rst:122 #, fuzzy msgid "" -"Ray Dashboard: ``_" +"``min_available_clients``: an ``int`` that defines the minimum number of " +"clients which need to be connected to the server before a round of " +"federated evaluation can start. If fewer than ``min_available_clients`` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" -"Tableau de bord Ray : ``_" +":code:`min_available_clients` : un :code:`int` qui définit le nombre " +"minimum de clients qui doivent être connectés au serveur avant qu'un " +"cycle d'évaluation fédérée puisse commencer. Si moins de " +":code:`min_available_clients` sont connectés au serveur, le serveur " +"attendra que d'autres clients soient connectés avant de continuer à " +"échantillonner des clients pour l'évaluation." -#: ../../source/how-to-monitor-simulation.rst:236 +#: ../../source/explanation-federated-evaluation.rst:127 #, fuzzy -msgid "Ray Metrics: ``_" +msgid "" +"``on_evaluate_config_fn``: a function that returns a configuration " +"dictionary which will be sent to the selected clients. 
The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." msgstr "" -"Ray Metrics : ``_" +":code:`on_evaluate_config_fn` : une fonction qui renvoie un dictionnaire " +"de configuration qui sera envoyé aux clients sélectionnés. Cette fonction" +" sera appelée à chaque tour et offre un moyen pratique de personnaliser " +"l'évaluation côté client depuis le côté serveur, par exemple pour " +"configurer le nombre d'étapes de validation effectuées." -#: ../../source/how-to-run-flower-using-docker.rst:2 -msgid "Run Flower using Docker" -msgstr "" +#: ../../source/explanation-federated-evaluation.rst:157 +msgid "Evaluating Local Model Updates During Training" +msgstr "Évaluer les mises à jour du modèle local pendant la formation" -#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/explanation-federated-evaluation.rst:159 +#, fuzzy msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__. Supported architectures include " -"``amd64`` and ``arm64v8``." +"Model parameters can also be evaluated during training. ``Client.fit`` " +"can return arbitrary evaluation results as a dictionary:" msgstr "" +"Les paramètres du modèle peuvent également être évalués pendant la " +"formation. 
:code:`Client.fit` peut renvoyer des résultats d'évaluation " +"arbitraires sous forme de dictionnaire :" -#: ../../source/how-to-run-flower-using-docker.rst:8 -msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "" +#: ../../source/explanation-federated-evaluation.rst:201 +msgid "Full Code Example" +msgstr "Exemple de code complet" -#: ../../source/how-to-run-flower-using-docker.rst:15 +#: ../../source/explanation-federated-evaluation.rst:203 +#, fuzzy msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" +"Pour un exemple de code complet qui utilise à la fois l'évaluation " +"centralisée et fédérée, voir l'*Exemple TensorFlow avancé* (la même " +"approche peut être appliquée aux charges de travail mises en œuvre dans " +"n'importe quel autre framework) : " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/how-to-run-flower-using-docker.rst:21 +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:27 +#: ../../source/explanation-flower-architecture.rst:2 +msgid "Flower Architecture" +msgstr "Architecture florale" + +#: ../../source/explanation-flower-architecture.rst:4 msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:32 -#, fuzzy -msgid "Flower SuperLink" -msgstr "flower-superlink" - -#: ../../source/how-to-run-flower-using-docker.rst:35 -#, fuzzy -msgid "Quickstart" -msgstr "Démarrage rapide de JAX" - -#: ../../source/how-to-run-flower-using-docker.rst:37 -msgid "If you're looking to try out Flower, you can use the following command:" +#: ../../source/explanation-flower-architecture.rst:6 +msgid "" +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:43 +#: ../../source/explanation-flower-architecture.rst:9 msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:49 -msgid "" -"By default, the Flower SuperLink keeps state in-memory. 
When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." +#: ../../source/explanation-flower-architecture.rst:13 +msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:53 +#: ../../source/explanation-flower-architecture.rst:21 +#, fuzzy +msgid "Hub-and-spoke topology in federated learning" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: ../../source/explanation-flower-architecture.rst:21 msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. Here, we are passing the" -" flag ``--insecure``." +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:60 -#: ../../source/how-to-run-flower-using-docker.rst:259 -#: ../../source/how-to-run-flower-using-docker.rst:376 +#: ../../source/explanation-flower-architecture.rst:23 msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:65 +#: ../../source/explanation-flower-architecture.rst:28 msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:72 -msgid "Mounting a volume to store the state on the host system" +#: ../../source/explanation-flower-architecture.rst:32 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:74 +#: ../../source/explanation-flower-architecture.rst:34 msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a directory where you want to save the file" -" on your host system and a name for the database file. By default, the " -"SuperLink container runs with a non-root user called ``app`` with the " -"user ID ``49999``. It is recommended to create new directory and change " -"the user ID of the directory to ``49999`` to ensure the mounted directory" -" has the proper permissions. If you later want to delete the directory, " -"you can change the user ID back to the current user ID by running ``sudo " -"chown -R $USER:$(id -gn) state``." +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:82 +#: ../../source/explanation-flower-architecture.rst:36 msgid "" -"In the example below, we create a new directory, change the user ID and " -"tell Docker via the flag ``--volume`` to mount the local ``state`` " -"directory into the ``/app/state`` directory of the container. 
" -"Furthermore, we use the flag ``--database`` to specify the name of the " -"database file." +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:95 -msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"``state`` directory on your host system. If the file already exists, the " -"SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." +#: ../../source/explanation-flower-architecture.rst:41 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:100 -#: ../../source/how-to-run-flower-using-docker.rst:281 -#: ../../source/how-to-run-flower-using-docker.rst:397 -#, fuzzy -msgid "Enabling SSL for secure connections" -msgstr "Collecte centralisée des données" - -#: ../../source/how-to-run-flower-using-docker.rst:102 +#: ../../source/explanation-flower-architecture.rst:43 msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:106 +#: ../../source/explanation-flower-architecture.rst:46 msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." 
+"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:110 +#: ../../source/explanation-flower-architecture.rst:51 msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/certificates/`` directory of the container. This allows the " -"SuperLink to access the files within the container. The ``ro`` stands for" -" ``read-only``. Docker volumes default to ``read-write``; that option " -"tells Docker to make the volume ``read-only`` instead. Finally, we pass " -"the names of the certificates and key file to the SuperLink with the " -"``--ssl-ca-certfile``, ``--ssl-certfile`` and ``--ssl-keyfile`` flag." +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:128 -msgid "" -"Because Flower containers, by default, run with a non-root user ``app``, " -"the mounted files and directories must have the proper permissions for " -"the user ID ``49999``. For example, to change the user ID of all files in" -" the ``certificates/`` directory, you can run ``sudo chown -R 49999:49999" -" certificates/*``." 
-msgstr "" +#: ../../source/explanation-flower-architecture.rst:62 +#, fuzzy +msgid "Basic Flower architecture" +msgstr "Architecture florale" -#: ../../source/how-to-run-flower-using-docker.rst:134 +#: ../../source/explanation-flower-architecture.rst:62 #, fuzzy -msgid "Flower SuperNode" -msgstr "Serveur de Flower" +msgid "The basic Flower architecture for federated learning." +msgstr "Qu'est-ce que l'apprentissage fédéré ?" -#: ../../source/how-to-run-flower-using-docker.rst:136 +#: ../../source/explanation-flower-architecture.rst:64 msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:141 +#: ../../source/explanation-flower-architecture.rst:70 msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:147 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." 
+"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:155 +#: ../../source/explanation-flower-architecture.rst:78 msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:159 +#: ../../source/explanation-flower-architecture.rst:87 #, fuzzy -msgid "Clone the Flower repository." -msgstr "**Fourche le dépôt de Flower**" - -#: ../../source/how-to-run-flower-using-docker.rst:173 -msgid "Creating a SuperNode Dockerfile" -msgstr "" +msgid "Multi-tenancy federated learning architecture" +msgstr "Stratégie de moyenne fédérée." -#: ../../source/how-to-run-flower-using-docker.rst:175 -#: ../../source/how-to-run-flower-using-docker.rst:311 -msgid "Let's assume the following project layout:" -msgstr "" +#: ../../source/explanation-flower-architecture.rst:87 +#, fuzzy +msgid "Multi-tenancy federated learning architecture with Flower" +msgstr "Étape 2 : Apprentissage fédéré avec Flower" -#: ../../source/how-to-run-flower-using-docker.rst:184 +#: ../../source/explanation-flower-architecture.rst:89 msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." 
+"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:196 +#: ../../source/explanation-flower-architecture.rst:94 msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:200 -msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." -msgstr "" +#: ../../source/explanation-flower-architecture.rst:103 +#, fuzzy +msgid "Multi-tenancy federated learning architecture - Run 1" +msgstr "Stratégie de moyenne fédérée." -#: ../../source/how-to-run-flower-using-docker.rst:203 +#: ../../source/explanation-flower-architecture.rst:103 msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:217 +#: ../../source/explanation-flower-architecture.rst:106 msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. 
Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:226 +#: ../../source/explanation-flower-architecture.rst:115 #, fuzzy -msgid "Building the SuperNode Docker image" -msgstr "Démarrer le serveur" +msgid "Multi-tenancy federated learning architecture - Run 2" +msgstr "Stratégie de moyenne fédérée." -#: ../../source/how-to-run-flower-using-docker.rst:228 +#: ../../source/explanation-flower-architecture.rst:115 msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:235 +#: ../../source/explanation-flower-architecture.rst:118 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:240 -#, fuzzy -msgid "Running the SuperNode Docker image" -msgstr "Démarrer le serveur" - -#: ../../source/how-to-run-flower-using-docker.rst:242 -msgid "Now that we have built the SuperNode image, we can finally run it." 
-msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:250 -#: ../../source/how-to-run-flower-using-docker.rst:367 -msgid "Let's break down each part of this command:" +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:252 -#: ../../source/how-to-run-flower-using-docker.rst:369 -msgid "``docker run``: This is the command to run a new Docker container." +#: ../../source/explanation-flower-architecture.rst:121 +msgid "" +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:253 -#: ../../source/how-to-run-flower-using-docker.rst:370 +#: ../../source/explanation-flower-architecture.rst:128 msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:254 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." 
+#: ../../source/explanation-flower-architecture.rst:137 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:255 -#: ../../source/how-to-run-flower-using-docker.rst:372 -msgid "``--insecure``: This option enables insecure communication." +#: ../../source/explanation-flower-architecture.rst:137 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"``--superlink 192.168.1.100:9092``: This option specifies the address of " -"the SuperLinks Fleet" +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:269 +#: ../../source/explanation-flower-architecture.rst:146 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:273 -msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "Modèle FED" -#: ../../source/how-to-run-flower-using-docker.rst:283 -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "Table des matières" -#: ../../source/how-to-run-flower-using-docker.rst:285 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. Use the ``--root-certificates`` flag when starting " -"the container." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "[Table des matières](#table-of-contents)" -#: ../../source/how-to-run-flower-using-docker.rst:297 -#, fuzzy -msgid "Flower ServerApp" -msgstr "Serveur de Flower" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" +msgstr "[Résumé](#résumé)" -#: ../../source/how-to-run-flower-using-docker.rst:299 -msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "[Motivation](#motivation)" -#: ../../source/how-to-run-flower-using-docker.rst:301 -msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "[Buts](#buts)" -#: ../../source/how-to-run-flower-using-docker.rst:304 -msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "[Non-objectifs](#non-objectifs)" -#: ../../source/how-to-run-flower-using-docker.rst:309 -msgid "Creating a ServerApp Dockerfile" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "[Proposition](#proposition)" -#: ../../source/how-to-run-flower-using-docker.rst:320 -msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "[Inconvénients](#inconvénients)" -#: ../../source/how-to-run-flower-using-docker.rst:324 -msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "[Alternatives envisagées](#alternatives-considered)" -#: ../../source/how-to-run-flower-using-docker.rst:335 -msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "[Annexe](#appendix)" + +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "Résumé" -#: ../../source/how-to-run-flower-using-docker.rst:343 +#: ../../source/fed/0000-20200102-fed-template.md:26 #, fuzzy -msgid "Building the ServerApp Docker image" -msgstr "Démarrer le serveur" +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "[TODO - phrase 1 : résumé du problème]" -#: ../../source/how-to-run-flower-using-docker.rst:345 -msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:28 +#, fuzzy +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "[TODO - phrase 2 : résumé de la solution]" -#: ../../source/how-to-run-flower-using-docker.rst:352 -msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "Motivation" -#: ../../source/how-to-run-flower-using-docker.rst:357 +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 #, fuzzy -msgid "Running the ServerApp Docker image" -msgstr "Démarrer le serveur" - -#: ../../source/how-to-run-flower-using-docker.rst:359 -msgid "Now that we have built the ServerApp image, we can finally run it." -msgstr "" +msgid "\\[TODO\\]" +msgstr "[TODO]" -#: ../../source/how-to-run-flower-using-docker.rst:371 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "Objectifs" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "" -"``--superlink 192.168.1.100:9091``: This option specifies the address of " -"the SuperLinks Driver" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "Non-objectifs" -#: ../../source/how-to-run-flower-using-docker.rst:385 -msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "Proposition" -#: ../../source/how-to-run-flower-using-docker.rst:389 -msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. To see all available flags that the ServerApp supports, run:" -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "Inconvénients" -#: ../../source/how-to-run-flower-using-docker.rst:399 -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." 
-msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "Alternatives envisagées" -#: ../../source/how-to-run-flower-using-docker.rst:401 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the ServerApp to access the certificate " -"within the container. Use the ``--root-certificates`` flags when starting" -" the container." -msgstr "" +#: ../../source/fed/0000-20200102-fed-template.md:52 +#, fuzzy +msgid "\\[Alternative 1\\]" +msgstr "[Alternative 1]" -#: ../../source/how-to-run-flower-using-docker.rst:412 +#: ../../source/fed/0000-20200102-fed-template.md:56 #, fuzzy -msgid "Advanced Docker options" -msgstr "Options d'installation avancées" +msgid "\\[Alternative 2\\]" +msgstr "[Alternative 2]" -#: ../../source/how-to-run-flower-using-docker.rst:415 -msgid "Run with root user privileges" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "Doc sur l'amélioration des fleurs" -#: ../../source/how-to-run-flower-using-docker.rst:417 -msgid "" -"Flower Docker images, by default, run with a non-root user " -"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is not" -" recommended unless it is necessary for specific tasks during the build " -"process. Always make sure to run the container as a non-root user in " -"production to maintain security best practices." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "[Modèle de document d'amélioration](#enhancement-doc-template)" -#: ../../source/how-to-run-flower-using-docker.rst:422 -msgid "**Run a container with root user privileges**" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "[Métadonnées](#métadonnées)" -#: ../../source/how-to-run-flower-using-docker.rst:424 -msgid "" -"Run the Docker image with the ``-u`` flag and specify ``root`` as the " -"username:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" +msgstr "[Workflow](#workflow)" -#: ../../source/how-to-run-flower-using-docker.rst:430 -msgid "This command will run the Docker container with root user privileges." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "[GitHub Issues](#github-issues)" -#: ../../source/how-to-run-flower-using-docker.rst:432 -msgid "**Run the build process with root user privileges**" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "[Google Docs](#google-docs)" -#: ../../source/how-to-run-flower-using-docker.rst:434 -msgid "" -"If you want to switch to the root user during the build process of the " -"Docker image to install missing system dependencies, you can use the " -"``USER root`` directive within your Dockerfile." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" msgstr "" +"Une amélioration de la fleur est un processus de développement " +"standardisé pour" -#: ../../source/how-to-run-flower-using-docker.rst:454 -msgid "Using a different Flower version" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" msgstr "" +"fournir une structure commune pour proposer des changements plus " +"importants" -#: ../../source/how-to-run-flower-using-docker.rst:456 -msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "s'assurer que la motivation du changement est claire" -#: ../../source/how-to-run-flower-using-docker.rst:460 -msgid "Pinning a Docker image to a specific version" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" msgstr "" +"conserver les informations sur le projet dans un système de contrôle des " +"versions" -#: ../../source/how-to-run-flower-using-docker.rst:462 -msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" msgstr "" +"documenter la motivation des changements qui ont un impact sur " +"l'utilisateur" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "réserve les problèmes GitHub pour le suivi du travail en vol" -#: ../../source/how-to-run-flower-using-docker.rst:467 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" msgstr "" +"s'assurer que les participants de la communauté peuvent mener à bien les " +"changements dans le cadre d'une ou plusieurs versions et que les parties " +"prenantes sont représentées de manière adéquate tout au long du processus" -#: ../../source/how-to-run-flower-using-docker.rst:474 -msgid "Next, we can pin the hash when running a new SuperLink container:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "Par conséquent, un document d'amélioration combine des aspects de" -#: ../../source/how-to-run-flower-using-docker.rst:483 -#, fuzzy -msgid "Setting environment variables" -msgstr "Mise en place de l'environnement de codage" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "une caractéristique, et un document de suivi des efforts" -#: ../../source/how-to-run-flower-using-docker.rst:485 -msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "un document sur les exigences du produit" -#: ../../source/how-to-run-simulations.rst:2 -#, fuzzy -msgid "Run simulations" -msgstr "Simulation de moniteur" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "un document de conception" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +"into one file, which is created incrementally in collaboration with the " +"community." msgstr "" +"en un seul fichier, qui est créé progressivement en collaboration avec la" +" communauté." -#: ../../source/how-to-run-simulations.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. 
In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" +"Pour les changements lointains ou les fonctionnalités proposées à Flower," +" une abstraction au-delà d'une simple question GitHub ou d'une demande de" +" tirage est nécessaire pour comprendre et communiquer les changements à " +"venir dans le projet." -#: ../../source/how-to-run-simulations.rst:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." msgstr "" +"L'objectif de ce processus est de réduire la quantité de \"connaissances " +"tribales\" dans notre communauté. En déplaçant les décisions des fils de " +"discussion Slack, des appels vidéo et des conversations de couloir vers " +"un artefact bien suivi, ce processus vise à améliorer la communication et" +" la découvrabilité." -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." 
+"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." msgstr "" +"Si une amélioration doit être décrite par écrit ou verbalement à " +"quelqu'un d'autre que l'auteur ou le développeur, il faut envisager de " +"créer un document d'amélioration." -#: ../../source/how-to-run-simulations.rst:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." msgstr "" +"De même, tout effort technique (refactorisation, changement architectural" +" majeur) qui aura un impact sur une grande partie de la communauté de " +"développement doit également être communiqué à grande échelle. Le " +"processus d'amélioration est adapté à cela, même s'il n'aura aucun impact" +" sur l'utilisateur ou l'opérateur type." -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." 
-msgstr "" - -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." msgstr "" +"Pour les petits changements et ajouts, passer par le processus " +"d'amélioration prendrait beaucoup de temps et serait inutile. Cela " +"inclut, par exemple, l'ajout de nouveaux algorithmes d'apprentissage " +"fédéré, car ceux-ci ne font qu'ajouter des fonctionnalités sans changer " +"le fonctionnement ou l'utilisation de Flower." -#: ../../source/how-to-run-simulations.rst:22 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." msgstr "" +"Les améliorations sont différentes des demandes de fonctionnalités, car " +"elles fournissent déjà un chemin tracé pour la mise en œuvre et sont " +"défendues par les membres de la communauté." -#: ../../source/how-to-run-simulations.rst:44 -#, fuzzy -msgid "VirtualClientEngine resources" -msgstr "Moteur de client virtuel" - -#: ../../source/how-to-run-simulations.rst:45 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. 
" -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." msgstr "" +"Une amélioration est capturée dans un fichier Markdown qui suit un modèle" +" défini et un flux de travail pour examiner et stocker les documents " +"d'amélioration pour référence - le Doc d'amélioration." -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "Modèle de document d'amélioration" -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +"Each enhancement doc is provided as a Markdown file having the following " +"structure" msgstr "" +"Chaque document d'amélioration est fourni sous la forme d'un fichier " +"Markdown ayant la structure suivante" -#: ../../source/how-to-run-simulations.rst:65 -msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. 
You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" msgstr "" +"Métadonnées (comme [décrit ci-dessous](#metadata) sous la forme d'un " +"préambule YAML)" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "Titre (le même que dans les métadonnées)" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "Table des matières (si nécessaire)" -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "Notes/Contraintes/Cavats (facultatif)" -#: ../../source/how-to-run-simulations.rst:89 -msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." 
-msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "Détails de la conception (facultatif)" -#: ../../source/how-to-run-simulations.rst:91 -msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "Critères d'obtention du diplôme" -#: ../../source/how-to-run-simulations.rst:94 -#, fuzzy -msgid "Simulation examples" -msgstr "Exemples de PyTorch" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "Stratégie de mise à niveau/rétrogradation (le cas échéant)" -#: ../../source/how-to-run-simulations.rst:96 -msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." +msgstr "À titre de référence, ce document suit la structure ci-dessus." -#: ../../source/how-to-run-simulations.rst:98 -#, fuzzy -msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." -msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "Métadonnées" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. 
With this number, it becomes easy to reference other proposals."
msgstr ""
+"**fed-number** (Obligatoire) Le `fed-number` du dernier document "
+"d'amélioration de Flower + 1. Avec ce numéro, il devient facile de "
+"faire référence à d'autres propositions."

-#: ../../source/how-to-run-simulations.rst:104
-#, fuzzy
-msgid "Multi-node Flower simulations"
-msgstr "Simulation de moniteur"
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94
+msgid "**title** (Required) The title of the proposal in plain language."
+msgstr "**titre** (obligatoire) Le titre de la proposition en langage clair."

-#: ../../source/how-to-run-simulations.rst:106
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96
msgid ""
-"Flower's :code:`VirtualClientEngine` allows you to run FL simulations "
-"across multiple compute nodes. Before starting your multi-node simulation"
-" ensure that you:"
-msgstr ""
-
-#: ../../source/how-to-run-simulations.rst:108
-msgid "Have the same Python environment in all nodes."
-msgstr ""
-
-#: ../../source/how-to-run-simulations.rst:109
-msgid "Have a copy of your code (e.g. your entire repo) in all nodes."
+"**status** (Required) The current status of the proposal. See "
+"[workflow](#workflow) for the possible states."
msgstr ""
+"**status** (obligatoire) L'état actuel de la proposition. Voir "
+"[workflow](#workflow) pour les états possibles."

-#: ../../source/how-to-run-simulations.rst:110
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98
msgid ""
-"Have a copy of your dataset in all nodes (more about this in "
-":ref:`simulation considerations <considerations-for-simulations>`)"
+"**authors** (Required) A list of authors of the proposal. This is simply "
+"the GitHub ID."
msgstr ""
+"**authors** (Obligatoire) Une liste des auteurs de la proposition, il "
+"s'agit simplement de l'identifiant GitHub."
-#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." msgstr "" +"**creation-date** (Obligatoire) Date à laquelle la proposition a été " +"soumise pour la première fois dans un RP." -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." msgstr "" +"**dernière mise à jour** (Facultatif) La date à laquelle la proposition a" +" été modifiée de manière significative pour la dernière fois." -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." msgstr "" +"**see-also** (Facultatif) Une liste d'autres propositions qui sont " +"pertinentes par rapport à celle-ci." -#: ../../source/how-to-run-simulations.rst:115 -msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." 
+msgstr "**replaces** (Facultatif) Une liste de propositions que celle-ci remplace." -#: ../../source/how-to-run-simulations.rst:117 -msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." msgstr "" +"**superseded-by** (Facultatif) Une liste de propositions que celle-ci " +"remplace." -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" -msgstr "" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "Flux de travail" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." msgstr "" - -#: ../../source/how-to-run-simulations.rst:124 +"L'idée à l'origine de l'amélioration doit déjà avoir fait l'objet d'une " +"discussion ou d'une présentation au sein de la communauté. À ce titre, " +"elle a besoin d'un champion, généralement l'auteur, qui se charge de " +"l'amélioration. Cette personne doit également trouver des committers to " +"Flower prêts à examiner la proposition." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." 
+"New enhancements are checked in with a file name in the form of `NNNN-"
+"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement "
+"Doc number, to `enhancements`. All enhancements start in `provisional` "
+"state as part of a pull request. Discussions are done as part of the pull"
+" request review."
msgstr ""
+"Les nouvelles améliorations sont enregistrées avec un nom de fichier de "
+"la forme `NNNN-YYYYMMDD-enhancement-title.md`, `NNNN` étant le numéro du "
+"document d'amélioration de Flower, dans `enhancements`. Toutes les "
+"améliorations commencent à l'état `provisionnel` dans le cadre d'une "
+"demande d'extraction. Les discussions sont effectuées dans le cadre de "
+"l'examen de la demande d'extraction."

-#: ../../source/how-to-run-simulations.rst:126
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117
msgid ""
-"When attaching a new node to the head, all its resources (i.e. all CPUs, "
-"all GPUs) will be visible by the head node. This means that the "
-":code:`VirtualClientEngine` can schedule as many `virtual` clients as "
-"that node can possible run. In some settings you might want to exclude "
-"certain resources from the simulation. You can do this by appending "
-"`--num-cpus=<NUM_CPUS_FROM_NODE>` and/or `--num-"
-"gpus=<NUM_GPUS_FROM_NODE>` in any :code:`ray start` command (including "
-"when starting the head)"
+"Once an enhancement has been reviewed and approved, its status is changed"
+" to `implementable`. The actual implementation is then done in separate "
+"pull requests. These pull requests should mention the respective "
+"enhancement as part of their description. After the implementation is "
+"done, the proposal status is changed to `implemented`."
msgstr ""
+"Une fois qu'une amélioration a été examinée et approuvée, son statut "
+"passe à `implémentable`. L'implémentation réelle est alors réalisée dans "
+"des demandes d'extension séparées. Ces demandes d'extension doivent "
+"mentionner l'amélioration concernée dans leur description. 
Une fois " +"l'implémentation réalisée, le statut de la proposition passe à " +"`implémented`." -#: ../../source/how-to-run-simulations.rst:132 -#, fuzzy -msgid "Considerations for simulations" -msgstr "Simulation de moniteur" - -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" msgstr "" +"Sous certaines conditions, d'autres états sont possibles. Une " +"amélioration a les états suivants :" -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." msgstr "" +"`provisoire` : L'amélioration a été proposée et est en cours de " +"définition. C'est l'état de départ pendant que la proposition est étoffée" +" et activement définie et discutée." -#: ../../source/how-to-run-simulations.rst:141 -#, fuzzy -msgid "GPU resources" -msgstr "Ressources" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." 
+msgstr "`implementable` : L'amélioration a été examinée et approuvée." -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " -"internally by the VCE) is by default:" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." +msgstr "" +"`implemented` : L'amélioration a été mise en œuvre et n'est plus " +"activement modifiée." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." msgstr "" +"`deferred` : L'amélioration est proposée mais n'est pas activement " +"travaillée." -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." msgstr "" +"`rejeté` : Les auteurs et les réviseurs ont décidé que cette amélioration" +" n'allait pas de l'avant." -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." +msgstr "`withdrawn` : Les auteurs ont retiré l'amélioration." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "`replaced` : L'amélioration a été remplacée par une nouvelle amélioration." 
+ +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." msgstr "" +"L'ajout d'un processus supplémentaire à ceux déjà fournis par GitHub " +"(Issues et Pull Requests) ajoute plus de complexité et peut constituer un" +" obstacle pour les éventuels nouveaux contributeurs." -#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." msgstr "" +"Élargir le modèle de proposition au-delà de la description d'une seule " +"phrase actuellement requise dans le modèle de questions sur les " +"caractéristiques peut constituer une lourde charge pour les personnes " +"dont l'anglais n'est pas la langue maternelle." -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "Questions sur GitHub" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. 
The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." msgstr "" +"Il est possible d'utiliser GitHub Issues pour ce type d'améliorations. On" +" pourrait utiliser, par exemple, des balises pour les différencier et les" +" filtrer par rapport aux autres problèmes. Le principal problème concerne" +" la discussion et la révision d'une amélioration : les GitHub Issues " +"n'ont qu'un seul fil de discussion pour les commentaires. Les " +"améliorations ont généralement plusieurs fils de discussion en même temps" +" pour différentes parties de la documentation. La gestion de ces " +"multiples discussions peut être déroutante lorsque l'on utilise GitHub " +"Issues." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "Google Docs" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." msgstr "" +"Les Google Docs permettent de multiplier les fils de discussion. 
Mais " +"comme les Google Docs sont hébergés en dehors du projet, il faut veiller " +"à ce que la communauté puisse les découvrir. Une liste de liens vers " +"toutes les propositions doit être gérée et mise à la disposition de la " +"communauté. Par rapport à l'envoi de propositions dans le cadre du " +"référentiel de Flower, le risque de liens manquants est beaucoup plus " +"élevé." + +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "FED - Doc pour l'amélioration de Flower" -#: ../../source/how-to-run-simulations.rst:156 +#: ../../source/how-to-aggregate-evaluation-results.rst:2 #, fuzzy -msgid "TensorFlow with GPUs" -msgstr "Exemples de TensorFlow" +msgid "Aggregate evaluation results" +msgstr "Agréger les résultats de l'évaluation" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." msgstr "" -#: ../../source/how-to-run-simulations.rst:160 +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "Agréger les résultats de l'évaluation personnalisée" + +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#, fuzzy msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. 
By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" +"La même approche de personnalisation :code:`Stratégie` peut être utilisée" +" pour agréger les résultats d'évaluation personnalisés provenant de " +"clients individuels. Les clients peuvent renvoyer des mesures " +"personnalisées au serveur en renvoyant un dictionnaire :" -#: ../../source/how-to-run-simulations.rst:179 -#, fuzzy +#: ../../source/how-to-aggregate-evaluation-results.rst:39 msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" +"Le serveur peut alors utiliser une stratégie personnalisée pour agréger " +"les mesures fournies dans ces dictionnaires :" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" msgstr "" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. 
Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. " +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -#: ../../source/how-to-run-simulations.rst:187 -msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +#: ../../source/how-to-authenticate-supernodes.rst:8 +msgid "SuperLink (server) stores a list of known (client) node public keys" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -#, fuzzy -msgid "Save and load model checkpoints" -msgstr "Sauvegarde et chargement des points de contrôle PyTorch" +#: ../../source/how-to-authenticate-supernodes.rst:9 +msgid "" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:10 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." 
+"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -#, fuzzy -msgid "Model checkpointing" -msgstr "Point de contrôle du modèle" +#: ../../source/how-to-authenticate-supernodes.rst:12 +msgid "SuperLink verifies the token" +msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:14 +#, fuzzy msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." msgstr "" -"Les mises à jour du modèle peuvent être conservées côté serveur en " -"personnalisant les méthodes :code:`Strategy`. L'implémentation de " -"stratégies personnalisées est toujours possible, mais dans de nombreux " -"cas, il peut être plus pratique de simplement personnaliser une stratégie" -" existante. L'exemple de code suivant définit une nouvelle " -":code:`SaveModelStrategy` qui personnalise la stratégie intégrée " -":code:`FedAvg` existante. En particulier, il personnalise " -":code:`aggregate_fit` en appelant :code:`aggregate_fit` dans la classe de" -" base (:code:`FedAvg`). 
Il continue ensuite à sauvegarder les poids " -"retournés (agrégés) avant de renvoyer ces poids agrégés à l'appelant " -"(c'est-à-dire le serveur) :" - -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -#, fuzzy -msgid "Save and load PyTorch checkpoints" -msgstr "Sauvegarde et chargement des points de contrôle PyTorch" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 -#, fuzzy +#: ../../source/how-to-authenticate-supernodes.rst:20 msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." +"This guide covers a preview feature that might change in future versions " +"of Flower." msgstr "" -"Comme dans l'exemple précédent, mais avec quelques étapes " -"supplémentaires, nous allons montrer comment stocker un point de contrôle" -" PyTorch en utilisant la fonction ``torch.save``. Tout d'abord, " -"``aggregate_fit`` renvoie un objet ``Parameters`` qui doit être " -"transformé en une liste de `ndarray`` NumPy, puis ceux-ci sont " -"transformés en ``state_dict`` PyTorch en suivant la structure de la " -"classe ``OrderedDict``." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/how-to-authenticate-supernodes.rst:24 msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." msgstr "" -"Pour charger ta progression, il te suffit d'ajouter les lignes suivantes " -"à ton code. 
Note que cela va itérer sur tous les points de contrôle " -"sauvegardés et charger le plus récent :" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 -msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." +#: ../../source/how-to-authenticate-supernodes.rst:28 +msgid "Enable node authentication in ``SuperLink``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" -msgstr "Passe à Flower 1.0" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:30 msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" -"Flower 1.0 est arrivé. En plus de nouvelles fonctionnalités, Flower 1.0 " -"fournit une base stable pour la croissance future. Par rapport à Flower " -"0.19 (et aux autres versions de la série 0.x), il y a quelques " -"changements qui nécessitent de modifier le code des projets de la série " -"0.x existants." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" -msgstr "Installer la mise à jour" +#: ../../source/how-to-authenticate-supernodes.rst:47 +msgid "Let's break down the authentication flags:" +msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-authenticate-supernodes.rst:49 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." msgstr "" -"Voici comment mettre à jour une installation existante vers Flower 1.0 en" -" utilisant soit pip soit Poetry :" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." -msgstr "pip : ajoute ``-U`` lors de l'installation." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/how-to-authenticate-supernodes.rst:53 msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" -"``python -m pip install -U flwr`` (lors de l'utilisation de " -"``start_server`` et ``start_client``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/how-to-authenticate-supernodes.rst:57 msgid "" -"``python -m pip install -U 'flwr[simulation]'`` (when using " -"``start_simulation``)" +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. 
For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." msgstr "" -"``python -m pip install -U 'flwr[simulation]'`` (lors de l'utilisation de " -"``start_simulation``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/how-to-authenticate-supernodes.rst:64 msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " -"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " -"poetry.lock`` avant d'exécuter ``poetry install``)." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +#: ../../source/how-to-authenticate-supernodes.rst:71 +msgid "Enable node authentication in ``SuperNode``" msgstr "" -"``flwr = \"^1.0.0\"`` (lors de l'utilisation de ``start_server`` et " -"``start_client``)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/how-to-authenticate-supernodes.rst:73 msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). 
Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (lors de " -"l'utilisation de ``start_simulation``)" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" -msgstr "Changements nécessaires" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." -msgstr "" -"Les changements de rupture suivants nécessitent des mises à jour " -"manuelles." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" -msgstr "Généralités" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-authenticate-supernodes.rst:85 msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). " -"Here's an example:" +"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." msgstr "" -"Passe tous les arguments comme des arguments de mots-clés (et non comme " -"des arguments de position). 
Voici un exemple :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 -msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" +#: ../../source/how-to-authenticate-supernodes.rst:91 +msgid "Security notice" msgstr "" -"Flower 0.19 (arguments positionnels) : ``start_client(\"127.0.0.1:8080\"," -" FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-authenticate-supernodes.rst:93 msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." msgstr "" -"Fleur 1.0 (arguments de mots-clés) : " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" -msgstr "Client" +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "Conclusion" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-authenticate-supernodes.rst:102 msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"You should now have learned how to start a long-running Flower server " +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. 
You should also know the significance of the private key and " +"store it safely to minimize security risks." msgstr "" -"Sous-classes de ``NumPyClient`` : changez ``def get_parameters(self):`` " -"en ``def get_parameters(self, config):``" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-configure-clients.rst:2 +#, fuzzy +msgid "Configure clients" +msgstr "Configurer les clients" + +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." msgstr "" -"Sous-classes de ``Client`` : changez ``def get_parameters(self):`` en " -"``def get_parameters(self, ins : GetParametersIns):``" +"En plus des paramètres du modèle, Flower peut envoyer des valeurs de " +"configuration aux clients. Les valeurs de configuration peuvent être " +"utilisées à diverses fins. Elles constituent, par exemple, un moyen " +"populaire de contrôler les hyperparamètres côté client à partir du " +"serveur." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "Stratégies / ``démarrer_serveur`` / ``démarrer_simulation``" +#: ../../source/how-to-configure-clients.rst:9 +msgid "Configuration values" +msgstr "Valeurs de configuration" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-configure-clients.rst:11 msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. 
Here's an example:" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." +" Here is an example of a configuration dictionary in Python:" msgstr "" -"Passez ``ServerConfig`` (au lieu d'un dictionnaire) à ``start_server`` et" -" ``start_simulation``. Voici un exemple :" +"Les valeurs de configuration sont représentées sous forme de dictionnaire" +" avec des clés `str`` et des valeurs de type `bool`, `bytes`, `double` " +"(float de précision 64 bits), `int`, ou `str` (ou des types équivalents " +"dans d'autres langages). Voici un exemple de dictionnaire de " +"configuration en Python :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-configure-clients.rst:25 msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." msgstr "" -"Flower 0.19 : ``start_server(..., config={\"num_rounds\" : 3, " -"\"round_timeout\" : 600.0}, ...)``" +"Flower sérialise ces dictionnaires de configuration (ou *config dict* en " +"abrégé) dans leur représentation ProtoBuf, les transporte vers le client " +"à l'aide de gRPC, puis les désérialise à nouveau en dictionnaires Python." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-configure-clients.rst:31 msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. 
There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Actuellement, il n'est pas possible d'envoyer directement des types de " +"collections (par exemple, ``Set``, ``List``, ``Map``) en tant que valeurs" +" dans les dictionnaires de configuration. Il existe plusieurs solutions " +"pour envoyer des collections en tant que valeurs en les convertissant en " +"l'un des types de valeurs pris en charge (et en les reconvertissant du " +"côté client)." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-configure-clients.rst:36 msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." msgstr "" -"Remplacer ``num_rounds=1`` dans ``start_simulation`` par le nouveau " -"``config=ServerConfig(...)`` (voir point précédent)" +"On peut, par exemple, convertir une liste de nombres à virgule flottante " +"en une chaîne JSON, puis envoyer la chaîne JSON à l'aide du dictionnaire " +"de configuration, et enfin reconvertir la chaîne JSON en une liste de " +"nombres à virgule flottante sur le client." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-configure-clients.rst:41 +msgid "Configuration through built-in strategies" +msgstr "Configuration par le biais de stratégies intégrées" + +#: ../../source/how-to-configure-clients.rst:43 +#, fuzzy msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. 
Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." msgstr "" -"Supprime le paramètre ``force_final_distributed_eval`` des appels à " -"``start_server``. L'évaluation distribuée sur tous les clients peut être " -"activée en configurant la stratégie pour échantillonner tous les clients " -"pour l'évaluation après le dernier tour de formation." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" -msgstr "Renomme les fonctions de conversion des paramètres et des tableaux :" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -msgstr "``Poids_à_paramètres`` --> ``Réseaux_à_paramètres``" +"La façon la plus simple d'envoyer des valeurs de configuration aux " +"clients est d'utiliser une stratégie intégrée comme :code:`FedAvg`. Les " +"stratégies intégrées prennent en charge ce que l'on appelle les fonctions" +" de configuration. Une fonction de configuration est une fonction que la " +"stratégie intégrée appelle pour obtenir le dictionnaire de configuration " +"pour le tour en cours. Elle transmet ensuite le dictionnaire de " +"configuration à tous les clients sélectionnés au cours de ce tour." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-configure-clients.rst:49 msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +"Let's start with a simple example. Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" msgstr "" -"Initialisation de la stratégie : si la stratégie repose sur les valeurs " -"par défaut de ``fraction_fit`` et ``fraction_evaluate``, fixer " -"manuellement ``fraction_fit`` et ``fraction_evaluate`` à `0.1``. Les " -"projets qui ne créent pas manuellement une stratégie (en appelant " -"``start_server` ou ``start_simulation`` sans passer une instance de " -"stratégie) doivent maintenant initialiser manuellement FedAvg avec " -"``fraction_fit`` et ``fraction_evaluate`` fixés à ``0.1``." 
- -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "Renommer les paramètres de stratégie intégrés (par exemple, ``FedAvg``) :" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" -msgstr "``fraction_eval`` --> ``fraction_evaluate``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" -msgstr "``min_eval_clients` --> ``min_evaluate_clients``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "``eval_fn`` --> ``evaluate_fn``" +"Commençons par un exemple simple. Imaginons que nous voulions envoyer (a)" +" la taille du lot que le client doit utiliser, (b) le cycle global actuel" +" de l'apprentissage fédéré et (c) le nombre d'époques à former du côté " +"client. Notre fonction de configuration pourrait ressembler à ceci :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-configure-clients.rst:65 +#, fuzzy msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +"``on_fit_config_fn``:" msgstr "" -"Renommez ``rnd`` en ``server_round``. Cela a un impact sur plusieurs " -"méthodes et fonctions, par exemple, ``configure_fit``, ``aggregate_fit``," -" ``configure_evaluate``, ``aggregate_evaluate``, et ``evaluate_fn``." 
+"Pour que les stratégies intégrées utilisent cette fonction, nous pouvons " +"la passer à ``FedAvg`` lors de l'initialisation en utilisant le paramètre" +" :code:`on_fit_config_fn` :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "Ajoute ``server_round`` et ``config`` à `evaluate_fn`` :" +#: ../../source/how-to-configure-clients.rst:75 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "Côté client, nous recevons le dictionnaire de configuration dans ``fit`` :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-configure-clients.rst:86 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" -"Flower 0.19 : ``def evaluate(parameters : NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]]:``" +"Il existe également une fonction `on_evaluate_config_fn` pour configurer " +"l'évaluation, qui fonctionne de la même manière. Ce sont des fonctions " +"séparées car on peut vouloir envoyer différentes valeurs de configuration" +" à `evaluate` (par exemple, pour utiliser une taille de lot différente)." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-configure-clients.rst:90 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). 
" +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" -"Flower 1.0 : ``def evaluate(server_round : int, parameters : NDArrays, " -"config : Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"Les stratégies intégrées appellent cette fonction à chaque tour " +"(c'est-à-dire à chaque fois que `Strategy.configure_fit` ou " +"`Strategy.configure_evaluate` s'exécute). Appeler `on_evaluate_config_fn`" +" à chaque tour nous permet de varier/changer le dict de config au cours " +"de tours consécutifs. Si nous voulions mettre en place un calendrier " +"d'hyperparamètres, par exemple, pour augmenter le nombre d'époques " +"locales au cours des derniers tours, nous pourrions faire ce qui suit :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" -msgstr "Stratégies personnalisées" +#: ../../source/how-to-configure-clients.rst:107 +#, fuzzy +msgid "The ``FedAvg`` strategy will call this function *every round*." +msgstr "La stratégie :code:`FedAvg` appellera cette fonction *à chaque tour*." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 -msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" -msgstr "" -"Le type du paramètre ``failures`` a changé de ``List[BaseException]`` à " -"``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (dans " -"``aggregate_fit``) et ``List[Union[Tuple[ClientProxy, EvaluateRes], " -"BaseException]]`` (dans ``aggregate_evaluate``)" +#: ../../source/how-to-configure-clients.rst:110 +msgid "Configuring individual clients" +msgstr "Configuration des clients individuels" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-configure-clients.rst:112 msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" +"In some cases, it is necessary to send different configuration values to " +"different clients." msgstr "" -"La méthode ``Stratégie`` `évaluer`` reçoit maintenant le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre :" +"Dans certains cas, il est nécessaire d'envoyer des valeurs de " +"configuration différentes à des clients différents." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-configure-clients.rst:115 +#, fuzzy msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round do not receive this \"special\" config " +"value):" msgstr "" -"Flower 0.19 : ``def evaluate(self, parameters : Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]]:``" +"Ceci peut être réalisé en personnalisant une stratégie existante ou en " +"`mettant en œuvre une stratégie personnalisée à partir de zéro " +"`_. " +"Voici un exemple absurde qui personnalise :code:`FedAvg` en ajoutant une " +"paire clé/valeur de configuration personnalisée ``\"hello\" : \"world\"``" +" au config dict d'un *seul client* (uniquement le premier client de la " +"liste, les autres clients de cette série ne recevant pas cette valeur de " +"configuration \"spéciale\") :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-configure-logging.rst:2 +#, fuzzy +msgid "Configure logging" +msgstr "Configurer les clients" + +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. 
It presents information by default " +"following a standard message format:" msgstr "" -"Flower 1.0 : ``def evaluate(self, server_round : int, parameters : " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" -msgstr "Améliorations facultatives" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-configure-logging.rst:13 msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" +"containing relevant information including: log message level (e.g. " +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" -"En plus des changements nécessaires mentionnés ci-dessus, il existe un " -"certain nombre d'améliorations potentielles qui viennent d'être rendues " -"possibles :" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 -msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." +#: ../../source/how-to-configure-logging.rst:35 +msgid "Saving log to file" msgstr "" -"Supprime les méthodes \"placeholder\" des sous-classes de ``Client`` ou " -"de ``NumPyClient``. Si tu utilises, par exemple, l'évaluation côté " -"serveur, alors les implémentations \"placeholder\" de ``evaluate`` ne " -"sont plus nécessaires." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-configure-logging.rst:37 msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" msgstr "" -"Configurez le délai d'attente de la ronde via ``start_simulation`` : " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" -msgstr "Aide supplémentaire" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-configure-logging.rst:59 +#, fuzzy msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"With the above, Flower will record the log you see on your terminal to " +"``log.txt``. This file will be created in the same directory as where you" +" are running the code from. If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" -"La plupart des `exemples de code Flower officiels " -"`_ sont déjà mis à " -"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " -"de l'API Flower 1.0. 
Si vous avez d'autres questions, `joins le Slack " -"Flower `_ et utilise le canal " -"``#questions``." +"Avec ce qui précède, Flower enregistrera le log que vous voyez sur votre " +"terminal dans :code:`log.txt`. Ce fichier sera créé dans le répertoire " +"depuis lequel le code est exécuté. Si nous inspectons nous voyons que le " +"log ci-dessous est également enregistré mais préfixé avec " +":code:`identifier` sur chaque ligne :" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -#, fuzzy -msgid "Upgrade to Flower Next" -msgstr "Passe à Flower 1.0" +#: ../../source/how-to-configure-logging.rst:81 +msgid "Log your own messages" +msgstr "Loggez vos propres messages" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-configure-logging.rst:83 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-configure-logging.rst:114 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" 
+#: ../../source/how-to-configure-logging.rst:140 +msgid "Log to a remote service" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 -#, fuzzy +#: ../../source/how-to-configure-logging.rst:142 msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" +"The ``fl.common.logger.configure`` function also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" -"Voici comment mettre à jour une installation existante vers Flower 1.0 en" -" utilisant soit pip soit Poetry :" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -msgid "or if you need Flower Next with simulation:" -msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:2 +#, fuzzy +msgid "Enable SSL connections" +msgstr "Collecte centralisée des données" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-enable-ssl-connections.rst:4 +#, fuzzy msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" +"This guide describes how an SSL-enabled secure Flower server " +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connection to it." msgstr "" +"Ce guide décrit comment démarrer un serveur Flower sécurisé par SSL et " +"comment un client Flower peut établir une connexion sécurisée avec lui." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" +#: ../../source/how-to-enable-ssl-connections.rst:8 +#, fuzzy +msgid "" +"A complete code example demonstrating a secure connection can be found " +"`here `_." msgstr "" +"Un exemple de code complet démontrant une connexion sécurisée peut être " +"trouvé ici `_." -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -#, fuzzy -msgid "Using Poetry" -msgstr "Utiliser la poésie (recommandé)" - -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-enable-ssl-connections.rst:11 #, fuzzy msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." msgstr "" -"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " -"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " -"poetry.lock`` avant d'exécuter ``poetry install``)." +"L'exemple de code est accompagné d'un fichier README.md qui t'expliquera " +"comment le démarrer. Bien qu'il soit déjà activé par SSL, il peut être " +"moins descriptif sur la façon de procéder. Tiens-toi en à ce guide pour " +"une introduction plus approfondie sur le sujet." -#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" +msgstr "Certificats" + +#: ../../source/how-to-enable-ssl-connections.rst:18 #, fuzzy msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. 
For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" +msgstr "" +"L'utilisation de connexions compatibles avec le protocole SSL nécessite " +"que des certificats soient transmis au serveur et au client. Pour les " +"besoins de ce guide, nous allons générer des certificats auto-signés. " +"Comme cela peut devenir assez complexe, nous allons te demander " +"d'exécuter le script dans :code:`examples/advanced-" +"tensorflow/certificates/generate.sh`" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-enable-ssl-connections.rst:29 +#, fuzzy msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "" +"Cela générera les certificats dans :code:`examples/advanced-" +"tensorflow/.cache/certificates`." -#: ../../source/how-to-upgrade-to-flower-next.rst:109 +#: ../../source/how-to-enable-ssl-connections.rst:32 #, fuzzy -msgid "|clientapp_link|_" -msgstr "client" - -#: ../../source/how-to-upgrade-to-flower-next.rst:110 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. 
Here's an example:" +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." msgstr "" +"L'approche de la génération des certificats SSL dans cet exemple peut " +"servir d'inspiration et de point de départ, mais ne doit pas être " +"considérée comme complète pour les environnements de production." -#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#: ../../source/how-to-enable-ssl-connections.rst:40 #, fuzzy -msgid "|serverapp_link|_" -msgstr "serveur" +msgid "Server (SuperLink)" +msgstr "flower-superlink" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-enable-ssl-connections.rst:42 +#, fuzzy msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" +"Use the following terminal command to start a server (SuperLink) that " +"uses the previously generated certificates:" msgstr "" +"Nous allons maintenant montrer comment écrire un client qui utilise les " +"scripts générés précédemment :" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" +#: ../../source/how-to-enable-ssl-connections.rst:52 +msgid "" +"When providing certificates, the server expects a tuple of three " +"certificate paths: CA certificate, server certificate and server private" +" key." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-enable-ssl-connections.rst:56 +#, fuzzy +msgid "Client (SuperNode)" +msgstr "Codes d'état du client." 
+ +#: ../../source/how-to-enable-ssl-connections.rst:58 +#, fuzzy msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" msgstr "" +"Nous allons maintenant montrer comment écrire un client qui utilise les " +"scripts générés précédemment :" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-enable-ssl-connections.rst:67 +#, fuzzy msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." msgstr "" +"En définissant :code:`root_certificates`, le client s'attend à recevoir " +"les certificats racine codés en PEM sous forme de chaîne d'octets. Nous " +"utilisons à nouveau :code:`Path` pour simplifier la lecture de ces " +"certificats sous forme de chaînes d'octets." -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-enable-ssl-connections.rst:73 +#, fuzzy msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." msgstr "" +"Tu devrais maintenant avoir appris à générer des certificats auto-signés " +"à l'aide du script donné, à démarrer un serveur compatible SSL et à " +"demander à un client d'établir une connexion sécurisée avec lui." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:201 +#: ../../source/how-to-enable-ssl-connections.rst:78 #, fuzzy -msgid "Simulation in CLI" -msgstr "Simulation de moniteur" +msgid "Additional resources" +msgstr "Ressources supplémentaires" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" msgstr "" +"Ces sources supplémentaires peuvent être pertinentes si tu souhaites " +"approfondir le sujet des certificats :" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 -msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" -msgstr "" +#: ../../source/how-to-enable-ssl-connections.rst:83 +msgid "`Let's Encrypt `_" +msgstr "`Let's Encrypt `_" + +#: ../../source/how-to-enable-ssl-connections.rst:84 +msgid "`certbot `_" +msgstr "`certbot `_" + +#: ../../source/how-to-implement-strategies.rst:2 +#, fuzzy +msgid "Implement strategies" +msgstr "Mettre en place des stratégies" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. 
Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" +"L'abstraction de la stratégie permet de mettre en œuvre des stratégies " +"entièrement personnalisées. Une stratégie est essentiellement " +"l'algorithme d'apprentissage fédéré qui s'exécute sur le serveur. Les " +"stratégies décident comment échantillonner les clients, comment " +"configurer les clients pour la formation, comment agréger les mises à " +"jour et comment évaluer les modèles. Flower fournit quelques stratégies " +"intégrées qui sont basées sur la même API que celle décrite ci-dessous." -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:11 +#, fuzzy +msgid "The ``Strategy`` abstraction" +msgstr "L'abstraction :code:`Stratégie`" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-implement-strategies.rst:13 +#, fuzzy msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" +"All strategy implementation are derived from the abstract base class " +"``flwr.server.strategy.Strategy``, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." msgstr "" +"Toutes les implémentations de stratégies sont dérivées de la classe de " +"base abstraite :code:`flwr.server.strategy.Strategy`, qu'il s'agisse " +"d'implémentations intégrées ou d'implémentations tierces. Cela signifie " +"que les implémentations de stratégies personnalisées ont exactement les " +"mêmes capacités à leur disposition que les implémentations intégrées." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:319 -#, fuzzy +#: ../../source/how-to-implement-strategies.rst:18 msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" msgstr "" -"La plupart des `exemples de code Flower officiels " -"`_ sont déjà mis à " -"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " -"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " -"Flower `_ et utilise le canal " -"``#questions``." +"L'abstraction de la stratégie définit quelques méthodes abstraites qui " +"doivent être mises en œuvre :" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 +#: ../../source/how-to-implement-strategies.rst:67 #, fuzzy -msgid "Important" -msgstr "Changements importants :" - -#: ../../source/how-to-upgrade-to-flower-next.rst:328 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "" +"La création d'une nouvelle stratégie implique la mise en œuvre d'une " +"nouvelle :code:`classe` (dérivée de la classe de base abstraite " +":code:`Stratégie`) qui met en œuvre les méthodes abstraites présentées " +"précédemment :" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 
🚀" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:97 +msgid "The Flower server calls these methods in the following order:" +msgstr "Le serveur Flower appelle ces méthodes dans l'ordre suivant :" -#: ../../source/how-to-use-built-in-mods.rst:2 -msgid "Use Built-in Mods" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:174 +msgid "The following sections describe each of those methods in more detail." +msgstr "Les sections suivantes décrivent chacune de ces méthodes plus en détail." -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-implement-strategies.rst:177 +#, fuzzy +msgid "The ``initialize_parameters`` method" +msgstr "La méthode :code:`initialize_parameters` (initialisation des paramètres)" + +#: ../../source/how-to-implement-strategies.rst:179 +#, fuzzy msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." msgstr "" +":code:`initialize_parameters` n'est appelé qu'une seule fois, au tout " +"début d'une exécution. Il est chargé de fournir les paramètres initiaux " +"du modèle global sous une forme sérialisée (c'est-à-dire sous la forme " +"d'un objet :code:`Parameters`)." -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-implement-strategies.rst:183 +#, fuzzy msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." +"Built-in strategies return user-provided initial parameters. 
The " +"following example shows how initial parameters can be passed to " +"``FedAvg``:" msgstr "" +"Les stratégies intégrées renvoient les paramètres initiaux fournis par " +"l'utilisateur. L'exemple suivant montre comment les paramètres initiaux " +"peuvent être transmis à :code:`FedAvg` :" -#: ../../source/how-to-use-built-in-mods.rst:9 -msgid "What are Mods?" +#: ../../source/how-to-implement-strategies.rst:209 +#, fuzzy +msgid "" +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." msgstr "" +"Le serveur Flower appelle :code:`initialize_parameters`, qui renvoie les " +"paramètres passés à :code:`initial_parameters`, ou :code:`None`. Si aucun" +" paramètre n'est renvoyé par :code:`initialize_parameters` (c'est-à-dire " +":code:`None`), le serveur sélectionne au hasard un client et lui demande " +"de fournir ses paramètres. Il s'agit d'une fonction de commodité qui " +"n'est pas recommandée dans la pratique, mais qui peut être utile pour le " +"prototypage. Dans la pratique, il est recommandé de toujours utiliser " +"l'initialisation des paramètres du côté du serveur." -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. 
The signature for a ``Mod`` is as follows:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:18 -msgid "A typical mod function might look something like this:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:31 -msgid "Using Mods" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:33 -msgid "To use mods in your ``ClientApp``, you can follow these steps:" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." msgstr "" +"L'initialisation des paramètres côté serveur est un mécanisme puissant. " +"Elle peut être utilisée, par exemple, pour reprendre l'entraînement à " +"partir d'un point de contrôle précédemment sauvegardé. C'est également la" +" capacité fondamentale nécessaire pour mettre en œuvre des approches " +"hybrides, par exemple, pour affiner un modèle pré-entraîné à l'aide de " +"l'apprentissage fédéré." -#: ../../source/how-to-use-built-in-mods.rst:36 -msgid "1. Import the required mods" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:224 +#, fuzzy +msgid "The ``configure_fit`` method" +msgstr "La méthode :code:`configure_fit`" -#: ../../source/how-to-use-built-in-mods.rst:38 -msgid "First, import the built-in mod you intend to use:" +#: ../../source/how-to-implement-strategies.rst:226 +#, fuzzy +msgid "" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. The signature of ``configure_fit`` makes this clear:" msgstr "" +":code:`configure_fit` est chargé de configurer le prochain tour de " +"formation. Que signifie *configurer* dans ce contexte ? 
Configurer un " +"tour signifie sélectionner des clients et décider des instructions à leur" +" envoyer. La signature de :code:`configure_fit` l'indique clairement :" -#: ../../source/how-to-use-built-in-mods.rst:46 -msgid "2. Define your client function" +#: ../../source/how-to-implement-strategies.rst:239 +#, fuzzy +msgid "" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_fit``:" msgstr "" +"La valeur de retour est une liste de tuples, chacun représentant les " +"instructions qui seront envoyées à un client particulier. Les " +"implémentations de stratégies effectuent généralement les étapes " +"suivantes dans :code:`configure_fit` :" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 +#, fuzzy msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" +"Utilise le :code:`client_manager` pour échantillonner au hasard tous les " +"clients disponibles (ou un sous-ensemble d'entre eux) (chacun représenté " +"par un objet :code:`ClientProxy`)" -#: ../../source/how-to-use-built-in-mods.rst:57 -msgid "3. 
Create the ``ClientApp`` with mods" +#: ../../source/how-to-implement-strategies.rst:245 +#, fuzzy +msgid "" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" +"Associe chaque :code:`ClientProxy` au même :code:`FitIns` contenant le " +"modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-implement-strategies.rst:248 +#, fuzzy msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. The order in which you provide the mods matters:" +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." msgstr "" +"Les implémentations plus sophistiquées peuvent utiliser " +":code:`configure_fit` pour mettre en œuvre une logique de sélection des " +"clients personnalisée. Un client ne participera à un tour que si le " +":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " +":code:`configure_fit`." -#: ../../source/how-to-use-built-in-mods.rst:72 +#: ../../source/how-to-implement-strategies.rst:254 #, fuzzy -msgid "Order of execution" -msgstr "Dépréciations" - -#: ../../source/how-to-use-built-in-mods.rst:74 msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." 
msgstr "" +"La structure de cette valeur de retour offre beaucoup de souplesse à " +"l'utilisateur. Comme les instructions sont définies par client, des " +"instructions différentes peuvent être envoyées à chaque client, ce qui " +"permet d'élaborer des stratégies personnalisées pour former, par exemple," +" différents modèles sur différents clients, ou utiliser différents " +"hyperparamètres sur différents clients (via le dict :code:`config`)." -#: ../../source/how-to-use-built-in-mods.rst:76 -msgid "``example_mod_1`` (outermost mod)" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:261 +#, fuzzy +msgid "The ``aggregate_fit`` method" +msgstr "La méthode :code:`aggregate_fit` (agrégation)" -#: ../../source/how-to-use-built-in-mods.rst:77 -msgid "``example_mod_2`` (next mod)" +#: ../../source/how-to-implement-strategies.rst:263 +#, fuzzy +msgid "" +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." msgstr "" +":code:`aggregate_fit` est chargé d'agréger les résultats renvoyés par les" +" clients qui ont été sélectionnés et à qui on a demandé de s'entraîner " +"dans :code:`configure_fit`." -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-implement-strategies.rst:277 +#, fuzzy msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." msgstr "" +"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " +"que le serveur obtienne des résultats de tous les clients auxquels il a " +"envoyé des instructions (via :code:`configure_fit`). 
" +":code:`aggregate_fit` reçoit donc une liste de :code:`résultats`, mais " +"aussi une liste de :code:`échecs`." -#: ../../source/how-to-use-built-in-mods.rst:79 -msgid "``example_mod_2`` (on the way back)" +#: ../../source/how-to-implement-strategies.rst:282 +#, fuzzy +msgid "" +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." msgstr "" +":code:`aggregate_fit` renvoie un objet :code:`Parameters` facultatif et " +"un dictionnaire de métriques agrégées. La valeur de retour " +":code:`Parameters` est facultative car :code:`aggregate_fit` peut décider" +" que les résultats fournis ne sont pas suffisants pour l'agrégation (par " +"exemple, trop d'échecs)." -#: ../../source/how-to-use-built-in-mods.rst:80 -msgid "``example_mod_1`` (outermost mod on the way back)" -msgstr "" +#: ../../source/how-to-implement-strategies.rst:288 +#, fuzzy +msgid "The ``configure_evaluate`` method" +msgstr "La méthode :code:`configure_evaluate` (en anglais)" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-implement-strategies.rst:290 +#, fuzzy msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of ``configure_evaluate`` makes this clear:" msgstr "" +":code:`configure_evaluate` est chargé de configurer le prochain tour " +"d'évaluation. Que signifie *configurer* dans ce contexte ? 
Configurer un " +"tour signifie sélectionner des clients et décider des instructions à leur" +" envoyer. La signature de :code:`configure_evaluate` l'indique clairement" +" :" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-implement-strategies.rst:303 +#, fuzzy msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_evaluate``:" msgstr "" +"La valeur de retour est une liste de tuples, chacun représentant les " +"instructions qui seront envoyées à un client particulier. Les " +"implémentations de stratégies effectuent généralement les étapes " +"suivantes dans :code:`configure_evaluate` :" -#: ../../source/how-to-use-differential-privacy.rst:2 +#: ../../source/how-to-implement-strategies.rst:309 #, fuzzy -msgid "Use Differential Privacy" -msgstr "Confidentialité différentielle" - -#: ../../source/how-to-use-differential-privacy.rst:3 msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." 
+"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" +"Associe chaque :code:`ClientProxy` au même :code:`EvaluateIns` contenant " +"le modèle global actuel :code:`parameters` et :code:`config` dict" -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-implement-strategies.rst:312 +#, fuzzy msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." +"More sophisticated implementations can use ``configure_evaluate`` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." msgstr "" +"Les implémentations plus sophistiquées peuvent utiliser " +":code:`configure_evaluate` pour mettre en œuvre une logique de sélection " +"des clients personnalisée. Un client ne participera à un tour que si le " +":code:`ClientProxy` correspondant est inclus dans la liste renvoyée par " +":code:`configure_evaluate`." -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-implement-strategies.rst:318 +#, fuzzy msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. 
This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." msgstr "" +"La structure de cette valeur de retour offre beaucoup de souplesse à " +"l'utilisateur. Comme les instructions sont définies par client, des " +"instructions différentes peuvent être envoyées à chaque client. Cela " +"permet aux stratégies personnalisées d'évaluer, par exemple, différents " +"modèles sur différents clients, ou d'utiliser différents hyperparamètres " +"sur différents clients (via le dict :code:`config`)." -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-implement-strategies.rst:325 +#, fuzzy +msgid "The ``aggregate_evaluate`` method" +msgstr "La méthode :code:`aggregate_evaluate` (agréger_évaluer)" + +#: ../../source/how-to-implement-strategies.rst:327 +#, fuzzy msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." +"``aggregate_evaluate`` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +"``configure_evaluate``." msgstr "" +":code:`aggregate_evaluate` est chargé d'agréger les résultats renvoyés " +"par les clients qui ont été sélectionnés et à qui l'on a demandé " +"d'évaluer dans :code:`configure_evaluate`." -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-implement-strategies.rst:341 +#, fuzzy msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. 
However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" +"Bien sûr, des échecs peuvent se produire, il n'y a donc aucune garantie " +"que le serveur obtienne des résultats de tous les clients auxquels il a " +"envoyé des instructions (via :code:`configure_evaluate`). " +":code:`aggregate_evaluate` reçoit donc une liste de :code:`résultats`, " +"mais aussi une liste d' :code:`échecs`." -#: ../../source/how-to-use-differential-privacy.rst:21 +#: ../../source/how-to-implement-strategies.rst:346 #, fuzzy -msgid "Server-side Clipping" -msgstr "Logique côté serveur" - -#: ../../source/how-to-use-differential-privacy.rst:22 msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." msgstr "" +":code:`aggregate_evaluate` renvoie un :code:`float` facultatif (perte) et" +" un dictionnaire de mesures agrégées. La valeur de retour :code:`float` " +"est facultative car :code:`aggregate_evaluate` peut décider que les " +"résultats fournis ne sont pas suffisants pour l'agrégation (par exemple, " +"trop d'échecs)." 
-#: ../../source/how-to-use-differential-privacy.rst:-1 +#: ../../source/how-to-implement-strategies.rst:352 #, fuzzy -msgid "server side clipping" -msgstr "Logique côté serveur" +msgid "The ``evaluate`` method" +msgstr "La méthode :code:`évaluer`" -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-implement-strategies.rst:354 +#, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" +"le fait d'avoir :code:`evaluate` en plus de " +":code:`configure_evaluate`/:code:`aggregate_evaluate` permet aux " +"stratégies d'effectuer des évaluations à la fois côté serveur et côté " +"client (fédéré)." -#: ../../source/how-to-use-differential-privacy.rst:52 +#: ../../source/how-to-implement-strategies.rst:364 #, fuzzy -msgid "Client-side Clipping" -msgstr "Logique côté client" - -#: ../../source/how-to-use-differential-privacy.rst:53 msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." 
+"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." msgstr "" +"La valeur de retour est à nouveau facultative parce que la stratégie peut" +" ne pas avoir besoin de mettre en œuvre l'évaluation côté serveur ou " +"parce que la méthode :code:`evaluate` définie par l'utilisateur peut ne " +"pas se terminer avec succès (par exemple, elle peut échouer à charger les" +" données de l'évaluation côté serveur)." -#: ../../source/how-to-use-differential-privacy.rst:-1 +#: ../../source/how-to-install-flower.rst:2 #, fuzzy -msgid "client side clipping" -msgstr "Logique côté client" +msgid "Install Flower" +msgstr "Installer Flower" -#: ../../source/how-to-use-differential-privacy.rst:63 -msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +#: ../../source/how-to-install-flower.rst:5 +#, fuzzy +msgid "Python version" +msgstr "Version Python" + +#: ../../source/how-to-install-flower.rst:11 +msgid "Install stable release" +msgstr "Installe la version stable" + +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 +msgid "Using pip" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:80 -msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +#: ../../source/how-to-install-flower.rst:16 +#, fuzzy +msgid "Stable releases are available on `PyPI `_:" msgstr "" +"Les versions stables sont disponibles sur `PyPI " +"`_: :" -#: ../../source/how-to-use-differential-privacy.rst:97 
+#: ../../source/how-to-install-flower.rst:22 +#, fuzzy msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra:" msgstr "" +"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr`` " +"doit être installé avec l'option ``simulation``: :" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "local DP mod" +#: ../../source/how-to-install-flower.rst:30 +msgid "Using conda (or mamba)" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#: ../../source/how-to-install-flower.rst:32 +msgid "Flower can also be installed from the ``conda-forge`` channel." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-install-flower.rst:34 msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." -msgstr "" - -#: ../../source/how-to-use-differential-privacy.rst:125 -msgid "Local Training using Privacy Engines" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-install-flower.rst:42 msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. 
For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``:" msgstr "" -#: ../../source/how-to-use-strategies.rst:2 -#, fuzzy -msgid "Use strategies" -msgstr "Stratégies personnalisées" - -#: ../../source/how-to-use-strategies.rst:4 -msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +#: ../../source/how-to-install-flower.rst:49 +msgid "or with ``mamba``:" msgstr "" -"Flower permet une personnalisation complète du processus d'apprentissage " -"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " -"intégrées sont fournies dans le cadre principal." -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-install-flower.rst:56 +msgid "Verify installation" +msgstr "Vérifie l'installation" + +#: ../../source/how-to-install-flower.rst:58 +#, fuzzy msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" +"The following command can be used to verify if Flower was successfully " +"installed. If everything worked, it should print the version of Flower to" +" the command line:" msgstr "" -"Il y a trois façons de personnaliser la manière dont Flower orchestre le " -"processus d'apprentissage du côté du serveur :" +"La commande suivante peut être utilisée pour vérifier si Flower a été " +"installé avec succès. 
Si tout a fonctionné, la version de Flower devrait " +"être imprimée sur la ligne de commande: :" -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" -msgstr "Utilise une stratégie existante, par exemple :code:`FedAvg`" +#: ../../source/how-to-install-flower.rst:68 +msgid "Advanced installation options" +msgstr "Options d'installation avancées" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" -msgstr "Personnalise une stratégie existante avec des fonctions de rappel" +#: ../../source/how-to-install-flower.rst:71 +#, fuzzy +msgid "Install via Docker" +msgstr "Installer Flower" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" -msgstr "Mets en place une nouvelle stratégie" +#: ../../source/how-to-install-flower.rst:73 +msgid ":doc:`Run Flower using Docker `" +msgstr "" -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" -msgstr "Utilise une stratégie existante" +#: ../../source/how-to-install-flower.rst:76 +msgid "Install pre-release" +msgstr "Installer la version pre-release" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-install-flower.rst:78 +#, fuzzy msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens:" msgstr "" -"Flower intègre un certain nombre de stratégies d'apprentissage fédéré " -"populaires. 
Une stratégie intégrée peut être instanciée comme suit :" +"Les nouvelles versions (éventuellement instables) de Flower sont parfois " +"disponibles en tant que versions préliminaires (alpha, bêta, release " +"candidate) avant que la version stable n'arrive : :" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-install-flower.rst:85 +#, fuzzy msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra:" msgstr "" -"Cela crée une stratégie dont tous les paramètres sont laissés à leur " -"valeur par défaut et la transmet à la fonction :code:`start_server`. Il " -"est généralement recommandé d'ajuster quelques paramètres lors de " -"l'instanciation :" +"Pour les simulations qui utilisent le moteur de client virtuel, les " +"versions de ``flwr`` doivent être installées avec l'option " +"``simulation``: :" + +#: ../../source/how-to-install-flower.rst:93 +msgid "Install nightly release" +msgstr "Installer la version nightly" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-install-flower.rst:95 +#, fuzzy msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases:" msgstr "" -"Les stratégies existantes offrent plusieurs façons de personnaliser leur " -"comportement. Les fonctions de rappel permettent aux stratégies d'appeler" -" le code fourni par l'utilisateur pendant l'exécution." 
- -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" -msgstr "Configurer l'adaptation et l'évaluation du client" +"Les dernières modifications (potentiellement instables) de Flower sont " +"disponibles sous forme de versions nocturnes: :" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-install-flower.rst:101 +#, fuzzy msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra:" msgstr "" -"Le serveur peut transmettre de nouvelles valeurs de configuration au " -"client à chaque tour en fournissant une fonction à " -":code:`on_fit_config_fn`. La fonction fournie sera appelée par la " -"stratégie et doit renvoyer un dictionnaire de paires de valeurs de clés " -"de configuration qui seront envoyées au client. Elle doit renvoyer un " -"dictionnaire de valeurs de configuration arbitraires :code:`client.fit` " -"et :code:`client.evaluate` au cours de chaque tour d'apprentissage " -"fédéré." 
+"Pour les simulations qui utilisent le moteur de client virtuel, ``flwr-" +"nightly`` doit être installé avec l'option ``simulation``: :" + +#: ../../source/how-to-monitor-simulation.rst:2 +#, fuzzy +msgid "Monitor simulation" +msgstr "Simulation de moniteur" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-monitor-simulation.rst:4 msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." msgstr "" -"Le :code:`on_fit_config_fn` peut être utilisé pour passer des valeurs de " -"configuration arbitraires du serveur au client, et changer poétiquement " -"ces valeurs à chaque tour, par exemple pour ajuster le taux " -"d'apprentissage. Le client recevra le dictionnaire renvoyé par le " -":code:`on_fit_config_fn` dans sa propre fonction :code:`client.fit()`." +"Flower te permet de surveiller les ressources du système pendant " +"l'exécution de ta simulation. De plus, le moteur de simulation de Flower " +"est puissant et te permet de décider comment allouer les ressources par " +"manière de client et de limiter l'utilisation totale. Les informations " +"sur la consommation des ressources peuvent t'aider à prendre des " +"décisions plus intelligentes et à accélérer le temps d'exécution." 
-#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-monitor-simulation.rst:9 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." msgstr "" -"Comme pour :code:`on_fit_config_fn`, il existe aussi " -":code:`on_evaluate_config_fn` pour personnaliser la configuration envoyée" -" à :code:`client.evaluate()`" +"Les instructions spécifiques supposent que tu utilises macOS et que le " +"gestionnaire de paquets `Homebrew `_ est installé." -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" -msgstr "Configuration de l'évaluation côté serveur" +#: ../../source/how-to-monitor-simulation.rst:13 +msgid "Downloads" +msgstr "Téléchargements" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." msgstr "" -"L'évaluation côté serveur peut être activée en passant une fonction " -"d'évaluation à :code:`evaluate_fn`." +"`Prometheus `_ est utilisé pour la collecte de " +"données, tandis que `Grafana `_ te permettra de " +"visualiser les données collectées. Ils sont tous deux bien intégrés à " +"`Ray `_ que Flower utilise sous le capot." -#: ../../source/how-to-use-strategies.rst:89 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:23 msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." 
+"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." msgstr "" -"L'écriture d'une stratégie entièrement personnalisée est un peu plus " -"complexe, mais c'est celle qui offre le plus de souplesse. Lis le guide " -"`Implémentation des stratégies `_ pour " -"en savoir plus." - -#: ../../source/index.rst:34 -msgid "Tutorial" -msgstr "Tutoriel" - -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" -msgstr "Quickstart tutorials" +"Écrase les fichiers de configuration (selon ton appareil, il se peut " +"qu'il soit installé sur un chemin différent)." -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" -msgstr "Guides" +#: ../../source/how-to-monitor-simulation.rst:26 +msgid "If you are on an M1 Mac, it should be:" +msgstr "Si tu es sur un Mac M1, il devrait l'être :" -#: ../../source/index.rst:99 -msgid "Legacy example guides" +#: ../../source/how-to-monitor-simulation.rst:33 +msgid "On the previous generation Intel Mac devices, it should be:" msgstr "" +"Sur les appareils Mac Intel de la génération précédente, ce devrait être " +"le cas :" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" -msgstr "Explications" - -#: None:-1 -msgid "API reference" -msgstr "Référence pour l'API" - -#: ../../source/index.rst:137 -msgid "Reference docs" -msgstr "Référence pour la documentation" - -#: ../../source/index.rst:153 -#, fuzzy -msgid "Contributor tutorials" -msgstr "Configuration du contributeur" - -#: ../../source/index.rst:160 -#, fuzzy -msgid "Contributor how-to guides" -msgstr "Guide pour les contributeurs" - -#: ../../source/index.rst:172 -#, fuzzy -msgid "Contributor explanations" -msgstr "Explications" +#: ../../source/how-to-monitor-simulation.rst:40 +msgid "" +"Open the respective configuration files and change them. 
Depending on " +"your device, use one of the two following commands:" +msgstr "" +"Ouvre les fichiers de configuration respectifs et modifie-les. Selon ton " +"appareil, utilise l'une des deux commandes suivantes :" -#: ../../source/index.rst:178 -#, fuzzy -msgid "Contributor references" -msgstr "Configuration du contributeur" +#: ../../source/how-to-monitor-simulation.rst:51 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" +msgstr "" +"puis supprime tout le texte du fichier et colle une nouvelle " +"configuration Prometheus que tu vois ci-dessous. Tu peux adapter les " +"intervalles de temps à tes besoins :" -#: ../../source/index.rst:-1 +#: ../../source/how-to-monitor-simulation.rst:67 msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" msgstr "" +"Maintenant, après avoir édité la configuration de Prometheus, fais de " +"même avec les fichiers de configuration de Grafana. Ouvre ces derniers à " +"l'aide de l'une des commandes suivantes, comme précédemment :" -#: ../../source/index.rst:2 -#, fuzzy -msgid "Flower Framework Documentation" -msgstr "Rédiger de la documentation" +#: ../../source/how-to-monitor-simulation.rst:78 +msgid "" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." +msgstr "" +"Ton éditeur de terminal devrait s'ouvrir et te permettre d'appliquer la " +"configuration suivante comme précédemment." -#: ../../source/index.rst:7 +#: ../../source/how-to-monitor-simulation.rst:94 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." 
+"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." msgstr "" -"Bienvenue sur la documentation de Flower. `Flower `_ " -"est un framework de federated learning convivial et facile à utiliser." +"Félicitations, tu viens de télécharger tous les logiciels nécessaires au " +"suivi des métriques, maintenant, démarrons-le." -#: ../../source/index.rst:11 -msgid "Join the Flower Community" -msgstr "Rejoignez la communauté de Flower" +#: ../../source/how-to-monitor-simulation.rst:98 +msgid "Tracking metrics" +msgstr "Suivi des mesures" -#: ../../source/index.rst:13 +#: ../../source/how-to-monitor-simulation.rst:100 msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." msgstr "" -"Le communauté de Flower s'agrandit rapidement - on est un super groupe de" -" chercheurs, ingénieurs, étudiants, professionnels, académiques, et " -"autres hobbyistes." - -#: ../../source/index.rst:15 -msgid "Join us on Slack" -msgstr "Join us on Slack" - -#: ../../source/index.rst:23 -msgid "Flower Framework" -msgstr "Flower Framework" +"Avant de lancer ta simulation Flower, tu dois démarrer les outils de " +"surveillance que tu viens d'installer et de configurer." -#: ../../source/index.rst:25 +#: ../../source/how-to-monitor-simulation.rst:108 msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." +"Please include the following argument in your Python code when starting a" +" simulation." 
msgstr "" -"Ce guide utilisateur s'adresse à des chercheurs et des développeurs qui " -"veulent utiliser Flower pour transposer des workloads de Machine Learning" -" existantes dans un scenario fédéré. Un des buts de Flower est de rendre " -"cela le plus evident possible. Lisez la suite pour en apprendre plus." +"Tu dois inclure l'argument suivant dans ton code Python lorsque tu " +"démarres une simulation." -#: ../../source/index.rst:30 -msgid "Tutorials" -msgstr "Tutoriels" +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "Now, you are ready to start your workload." +msgstr "Maintenant, tu es prêt à commencer ta charge de travail." -#: ../../source/index.rst:32 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" msgstr "" -"Une serie de tutoriels de Federated Learning, l'endroit parfait pour " -"débuter." +"Peu de temps après le début de la simulation, tu devrais voir les " +"journaux suivants dans ton terminal :" -#: ../../source/index.rst:61 +#: ../../source/how-to-monitor-simulation.rst:127 #, fuzzy +msgid "You can look at everything at http://127.0.0.1:8265 ." +msgstr "Tu peux tout regarder sur ``_ ." + +#: ../../source/how-to-monitor-simulation.rst:129 msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." 
msgstr "" -"QUICKSTART TUTORIALS: :ref:`PyTorch ` | " -":ref:`TensorFlow ` | :ref:`🤗 Transformers " -"` | :ref:`JAX ` | :ref:`Pandas " -"` | :ref:`fastai ` | :ref:`PyTorch " -"Lightning ` | :ref:`MXNet ` | :ref:`scikit-learn ` | :ref:`XGBoost " -"` | :ref:`Android ` | :ref:`iOS " -"`" +"Il s'agit d'un tableau de bord Ray. Tu peux naviguer vers Metrics (sur le" +" panneau de gauche, l'option la plus basse)." -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "" +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." msgstr "" +"Ou alors, tu peux simplement les voir dans Grafana en cliquant sur le " +"coin supérieur droit, \"View in Grafana\". Sache que le tableau de bord " +"Ray n'est accessible que pendant la simulation. Une fois la simulation " +"terminée, tu ne peux utiliser Grafana que pour explorer les métriques. Tu" +" peux démarrer Grafana en te rendant sur `http://localhost:3000/``." -#: ../../source/index.rst:68 +#: ../../source/how-to-monitor-simulation.rst:137 #, fuzzy -msgid "And TensorFlow:" -msgstr "Exemples de TensorFlow" - -#: ../../source/index.rst:76 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." msgstr "" -"Guides orientés sur la résolutions étapes par étapes de problèmes ou " -"objectifs specifiques." +"Après avoir terminé la visualisation, arrête Prometheus et Grafana. 
C'est" +" important car sinon ils bloqueront, par exemple, le port :code:`3000` " +"sur ta machine tant qu'ils seront en cours d'exécution." + +#: ../../source/how-to-monitor-simulation.rst:147 +msgid "Resource allocation" +msgstr "Allocation des ressources" -#: ../../source/index.rst:110 +#: ../../source/how-to-monitor-simulation.rst:149 msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." msgstr "" -"Guides orientés sur la compréhension et l'explication des sujets et idées" -" de fonds sur lesquels sont construits Flower et l'IA collaborative." - -#: ../../source/index.rst:120 -#, fuzzy -msgid "References" -msgstr "Référence" - -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." -msgstr "Référence de l'API orientée sur l'information pure." +"Tu dois comprendre le fonctionnement de la bibliothèque Ray pour allouer " +"efficacement les ressources du système aux clients de simulation de ton " +"côté." -#: ../../source/index.rst:131::1 -msgid ":py:obj:`flwr `\\" +#: ../../source/how-to-monitor-simulation.rst:152 +msgid "" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" msgstr "" +"Au départ, la simulation (que Ray gère sous le capot) démarre par défaut " +"avec toutes les ressources disponibles sur le système, qu'elle partage " +"entre les clients. 
Cela ne signifie pas qu'elle les divise de manière " +"égale entre tous, ni que l'apprentissage du modèle se fait sur tous les " +"clients simultanément. Tu en apprendras plus à ce sujet dans la suite de " +"ce blog. Tu peux vérifier les ressources du système en exécutant ce qui " +"suit :" -#: ../../source/index.rst:131::1 flwr:1 of -msgid "Flower main package." +#: ../../source/how-to-monitor-simulation.rst:164 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "Dans Google Colab, le résultat que tu obtiens peut ressembler à ceci :" + +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" msgstr "" +"Cependant, tu peux écraser les valeurs par défaut. Lorsque tu démarres " +"une simulation, fais ce qui suit (tu n'as pas besoin de les écraser " +"toutes) :" -#: ../../source/index.rst:148 -#, fuzzy -msgid "Contributor docs" -msgstr "Configuration du contributeur" +#: ../../source/how-to-monitor-simulation.rst:195 +msgid "Let’s also specify the resource for a single client." +msgstr "Spécifions également la ressource pour un seul client." -#: ../../source/index.rst:150 -#, fuzzy +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." msgstr "" -"Les auteurs de Flower sont heureux d'accueillir des contributions " -"externes. Les guides suivant sont là pour vous accompagner dans cette " -"direction." 
- -#: ../../source/ref-api-cli.rst:2 -#, fuzzy -msgid "Flower CLI reference" -msgstr "Client de Flower" +"Ray ne démarrera un nouveau client que lorsqu'il disposera de toutes les " +"ressources nécessaires (de manière à ce qu'ils fonctionnent en parallèle)" +" lorsque les ressources le permettront." -#: ../../source/ref-api-cli.rst:7 +#: ../../source/how-to-monitor-simulation.rst:228 #, fuzzy -msgid "flower-simulation" -msgstr "Simulation de moniteur" - -#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" -msgstr "flower-superlink" +msgid "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." +msgstr "" +"Dans l'exemple ci-dessus, un seul client sera exécuté, donc tes clients " +"ne fonctionneront pas simultanément. En définissant " +":code:`client_num_gpus = 0.5`, tu pourras exécuter deux clients et donc " +"les faire fonctionner simultanément. Fais attention à ne pas demander " +"plus de ressources que celles disponibles. Si tu as spécifié " +":code:`client_num_gpus = 2`, la simulation ne démarrera pas (même si tu " +"as 2 GPU mais que tu as décidé d'en définir 1 dans " +":code:`ray_init_args`)." -#: ../../source/ref-api-cli.rst:27 -#, fuzzy -msgid "flower-client-app" -msgstr "Flower ClientApp." +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "FAQ" -#: ../../source/ref-api-cli.rst:37 -#, fuzzy -msgid "flower-server-app" -msgstr "flower-driver-api" +#: ../../source/how-to-monitor-simulation.rst:237 +msgid "Q: I don't see any metrics logged." +msgstr "Q : Je ne vois aucune mesure enregistrée." 
-#: ../../source/ref-api/flwr.rst:2 -#, fuzzy -msgid "flwr" -msgstr "Fleur" +#: ../../source/how-to-monitor-simulation.rst:239 +msgid "" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." +msgstr "" +"R : Il se peut que le délai ne soit pas correctement défini. Le paramètre" +" se trouve dans le coin supérieur droit (\"Dernières 30 minutes\" par " +"défaut). Modifie le délai pour qu'il corresponde à la période pendant " +"laquelle la simulation s'est déroulée." -#: ../../source/ref-api/flwr.client.rst:45 ../../source/ref-api/flwr.rst:25 -#: ../../source/ref-api/flwr.server.rst:49 -msgid "Modules" +#: ../../source/how-to-monitor-simulation.rst:243 +msgid "" +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." msgstr "" +"Q : Je vois s'afficher \"Serveur Grafana non détecté. Vérifie que le " +"serveur Grafana fonctionne et actualise cette page\" après avoir accédé à" +" l'onglet Métriques dans Ray Dashboard." -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.client `\\" +#: ../../source/how-to-monitor-simulation.rst:246 +msgid "" +"A: You probably don't have Grafana running. Please check the running " +"services" msgstr "" +"R : Grafana n'est probablement pas en cours d'exécution. Vérifie les " +"services en cours d'exécution" -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +#: ../../source/how-to-monitor-simulation.rst:252 #, fuzzy -msgid "Flower client." -msgstr "Client de Flower" +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"http://127.0.0.1:8265." +msgstr "" +"Q : Je vois \"This site can't be reached\" quand je vais sur " +"``_." 
-#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.common `\\" +#: ../../source/how-to-monitor-simulation.rst:254 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." msgstr "" +"R : Soit la simulation est déjà terminée, soit tu dois encore démarrer " +"Prometheus." -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." -msgstr "Composants communs partagés entre le serveur et le client." +#: ../../source/how-to-monitor-simulation.rst:257 +msgid "Resources" +msgstr "Ressources" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.server `\\" +#: ../../source/how-to-monitor-simulation.rst:259 +#, fuzzy +msgid "" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" msgstr "" +"Tableau de bord Ray : ``_" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:38::1 flwr.server:1 -#: flwr.server.server.Server:1 of +#: ../../source/how-to-monitor-simulation.rst:261 #, fuzzy -msgid "Flower server." -msgstr "Serveur de Flower" - -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.simulation `\\" +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" msgstr "" +"Ray Metrics : ``_" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +#: ../../source/how-to-run-simulations.rst:2 #, fuzzy -msgid "Flower simulation." 
+msgid "Run simulations" msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" -msgstr "client" - -#: ../../source/ref-api/flwr.client.mod.rst:13 -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -#, fuzzy -msgid "Functions" -msgstr "Les quatre fonctions :" - -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_client_app `\\ \\(\\)" -msgstr "" - -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -#, fuzzy -msgid "Run Flower client app." -msgstr "Client de Flower" - -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy -msgid ":py:obj:`run_supernode `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of -#, fuzzy -msgid "Run Flower SuperNode." -msgstr "Serveur de Flower" - -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:8 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." 
msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." +#: ../../source/how-to-run-simulations.rst:19 +msgid "" +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. In addition to that, clients managed by the " +"``VirtualClientEngine`` are:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:26 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." +#: ../../source/how-to-run-simulations.rst:31 +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to ``VirtualClientEngine``'s " +"internals." 
msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:30 -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:26 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -msgid "Classes" +#: ../../source/how-to-run-simulations.rst:33 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:38 +msgid "" +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." +#: ../../source/how-to-run-simulations.rst:45 +msgid "Launch your Flower simulation" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-run-simulations.rst:47 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of +#: ../../source/how-to-run-simulations.rst:73 #, fuzzy -msgid "Flower ClientApp." -msgstr "Flower ClientApp." 
+msgid "VirtualClientEngine resources" +msgstr "Moteur de client virtuel" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:75 +msgid "" +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " +"`_" +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." +#: ../../source/how-to-run-simulations.rst:97 +msgid "Assigning client resources" msgstr "" -#: ../../source/ref-api/flwr.client.rst:52::1 -#, fuzzy -msgid ":py:obj:`flwr.client.mod `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.client.rst:52::1 flwr.client.mod:1 of -#, fuzzy -msgid "Flower Built-in Mods." -msgstr "Client de Flower" +#: ../../source/how-to-run-simulations.rst:99 +msgid "" +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." 
+msgstr "" -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -msgid "Bases: :py:class:`~abc.ABC`" +#: ../../source/how-to-run-simulations.rst:103 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: 
../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -msgid "Methods" +#: ../../source/how-to-run-simulations.rst:110 +msgid "``num_cpus`` indicates the number of CPU cores a client would get." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:111 +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -#, fuzzy -msgid "Evaluate the provided parameters using the locally held dataset." 
-msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" +#: ../../source/how-to-run-simulations.rst:113 +msgid "Let's see a few examples:" +msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:132 +msgid "" +"While the ``client_resources`` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." +#: ../../source/how-to-run-simulations.rst:140 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:145 +#, fuzzy +msgid "Simulation examples" +msgstr "Exemples de PyTorch" + +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. 
You can run them on Google Colab too:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of +#: ../../source/how-to-run-simulations.rst:151 #, fuzzy -msgid "Get the run context from this client." -msgstr "Évaluer la réponse d'un client." +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." +msgstr "" +"`Quickstart TensorFlow (Code) " +"`_" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:154 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +#: ../../source/how-to-run-simulations.rst:159 #, fuzzy -msgid "Return the current local model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +msgid "Multi-node Flower simulations" +msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_properties `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:161 +msgid "" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. Before starting your multi-node simulation ensure" +" that you:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." +#: ../../source/how-to-run-simulations.rst:164 +msgid "Have the same Python environment in all nodes." 
msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`set_context `\\ \\(context\\)" -msgstr "" - -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -msgid "Apply a run context to this client." +#: ../../source/how-to-run-simulations.rst:165 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:166 +msgid "" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." +#: ../../source/how-to-run-simulations.rst:168 +msgid "" +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." 
msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 -msgid "Attributes" +#: ../../source/how-to-run-simulations.rst:171 +msgid "" +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." 
msgstr "" -#: flwr.client.client.Client.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-run-simulations.rst:174 +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +"``ray start --address='192.168.1.132:6379'``" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.mod.localdp_mod.LocalDpMod -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -#, fuzzy -msgid "Parameters" -msgstr "Paramètres du modèle." - -#: flwr.client.client.Client.evaluate:3 of +#: ../../source/how-to-run-simulations.rst:178 msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." 
msgstr "" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -#, fuzzy -msgid "Returns" -msgstr "Ressources" - -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-run-simulations.rst:181 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." 
msgstr "" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" +#: ../../source/how-to-run-simulations.rst:185 +msgid "Multi-node simulation good-to-know" msgstr "" -#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-run-simulations.rst:187 msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." 
+"Here we list a few interesting functionality when running multi-node FL " +"simulations:" msgstr "" -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-run-simulations.rst:189 msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." +"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." msgstr "" -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/how-to-run-simulations.rst:192 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" msgstr "" -#: flwr.client.client.Client.get_parameters:7 of +#: ../../source/how-to-run-simulations.rst:202 #, fuzzy -msgid "The current local model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +msgid "Considerations for simulations" +msgstr "Simulation de moniteur" -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-run-simulations.rst:206 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." msgstr "" -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." 
+#: ../../source/how-to-run-simulations.rst:209 +msgid "" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." msgstr "" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#: ../../source/how-to-run-simulations.rst:217 #, fuzzy -msgid "ClientApp" -msgstr "client" +msgid "GPU resources" +msgstr "Ressources" -#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 -#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 -#: flwr.common.context.Context:1 flwr.common.message.Error:1 -#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 -#: flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "Bases: :py:class:`object`" +#: 
../../source/how-to-run-simulations.rst:219 +msgid "" +"The VCE assigns a share of GPU memory to a client that specifies the key " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " +"internally by the VCE) is by default:" msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 -#: flwr.client.mod.localdp_mod.LocalDpMod:22 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -#, fuzzy -msgid "Examples" -msgstr "Exemples de PyTorch" - -#: flwr.client.client_app.ClientApp:5 of +#: ../../source/how-to-run-simulations.rst:222 msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." msgstr "" -#: flwr.client.client_app.ClientApp:16 of +#: ../../source/how-to-run-simulations.rst:225 msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. 
Two takeaways from this are:" msgstr "" -#: flwr.client.client_app.ClientApp:21 of +#: ../../source/how-to-run-simulations.rst:228 msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." -msgstr "" - -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`evaluate `\\ \\(\\)" -msgstr "" - -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid "Return a decorator that registers the evaluate fn with the client app." +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`query `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:231 +msgid "" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -msgid "Return a decorator that registers the query fn with the client app." +#: ../../source/how-to-run-simulations.rst:235 +msgid "" +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." 
msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#: ../../source/how-to-run-simulations.rst:240 #, fuzzy -msgid ":py:obj:`train `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -msgid "Return a decorator that registers the train fn with the client app." -msgstr "" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" -msgstr "NumPyClient" +msgid "TensorFlow with GPUs" +msgstr "Exemples de TensorFlow" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:242 msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-run-simulations.rst:249 +msgid "" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " +"follows:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of +#: ../../source/how-to-run-simulations.rst:272 #, fuzzy -msgid "Train the provided parameters using the locally held dataset." 
-msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" +msgid "" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." +msgstr "" +"`Quickstart TensorFlow (Code) " +"`_" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:276 +msgid "Multi-node setups" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:278 msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:286 msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." 
msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." -msgstr "" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +#, fuzzy +msgid "Save and load model checkpoints" +msgstr "Sauvegarde et chargement des points de contrôle PyTorch" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" -msgstr "" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 #, fuzzy -msgid "Convert to object to Client type and return it." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." +msgid "Model checkpointing" +msgstr "Point de contrôle du modèle" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#, fuzzy +msgid "" +"Model updates can be persisted on the server-side by customizing " +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). 
It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" +"Les mises à jour du modèle peuvent être conservées côté serveur en " +"personnalisant les méthodes :code:`Strategy`. L'implémentation de " +"stratégies personnalisées est toujours possible, mais dans de nombreux " +"cas, il peut être plus pratique de simplement personnaliser une stratégie" +" existante. L'exemple de code suivant définit une nouvelle " +":code:`SaveModelStrategy` qui personnalise la stratégie intégrée " +":code:`FedAvg` existante. En particulier, il personnalise " +":code:`aggregate_fit` en appelant :code:`aggregate_fit` dans la classe de" +" base (:code:`FedAvg`). Il continue ensuite à sauvegarder les poids " +"retournés (agrégés) avant de renvoyer ces poids agrégés à l'appelant " +"(c'est-à-dire le serveur) :" -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 #, fuzzy -msgid "The current (global) model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +msgid "Save and load PyTorch checkpoints" +msgstr "Sauvegarde et chargement des points de contrôle PyTorch" -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 +#, fuzzy msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. 
It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." msgstr "" +"Comme dans l'exemple précédent, mais avec quelques étapes " +"supplémentaires, nous allons montrer comment stocker un point de contrôle" +" PyTorch en utilisant la fonction ``torch.save``. Tout d'abord, " +"``aggregate_fit`` renvoie un objet ``Parameters`` qui doit être " +"transformé en une liste de `ndarray`` NumPy, puis ceux-ci sont " +"transformés en ``state_dict`` PyTorch en suivant la structure de la " +"classe ``OrderedDict``." -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" msgstr "" +"Pour charger ta progression, il te suffit d'ajouter les lignes suivantes " +"à ton code. 
Note que cela va itérer sur tous les points de contrôle " +"sauvegardés et charger le plus récent :" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "Passe à Flower 1.0" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." msgstr "" +"Flower 1.0 est arrivé. En plus de nouvelles fonctionnalités, Flower 1.0 " +"fournit une base stable pour la croissance future. Par rapport à Flower " +"0.19 (et aux autres versions de la série 0.x), il y a quelques " +"changements qui nécessitent de modifier le code des projets de la série " +"0.x existants." 
-#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 +msgid "Install update" +msgstr "Installer la mise à jour" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" msgstr "" +"Voici comment mettre à jour une installation existante vers Flower 1.0 en" +" utilisant soit pip soit Poetry :" -#: flwr.client.numpy_client.NumPyClient.fit:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "pip: add ``-U`` when installing." +msgstr "pip : ajoute ``-U`` lors de l'installation." + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" msgstr "" +"``python -m pip install -U flwr`` (lors de l'utilisation de " +"``start_server`` et ``start_client``)" -#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." 
+"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" msgstr "" +"``python -m pip install -U 'flwr[simulation]'`` (lors de l'utilisation de" +" ``start_simulation``)" -#: flwr.client.numpy_client.NumPyClient.fit:11 of -#, fuzzy -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +msgid "" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." +msgstr "" +"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " +"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " +"poetry.lock`` avant d'exécuter ``poetry install``)." -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" +"``flwr = \"^1.0.0\"`` (lors de l'utilisation de ``start_server`` et " +"``start_client``)" -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (lors de " +"l'utilisation de ``start_simulation``)" -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -#, fuzzy -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." 
-msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:121 +msgid "Required changes" +msgstr "Changements nécessaires" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of -msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +msgid "The following breaking changes require manual updates." msgstr "" +"Les changements de rupture suivants nécessitent des mises à jour " +"manuelles." -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of -msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "General" +msgstr "Généralités" -#: ../../source/ref-api/flwr.client.mod.rst:2 -msgid "mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 +msgid "" +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" msgstr "" +"Passe tous les arguments comme des arguments de mots-clés (et non comme " +"des arguments de position). Voici un exemple :" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 msgid "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" msgstr "" +"Flower 0.19 (arguments positionnels) : ``start_client(\"127.0.0.1:8080\"," +" FlowerClient())``" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of -#, fuzzy -msgid "Client-side adaptive clipping modifier." 
-msgstr "Logique côté client" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" -":py:obj:`fixedclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" +"Fleur 1.0 (arguments de mots-clés) : " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of -#, fuzzy -msgid "Client-side fixed clipping modifier." -msgstr "Logique côté client" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#, fuzzy -msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" +msgstr "Client" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:45
+msgid ""
+"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to "
+"``def get_parameters(self, config):``"
 msgstr ""
+"Sous-classes de ``NumPyClient`` : changez ``def get_parameters(self):`` "
+"en ``def get_parameters(self, config):``"

-#: ../../source/ref-api/flwr.client.mod.rst:28::1
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:47
 msgid ""
-":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, "
-"call\\_next\\)"
+"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def "
+"get_parameters(self, ins: GetParametersIns):``"
 msgstr ""
+"Sous-classes de ``Client`` : changez ``def get_parameters(self):`` en "
+"``def get_parameters(self, ins : GetParametersIns):``"

-#: ../../source/ref-api/flwr.client.mod.rst:28::1
-#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of
-msgid "Handle incoming message and return results, following the SecAgg protocol."
-msgstr ""
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:51
+msgid "Strategies / ``start_server`` / ``start_simulation``"
+msgstr "Stratégies / ``start_server`` / ``start_simulation``"

-#: ../../source/ref-api/flwr.client.mod.rst:28::1
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:53
 msgid ""
-":py:obj:`secaggplus_mod `\\ \\(msg\\, "
-"ctxt\\, call\\_next\\)"
+"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and "
+"``start_simulation``. Here's an example:"
 msgstr ""
+"Passez ``ServerConfig`` (au lieu d'un dictionnaire) à ``start_server`` et"
+" ``start_simulation``. Voici un exemple :"

-#: ../../source/ref-api/flwr.client.mod.rst:28::1
-#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:56
 msgid ""
-"Handle incoming message and return results, following the SecAgg+ "
-"protocol."
+"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" msgstr "" +"Flower 0.19 : ``start_server(..., config={\"num_rounds\" : 3, " +"\"round_timeout\" : 600.0}, ...)``" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "" -":py:obj:`message_size_mod `\\ \\(msg\\," -" ctxt\\, call\\_next\\)" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.message_size_mod:1 of -msgid "Message size mod." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +msgid "" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" msgstr "" +"Remplacer ``num_rounds=1`` dans ``start_simulation`` par le nouveau " +"``config=ServerConfig(...)`` (voir point précédent)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." msgstr "" +"Supprime le paramètre ``force_final_distributed_eval`` des appels à " +"``start_server``. L'évaluation distribuée sur tous les clients peut être " +"activée en configurant la stratégie pour échantillonner tous les clients " +"pour l'évaluation après le dernier tour de formation." -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.parameters_size_mod:1 of -#, fuzzy -msgid "Parameters size mod." 
-msgstr "Paramètres du modèle."
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:66
+msgid "Rename parameter/ndarray conversion functions:"
+msgstr "Renomme les fonctions de conversion des paramètres et des tableaux :"

-#: ../../source/ref-api/flwr.client.mod.rst:35::1
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:68
+msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``"
+msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``"
+
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:69
+msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``"
+msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``"
+
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:71
 msgid ""
-":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\,"
-" sensitivity\\, ...\\)"
+"Strategy initialization: if the strategy relies on the default values for"
+" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and "
+"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually "
+"create a strategy (by calling ``start_server`` or ``start_simulation`` "
+"without passing a strategy instance) should now manually initialize "
+"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``."
 msgstr ""
+"Initialisation de la stratégie : si la stratégie repose sur les valeurs "
+"par défaut de ``fraction_fit`` et ``fraction_evaluate``, fixer "
+"manuellement ``fraction_fit`` et ``fraction_evaluate`` à ``0.1``. Les "
+"projets qui ne créent pas manuellement une stratégie (en appelant "
+"``start_server`` ou ``start_simulation`` sans passer une instance de "
+"stratégie) doivent maintenant initialiser manuellement FedAvg avec "
+"``fraction_fit`` et ``fraction_evaluate`` fixés à ``0.1``."

-#: ../../source/ref-api/flwr.client.mod.rst:35::1
-#: flwr.client.mod.localdp_mod.LocalDpMod:1 of
-#, fuzzy
-msgid "Modifier for local differential privacy."
-msgstr "Confidentialité différentielle"
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:77
+msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):"
+msgstr "Renommer les paramètres de stratégie intégrés (par exemple, ``FedAvg``) :"

-#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2
-msgid "LocalDpMod"
-msgstr ""
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:79
+msgid "``fraction_eval`` --> ``fraction_evaluate``"
+msgstr "``fraction_eval`` --> ``fraction_evaluate``"

-#: flwr.client.mod.localdp_mod.LocalDpMod:3 of
-msgid ""
-"This mod clips the client model updates and adds noise to the params "
-"before sending them to the server."
-msgstr ""
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:80
+msgid "``min_eval_clients`` --> ``min_evaluate_clients``"
+msgstr "``min_eval_clients`` --> ``min_evaluate_clients``"

-#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12
-#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10
-#: flwr.client.mod.localdp_mod.LocalDpMod:6 of
-msgid "It operates on messages of type `MessageType.TRAIN`."
-msgstr ""
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:81
+msgid "``eval_fn`` --> ``evaluate_fn``"
+msgstr "``eval_fn`` --> ``evaluate_fn``"

-#: flwr.client.mod.localdp_mod.LocalDpMod:8
-#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15
-#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8
-#: of
-msgid "The value of the clipping norm."
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:83
+msgid ""
+"Rename ``rnd`` to ``server_round``. This impacts multiple methods and "
+"functions, for example, ``configure_fit``, ``aggregate_fit``, "
+"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``."
 msgstr ""
+"Renommez ``rnd`` en ``server_round``. Cela a un impact sur plusieurs "
+"méthodes et fonctions, par exemple, ``configure_fit``, ``aggregate_fit``,"
+" ``configure_evaluate``, ``aggregate_evaluate``, et ``evaluate_fn``."
-#: flwr.client.mod.localdp_mod.LocalDpMod:10 of
-msgid "The sensitivity of the client model."
-msgstr ""
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:86
+msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:"
+msgstr "Ajoute ``server_round`` et ``config`` à ``evaluate_fn`` :"

-#: flwr.client.mod.localdp_mod.LocalDpMod:12 of
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:88
 msgid ""
-"The privacy budget. Smaller value of epsilon indicates a higher level of "
-"privacy protection."
+"Flower 0.19: ``def evaluate(parameters: NDArrays) -> "
+"Optional[Tuple[float, Dict[str, Scalar]]]:``"
 msgstr ""
+"Flower 0.19 : ``def evaluate(parameters : NDArrays) -> "
+"Optional[Tuple[float, Dict[str, Scalar]]]:``"

-#: flwr.client.mod.localdp_mod.LocalDpMod:15 of
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:90
 msgid ""
-"The failure probability. The probability that the privacy mechanism fails"
-" to provide the desired level of privacy. A smaller value of delta "
-"indicates a stricter privacy guarantee."
+"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" +"Flower 1.0 : ``def evaluate(server_round : int, parameters : NDArrays, " +"config : Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" -#: flwr.client.mod.localdp_mod.LocalDpMod:23 of -msgid "Create an instance of the local DP mod and add it to the client-side mods:" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +msgid "Custom strategies" +msgstr "Stratégies personnalisées" -#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 -msgid "adaptiveclipping\\_mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 +msgid "" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" msgstr "" +"Le type du paramètre ``failures`` a changé de ``List[BaseException]`` à " +"``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (dans " +"``aggregate_fit``) et ``List[Union[Tuple[ClientProxy, EvaluateRes], " +"BaseException]]`` (dans ``aggregate_evaluate``)" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " -"wrapper." -msgstr "" - -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of -msgid "The wrapper sends the clipping_norm value to the client." 
+"The ``Strategy`` method ``evaluate`` now receives the current round of "
+"federated learning/evaluation as the first parameter:"
 msgstr ""
+"La méthode ``evaluate`` de ``Strategy`` reçoit maintenant le cycle actuel "
+"d'apprentissage/évaluation fédéré comme premier paramètre :"

-#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8
-#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of
-msgid "This mod clips the client model updates before sending them to the server."
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:103
+msgid ""
+"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> "
+"Optional[Tuple[float, Dict[str, Scalar]]]:``"
 msgstr ""
+"Flower 0.19 : ``def evaluate(self, parameters : Parameters) -> "
+"Optional[Tuple[float, Dict[str, Scalar]]]:``"

-#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:105
 msgid ""
-"It also sends KEY_NORM_BIT to the server for computing the new clipping "
-"value."
+"Flower 1.0: ``def evaluate(self, server_round: int, parameters: "
+"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``"
 msgstr ""
+"Flower 1.0 : ``def evaluate(self, server_round : int, parameters : "
+"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``"

-#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15
-#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13
-#: flwr.server.driver.driver.Driver.send_and_receive:18
-#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53
-#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60
-#: of
-#, fuzzy
-msgid "Notes"
-msgstr "Aucun"
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:109
+msgid "Optional improvements"
+msgstr "Améliorations facultatives"

-#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16
-#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of
-msgid "Consider the order of mods when using multiple."
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 +msgid "" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" msgstr "" +"En plus des changements nécessaires mentionnés ci-dessus, il existe un " +"certain nombre d'améliorations potentielles qui viennent d'être rendues " +"possibles :" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of -msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 +msgid "" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" +"Supprime les méthodes \"placeholder\" des sous-classes de ``Client`` ou " +"de ``NumPyClient``. Si tu utilises, par exemple, l'évaluation côté " +"serveur, alors les implémentations \"placeholder\" de ``evaluate`` ne " +"sont plus nécessaires." -#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 -msgid "fixedclipping\\_mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" +"Configurez le délai d'attente de la ronde via ``start_simulation`` : " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:349 +msgid "Further help" +msgstr "Aide supplémentaire" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." 
+"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." msgstr "" +"La plupart des `exemples de code Flower officiels " +"`_ sont déjà mis à " +"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " +"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " +"Flower `_ et utilise le canal " +"``#questions``." -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of -msgid "Typically, fixedclipping_mod should be the last to operate on params." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-next.rst:2 +#, fuzzy +msgid "Upgrade to Flower Next" +msgstr "Passe à Flower 1.0" -#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 -msgid "make\\_ffn" +#: ../../source/how-to-upgrade-to-flower-next.rst:4 +msgid "" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." msgstr "" -#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 -msgid "message\\_size\\_mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:11 +msgid "" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." msgstr "" -#: flwr.client.mod.comms_mods.message_size_mod:3 of -msgid "This mod logs the size in bytes of the message being transmited." +#: ../../source/how-to-upgrade-to-flower-next.rst:15 +msgid "Let's dive in!" 
msgstr "" -#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:68 #, fuzzy -msgid "parameters\\_size\\_mod" -msgstr "Paramètres du modèle." +msgid "" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" +msgstr "" +"Voici comment mettre à jour une installation existante vers Flower 1.0 en" +" utilisant soit pip soit Poetry :" -#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:74 +msgid "or if you need Flower Next with simulation:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:80 msgid "" -"This mod logs the number of parameters transmitted in the message as well" -" as their size in bytes." +"Ensure you set the following version constraint in your " +"``requirements.txt``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 -msgid "secagg\\_mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:90 +msgid "or ``pyproject.toml``:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:101 #, fuzzy -msgid "secaggplus\\_mod" -msgstr "Flux de travail" +msgid "Using Poetry" +msgstr "Utiliser la poésie (recommandé)" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" +#: ../../source/how-to-upgrade-to-flower-next.rst:103 +#, fuzzy +msgid "" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" +"Poetry : mettez à jour la dépendance ``flwr`` dans ``pyproject.toml`` " +"puis réinstallez (n'oubliez pas de supprimer ``poetry.lock`` via ``rm " +"poetry.lock`` avant d'exécuter ``poetry install``)." 
-#: ../../source/ref-api/flwr.client.run_supernode.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:106 #, fuzzy -msgid "run\\_supernode" -msgstr "flower-superlink" +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/ref-api/flwr.client.start_client.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:123 +msgid "" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:132 #, fuzzy -msgid "start\\_client" -msgstr "start_client" +msgid "|clientapp_link|_" +msgstr "client" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: ../../source/how-to-upgrade-to-flower-next.rst:134 msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. Here's an example:" msgstr "" -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. 
(default: None)" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-next.rst:157 +#, fuzzy +msgid "|serverapp_link|_" +msgstr "serveur" -#: flwr.client.app.start_client:9 of +#: ../../source/how-to-upgrade-to-flower-next.rst:159 msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" msgstr "" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +#: ../../source/how-to-upgrade-to-flower-next.rst:180 +msgid "Deployment" msgstr "" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: ../../source/how-to-upgrade-to-flower-next.rst:182 msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: ../../source/how-to-upgrade-to-flower-next.rst:185 msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." 
+"Here's an example to start the server without HTTPS (only for " +"prototyping):" msgstr "" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: ../../source/how-to-upgrade-to-flower-next.rst:201 msgid "" -"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." msgstr "" -#: flwr.client.app.start_client:31 of +#: ../../source/how-to-upgrade-to-flower-next.rst:229 +#, fuzzy +msgid "Simulation in CLI" +msgstr "Simulation de moniteur" + +#: ../../source/how-to-upgrade-to-flower-next.rst:231 msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is " -"no limit to the number of tries." +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" msgstr "" -#: flwr.client.app.start_client:35 of +#: ../../source/how-to-upgrade-to-flower-next.rst:264 msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." 
+" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" msgstr "" -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" +#: ../../source/how-to-upgrade-to-flower-next.rst:281 +msgid "" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -msgid "Starting an SSL-enabled gRPC client using system certificates:" +#: ../../source/how-to-upgrade-to-flower-next.rst:305 +msgid "Simulation in a Notebook" msgstr "" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -msgid "Starting an SSL-enabled gRPC client using provided certificates:" +#: ../../source/how-to-upgrade-to-flower-next.rst:307 +msgid "" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" msgstr "" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:351 #, fuzzy -msgid "start\\_numpy\\_client" -msgstr "start_numpy_client" - -#: flwr.client.app.start_numpy_client:5 of msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." -msgstr "" - -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. 
You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." msgstr "" +"La plupart des `exemples de code Flower officiels " +"`_ sont déjà mis à " +"jour vers Flower 1.0, ils peuvent servir de référence pour l'utilisation " +"de l'API Flower 1.0. Si vous avez d'autres questions, `joins le Slack " +"Flower `_ et utilise le canal " +"``#questions``." -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" -msgstr "commun" +#: ../../source/how-to-upgrade-to-flower-next.rst:358 +#, fuzzy +msgid "Important" +msgstr "Changements importants :" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:360 +msgid "" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -#, fuzzy -msgid "Create Array from NumPy ndarray." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." +#: ../../source/how-to-upgrade-to-flower-next.rst:366 +msgid "Happy migrating! 🚀" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." -msgstr "Désérialise le tableau numérique NumPy à partir d'octets." +#: ../../source/how-to-use-built-in-mods.rst:4 +msgid "" +"**Note: This tutorial covers experimental features. 
The functionality and" +" interfaces may change in future versions.**" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-built-in-mods.rst:7 msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." +#: ../../source/how-to-use-built-in-mods.rst:12 +msgid "What are Mods?" msgstr "" -"Configure la journalisation vers un fichier et/ou un serveur de " -"journalisation distant." -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-built-in-mods.rst:14 msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +#: ../../source/how-to-use-built-in-mods.rst:23 +msgid "A typical mod function might look something like this:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "Using Mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." -msgstr "Enregistre 'msg % args' avec le niveau de sévérité entier 'level'." 
- -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." -msgstr "Sérialise le tableau numérique NumPy en octets." +#: ../../source/how-to-use-built-in-mods.rst:41 +msgid "1. Import the required mods" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`now `\\ \\(\\)" +#: ../../source/how-to-use-built-in-mods.rst:43 +msgid "First, import the built-in mod you intend to use:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." +#: ../../source/how-to-use-built-in-mods.rst:51 +msgid "2. Define your client function" msgstr "" -"Construit une date à partir de time.time() avec le fuseau horaire réglé " -"sur UTC." -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-built-in-mods.rst:53 msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." -msgstr "Convertit les ndarrays NumPy en objets de paramètres." +#: ../../source/how-to-use-built-in-mods.rst:62 +msgid "3. 
Create the ``ClientApp`` with mods" +msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-built-in-mods.rst:64 msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. The order in which you provide the mods matters:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." +#: ../../source/how-to-use-built-in-mods.rst:78 +#, fuzzy +msgid "Order of execution" +msgstr "Dépréciations" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:80 msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -msgid "Array type." +#: ../../source/how-to-use-built-in-mods.rst:83 +msgid "``example_mod_1`` (outermost mod)" +msgstr "" + +#: ../../source/how-to-use-built-in-mods.rst:84 +msgid "``example_mod_2`` (next mod)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:85 msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." +#: ../../source/how-to-use-built-in-mods.rst:87 +msgid "``example_mod_2`` (on the way back)" msgstr "" -"ClientMessage est un conteneur utilisé pour contenir un message de " -"résultat." 
-#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Code `\\ \\(value\\)" +#: ../../source/how-to-use-built-in-mods.rst:88 +msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." -msgstr "Codes d'état du client." +#: ../../source/how-to-use-built-in-mods.rst:90 +msgid "" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." +msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#, fuzzy +#: ../../source/how-to-use-built-in-mods.rst:97 msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/how-to-use-built-in-mods.rst:101 +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +msgstr "" + +#: ../../source/how-to-use-differential-privacy.rst:2 #, fuzzy -msgid "Configs record." -msgstr "Configurer les clients" +msgid "Use Differential Privacy" +msgstr "Confidentialité différentielle" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Context `\\ \\(state\\)" +#: ../../source/how-to-use-differential-privacy.rst:4 +msgid "" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -msgid "State of your run." +#: ../../source/how-to-use-differential-privacy.rst:10 +msgid "" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/how-to-use-differential-privacy.rst:17 +msgid "" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." -msgstr "Message DisconnectRes envoyé par le client au serveur." - -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:21 msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." -msgstr "Évaluer les instructions pour un client." 
- -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:26 msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." -msgstr "Évaluer la réponse d'un client." +#: ../../source/how-to-use-differential-privacy.rst:31 +#, fuzzy +msgid "Server-side Clipping" +msgstr "Logique côté serveur" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/how-to-use-differential-privacy.rst:33 +msgid "" +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." -msgstr "Types d'événements télémétriques." +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "server side clipping" +msgstr "Logique côté serveur" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-use-differential-privacy.rst:43 +msgid "" +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. 
The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " +"corresponding input parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." -msgstr "Instructions d'ajustement pour un client." +#: ../../source/how-to-use-differential-privacy.rst:64 +#, fuzzy +msgid "Client-side Clipping" +msgstr "Logique côté client" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:66 msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +"``Mods`` to perform the clipping. Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." -msgstr "Réponse adaptée d'un client." +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "client side clipping" +msgstr "Logique côté client" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +#: ../../source/how-to-use-differential-privacy.rst:78 +msgid "" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of -msgid "A dataclass that stores information about an error that occurred." 
+#: ../../source/how-to-use-differential-privacy.rst:97 +msgid "" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." -msgstr "Demande de paramètres pour un client." - -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:115 msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." -msgstr "Réponse lorsqu'on te demande de renvoyer des paramètres." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." -msgstr "Demande de propriétés pour un client." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." 
-msgstr "Réponse des propriétés d'un client." - -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:140 msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -msgid "State of your application from the viewpoint of the entity using it." +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageType `\\ \\(\\)" +#: ../../source/how-to-use-differential-privacy.rst:145 +msgid "Local Training using Privacy Engines" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -msgid "Message type." +#: ../../source/how-to-use-differential-privacy.rst:147 +msgid "" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -msgstr "" +#: ../../source/how-to-use-strategies.rst:2 +#, fuzzy +msgid "Use strategies" +msgstr "Stratégies personnalisées" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -msgid "Legacy message type." +#: ../../source/how-to-use-strategies.rst:4 +#, fuzzy +msgid "" +"Flower allows full customization of the learning process through the " +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." 
msgstr "" +"Flower permet une personnalisation complète du processus d'apprentissage " +"grâce à l'abstraction :code:`Stratégie`. Un certain nombre de stratégies " +"intégrées sont fournies dans le cadre principal." -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:7 msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" msgstr "" +"Il y a trois façons de personnaliser la manière dont Flower orchestre le " +"processus d'apprentissage du côté du serveur :" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of -msgid "A dataclass holding metadata associated with the current message." -msgstr "" +#: ../../source/how-to-use-strategies.rst:10 +#, fuzzy +msgid "Use an existing strategy, for example, ``FedAvg``" +msgstr "Utilise une stratégie existante, par exemple :code:`FedAvg`" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" -msgstr "" +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 +msgid "Customize an existing strategy with callback functions" +msgstr "Personnalise une stratégie existante avec des fonctions de rappel" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "Metrics record." 
-msgstr "" +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 +msgid "Implement a novel strategy" +msgstr "Mets en place une nouvelle stratégie" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`NDArray `\\" +#: ../../source/how-to-use-strategies.rst:15 +msgid "Use an existing strategy" +msgstr "Utilise une stratégie existante" + +#: ../../source/how-to-use-strategies.rst:17 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" msgstr "" +"Flower intègre un certain nombre de stratégies d'apprentissage fédéré " +"populaires. Une stratégie intégrée peut être instanciée comme suit :" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:27 +#, fuzzy msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +"This creates a strategy with all parameters left at their default values " +"and passes it to the ``start_server`` function. It is usually recommended" +" to adjust a few parameters during instantiation:" msgstr "" +"Cela crée une stratégie dont tous les paramètres sont laissés à leur " +"valeur par défaut et la transmet à la fonction :code:`start_server`. Il " +"est généralement recommandé d'ajuster quelques paramètres lors de " +"l'instanciation :" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:45 msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." msgstr "" +"Les stratégies existantes offrent plusieurs façons de personnaliser leur " +"comportement. Les fonctions de rappel permettent aux stratégies d'appeler" +" le code fourni par l'utilisateur pendant l'exécution." 
-#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." -msgstr "Paramètres du modèle." +#: ../../source/how-to-use-strategies.rst:49 +msgid "Configuring client fit and client evaluate" +msgstr "Configurer l'adaptation et l'évaluation du client" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:51 +#, fuzzy msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"The server can pass new configuration values to the client each round by " +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." msgstr "" +"Le serveur peut transmettre de nouvelles valeurs de configuration au " +"client à chaque tour en fournissant une fonction à " +":code:`on_fit_config_fn`. La fonction fournie sera appelée par la " +"stratégie et doit renvoyer un dictionnaire de paires de valeurs de clés " +"de configuration qui seront envoyées au client. Elle doit renvoyer un " +"dictionnaire de valeurs de configuration arbitraires :code:`client.fit` " +"et :code:`client.evaluate` au cours de chaque tour d'apprentissage " +"fédéré." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: ../../source/how-to-use-strategies.rst:84 #, fuzzy -msgid "Parameters record." -msgstr "Paramètres du modèle." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +msgid "" +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. 
The client will receive " +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." msgstr "" +"Le :code:`on_fit_config_fn` peut être utilisé pour passer des valeurs de " +"configuration arbitraires du serveur au client, et changer potentiellement " +"ces valeurs à chaque tour, par exemple pour ajuster le taux " +"d'apprentissage. Le client recevra le dictionnaire renvoyé par le " +":code:`on_fit_config_fn` dans sa propre fonction :code:`client.fit()`." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." -msgstr "Message de reconnexion du serveur au client." - -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:89 +#, fuzzy msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" msgstr "" +"Comme pour :code:`on_fit_config_fn`, il existe aussi " +":code:`on_evaluate_config_fn` pour personnaliser la configuration envoyée" +" à :code:`client.evaluate()`" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -msgid "RecordSet stores groups of parameters, metrics and configs." -msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:93 +msgid "Configuring server-side evaluation" +msgstr "Configuration de l'évaluation côté serveur" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:95 +#, fuzzy msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." 
msgstr "" -"ServerMessage est un conteneur utilisé pour contenir un message " -"d'instruction." +"L'évaluation côté serveur peut être activée en passant une fonction " +"d'évaluation à :code:`evaluate_fn`." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +#: ../../source/how-to-use-strategies.rst:101 +#, fuzzy +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." msgstr "" +"L'écriture d'une stratégie entièrement personnalisée est un peu plus " +"complexe, mais c'est celle qui offre le plus de souplesse. Lis le guide " +"`Implémentation des stratégies `_ pour " +"en savoir plus." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." -msgstr "Statut du client." +#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "Tutoriel" -#: ../../source/ref-api/flwr.common.Array.rst:2 -msgid "Array" -msgstr "" +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "Quickstart tutorials" -#: flwr.common.record.parametersrecord.Array:3 of -msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." -msgstr "" +#: ../../source/index.rst:81 ../../source/index.rst:85 +msgid "How-to guides" +msgstr "Guides" -#: flwr.common.record.parametersrecord.Array:6 of -msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" +#: ../../source/index.rst:106 +msgid "Legacy example guides" msgstr "" -#: flwr.common.record.parametersrecord.Array:8 of -msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." 
-msgstr "" +#: ../../source/index.rst:114 ../../source/index.rst:119 +msgid "Explanations" +msgstr "Explications" -#: flwr.common.record.parametersrecord.Array:12 of -msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." -msgstr "" +#: None:-1 +msgid "API reference" +msgstr "Référence pour l'API" -#: flwr.common.record.parametersrecord.Array:15 of -msgid "A buffer of bytes containing the data." -msgstr "" +#: ../../source/index.rst:145 +msgid "Reference docs" +msgstr "Référence pour la documentation" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: ../../source/index.rst:160 #, fuzzy -msgid ":py:obj:`numpy `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Contributor tutorials" +msgstr "Configuration du contributeur" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of +#: ../../source/index.rst:167 #, fuzzy -msgid "Return the array as a NumPy array." 
-msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" - -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`dtype `\\" -msgstr "" +msgid "Contributor how-to guides" +msgstr "Guide pour les contributeurs" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../source/index.rst:179 #, fuzzy -msgid ":py:obj:`shape `\\" -msgstr "serveur.stratégie.Stratégie" +msgid "Contributor explanations" +msgstr "Explications" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../source/index.rst:185 #, fuzzy -msgid ":py:obj:`stype `\\" -msgstr "serveur.stratégie.Stratégie" +msgid "Contributor references" +msgstr "Configuration du contributeur" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`data `\\" -msgstr "" +#: ../../source/index.rst:-1 +msgid "" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." +msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +#: ../../source/index.rst:2 #, fuzzy -msgid "ClientMessage" -msgstr "Côté client" +msgid "Flower Framework Documentation" +msgstr "Rédiger de la documentation" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`evaluate_res `\\" +#: ../../source/index.rst:7 +msgid "" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." msgstr "" +"Bienvenue sur la documentation de Flower. `Flower `_ " +"est un framework de federated learning convivial et facile à utiliser." 
-#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`fit_res `\\" -msgstr "" +#: ../../source/index.rst:11 +msgid "Join the Flower Community" +msgstr "Rejoignez la communauté de Flower" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/index.rst:13 msgid "" -":py:obj:`get_parameters_res " -"`\\" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." msgstr "" +"Le communauté de Flower s'agrandit rapidement - on est un super groupe de" +" chercheurs, ingénieurs, étudiants, professionnels, académiques, et " +"autres hobbyistes." -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/index.rst:16 +msgid "Join us on Slack" +msgstr "Join us on Slack" + +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "Flower Framework" + +#: ../../source/index.rst:25 msgid "" -":py:obj:`get_properties_res " -"`\\" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." msgstr "" +"Ce guide utilisateur s'adresse à des chercheurs et des développeurs qui " +"veulent utiliser Flower pour transposer des workloads de Machine Learning" +" existantes dans un scenario fédéré. Un des buts de Flower est de rendre " +"cela le plus evident possible. Lisez la suite pour en apprendre plus." -#: ../../source/ref-api/flwr.common.Code.rst:2 -msgid "Code" +#: ../../source/index.rst:30 +msgid "Tutorials" +msgstr "Tutoriels" + +#: ../../source/index.rst:32 +msgid "" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." msgstr "" +"Une serie de tutoriels de Federated Learning, l'endroit parfait pour " +"débuter." 
-#: flwr.common.typing.Code:1 of -msgid "Bases: :py:class:`~enum.Enum`" +#: ../../source/index.rst:62 +#, fuzzy +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" msgstr "" +"QUICKSTART TUTORIALS: :ref:`PyTorch ` | " +":ref:`TensorFlow ` | :ref:`🤗 Transformers " +"` | :ref:`JAX ` | :ref:`Pandas " +"` | :ref:`fastai ` | :ref:`PyTorch " +"Lightning ` | :ref:`MXNet ` | :ref:`scikit-learn ` | :ref:`XGBoost " +"` | :ref:`Android ` | :ref:`iOS " +"`" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`OK `\\" +#: ../../source/index.rst:70 +msgid "We also made video tutorials for PyTorch:" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/index.rst:75 +#, fuzzy +msgid "And TensorFlow:" +msgstr "Exemples de TensorFlow" + +#: ../../source/index.rst:83 msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." msgstr "" +"Guides orientés sur la résolutions étapes par étapes de problèmes ou " +"objectifs specifiques." -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/index.rst:116 msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." msgstr "" +"Guides orientés sur la compréhension et l'explication des sujets et idées" +" de fonds sur lesquels sont construits Flower et l'IA collaborative." -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +#: ../../source/index.rst:128 +#, fuzzy +msgid "References" +msgstr "Référence" + +#: ../../source/index.rst:130 +msgid "Information-oriented API reference and other reference material." 
+msgstr "Référence de l'API orientée sur l'information pure." + +#: ../../source/index.rst:139::1 +msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +#: ../../source/index.rst:139::1 flwr:1 of +msgid "Flower main package." msgstr "" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#: ../../source/index.rst:155 #, fuzzy -msgid "ConfigsRecord" -msgstr "Configurer les clients" +msgid "Contributor docs" +msgstr "Configuration du contributeur" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/index.rst:157 +#, fuzzy msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." msgstr "" +"Les auteurs de Flower sont heureux d'accueillir des contributions " +"externes. Les guides suivant sont là pour vous accompagner dans cette " +"direction." -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api-cli.rst:2 +#, fuzzy +msgid "Flower CLI reference" +msgstr "Client de Flower" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Remove all items from R." -msgstr "" +#: ../../source/ref-api-cli.rst:7 +#, fuzzy +msgid "flwr CLI" +msgstr "Client de Flower" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../flwr:1 +msgid "flwr is the Flower command line interface." 
msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Return number of Bytes stored in this object." -msgstr "" +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Options" +msgstr "Solution" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../flwr:1 +msgid "Install completion for the current shell." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -msgid "d defaults to None." +#: ../../flwr:1 +msgid "" +"Show completion for the current shell, to copy it or customize the " +"installation." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../flwr build:1 +msgid "" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." 
+#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" +#: ../../flwr install:1 +#, fuzzy +msgid "Install a Flower App Bundle." +msgstr "Installer Flower" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -msgid "Update R from dict/iterable E and F." +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -msgid "This function counts booleans as occupying 1 Byte." +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:2 -msgid "Context" +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" msgstr "" -#: flwr.common.context.Context:3 of +#: ../../flwr install:1 msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. across " -"multiple rounds)" +"This will install ``target_project`` to ``./docs/flwr/``. 
By default, " +"``flwr-dir`` is equal to:" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:28::1 -#, fuzzy -msgid ":py:obj:`state `\\" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -msgid "DisconnectRes" +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -msgid ":py:obj:`reason `\\" +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" msgstr "" -#: ../../source/ref-api/flwr.common.Error.rst:2 -msgid "Error" +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" msgstr "" -#: flwr.common.message.Error:3 of -msgid "An identifier for the error." +#: ../../flwr install:1 +msgid "The desired install path." msgstr "" -#: flwr.common.message.Error:5 of -msgid "A reason for why the error arose (e.g. an exception stack-trace)" -msgstr "" +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Arguments" +msgstr "Amélioration de la documentation" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`code `\\" -msgstr "" +#: ../../flwr install:1 log:1 new:1 run:1 +#, fuzzy +msgid "Optional argument" +msgstr "Améliorations facultatives" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -msgid "Error code." +#: ../../flwr install:1 +msgid "The source FAB file to install." msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`reason `\\" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." msgstr "" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -msgid "Reason reported about the error." 
+#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +#: ../../flwr log run #, fuzzy -msgid "EvaluateIns" -msgstr "Explications" +msgid "default" +msgstr "Flux de travail" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../flwr log:1 +msgid "``True``" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`config `\\" -msgstr "" +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "Amélioration de la documentation" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -msgid "EvaluateRes" -msgstr "" +#: ../../flwr log:1 +#, fuzzy +msgid "The Flower run ID to query" +msgstr "Rejoignez la communauté de Flower" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../flwr log:1 +msgid "Path of the Flower project to run" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`loss `\\" +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" -msgstr "" +#: ../../flwr new:1 +#, fuzzy +msgid "Create new Flower App." 
+msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: ../../flwr new:1 +msgid "The ML framework to use" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:2 -msgid "EventType" +#: ../../flwr new +#, fuzzy +msgid "options" +msgstr "Solution" + +#: ../../flwr new:1 +msgid "" +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +#: ../../flwr new:1 +msgid "The Flower username of the author" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr new:1 #, fuzzy -msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" -msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +msgid "The name of the Flower App" +msgstr "Chargement des données" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -msgid "Encode the string using the codec registered for encoding." +#: ../../flwr run:1 +#, fuzzy +msgid "Run Flower App." +msgstr "Serveur de Flower" + +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." +#: ../../flwr run:1 +msgid "" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of -msgid "" -"Return a list of the substrings in the string, using sep as the separator" -" string." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 #, fuzzy -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." -msgstr "" +msgid "``False``" +msgstr ":code:`évaluer`" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 #, fuzzy -msgid ":py:obj:`capitalize `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Path of the Flower App to run." +msgstr "Chargement des données" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:16 #, fuzzy -msgid ":py:obj:`casefold `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "flower-simulation" +msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." 
-msgstr "" +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:36 #, fuzzy -msgid ":py:obj:`title `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -msgid "Return a centered string of length width." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr "" +msgid "flower-supernode" +msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." -msgstr "" +#: ../../source/ref-api-cli.rst:46 +#, fuzzy +msgid "flower-server-app" +msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:50 msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." 
-msgstr "" +#: ../../source/ref-api-cli.rst:64 +#, fuzzy +msgid "flower-superexec" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.rst:2 +#, fuzzy +msgid "flwr" +msgstr "Fleur" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgid ":py:obj:`flwr.client `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +#, fuzzy +msgid "Flower client." +msgstr "Client de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.common `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "Composants communs partagés entre le serveur et le client." 
-#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`lower `\\ \\(\\)" +msgid ":py:obj:`flwr.server `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -msgid "Return a copy of the string converted to lowercase." -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +#, fuzzy +msgid "Flower server." +msgstr "Serveur de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgid ":py:obj:`flwr.simulation `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of -msgid "Return a copy of the string with leading whitespace removed." -msgstr "" +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +#, fuzzy +msgid "Flower simulation." +msgstr "Simulation de moniteur" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "client" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +#, fuzzy +msgid "Functions" +msgstr "Les quatre fonctions :" + +#: ../../source/ref-api/flwr.client.rst:23::1 msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]." 
+":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, " -"start\\[\\, end\\]\\]\\)" +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:23::1 msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -msgid "Return a right-justified string of length width." +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -msgid "Return a copy of the string with trailing whitespace removed." +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`Client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." 
+msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../source/ref-api/flwr.client.rst:32::1 msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -msgid "Return a list of the lines in the string, breaking at line boundaries." +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of #, fuzzy -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Flower ClientApp." +msgstr "Flower ClientApp." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -msgid "Return a copy of the string with leading and trailing whitespace removed." +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`swapcase `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of -msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:50::1 #, fuzzy -msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +msgid ":py:obj:`flwr.client.mod `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -msgid "Replace each character in the string using the given translation table." 
-msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of #, fuzzy -msgid ":py:obj:`upper `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -msgid "Return a copy of the string converted to uppercase." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" -msgstr "" +msgid "Flower Built-in Mods." +msgstr "Client de Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S starts with the specified prefix, False otherwise." +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: 
../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" -msgstr "" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +#, fuzzy +msgid "Evaluate the provided parameters using the locally held dataset." 
+msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of #, fuzzy -msgid ":py:obj:`isascii `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Get the run context from this client." +msgstr "Évaluer la réponse d'un client." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." 
+#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of #, fuzzy -msgid ":py:obj:`islower `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Return the current local model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isupper `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`istitle `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." 
+#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isspace `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isdecimal `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isdigit `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isnumeric `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric 
string, False otherwise." +#: flwr.client.Client.context:1::1 of +msgid ":py:obj:`context `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isalpha `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: 
flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of #, fuzzy -msgid ":py:obj:`isalnum `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" +msgid "Parameters" +msgstr "Paramètres du modèle." 
-#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." +#: flwr.client.client.Client.evaluate:3 of +msgid "" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of #, fuzzy -msgid ":py:obj:`isidentifier `\\ \\(\\)" -msgstr 
"serveur.stratégie.Stratégie" +msgid "Returns" +msgstr "Ressources" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of -msgid "Return True if the string is a valid Python identifier, False otherwise." +#: flwr.client.client.Client.evaluate:8 of +msgid "" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isprintable `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: 
flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of +#: flwr.client.client.Client.fit:3 of msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.fit:8 of msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from args and kwargs." +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`format_map `\\ \\(mapping\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.get_parameters:7 of #, fuzzy -msgid ":py:obj:`maketrans `\\" -msgstr "serveur.stratégie.Stratégie" +msgid "The current local model parameters." 
+msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`PING `\\" +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_ENTER `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#, fuzzy +msgid "ClientApp" +msgstr "client" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_LEAVE `\\" +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: 
flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_ENTER `\\" -msgstr "" +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +#, fuzzy +msgid "Examples" +msgstr "Exemples de PyTorch" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_LEAVE `\\" +#: flwr.client.client_app.ClientApp:5 of +msgid "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.client_app.ClientApp:16 of msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.client_app.ClientApp:21 of msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +"In 
this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" -msgstr "" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +#, fuzzy +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_CONNECT `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_ENTER `\\" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_LEAVE `\\" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +#, fuzzy +msgid "Train the provided parameters using the locally held dataset." +msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +":py:obj:`get_parameters `\\ " +"\\(config\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +":py:obj:`get_properties `\\ " +"\\(config\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +":py:obj:`set_context `\\ " +"\\(context\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of #, fuzzy -msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" -msgstr "serveur.stratégie.Stratégie" +msgid "Convert to object to Client type and return it." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" -msgstr "serveur.stratégie.Stratégie" +#: flwr.client.NumPyClient.context:1::1 of +msgid ":py:obj:`context `\\" +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of #, fuzzy -msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" -msgstr "serveur.stratégie.Stratégie" +msgid "The current (global) model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: flwr.common.EventType.capitalize:3 of +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." 
-msgstr "" - -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "Padding is done using the specified fill character (default is a space)." +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." msgstr "" -#: flwr.common.EventType.count:1 of +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." -msgstr "" - -#: flwr.common.EventType.encode:3 of -msgid "encoding" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." msgstr "" -#: flwr.common.EventType.encode:9 of -msgid "errors" +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." msgstr "" -#: flwr.common.EventType.encode:6 of +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. 
Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.endswith:1 of +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." msgstr "" -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." msgstr "" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +#: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. 
It can " +"be used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." -msgstr "" +#: flwr.client.numpy_client.NumPyClient.fit:11 of +#, fuzzy +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" -#: flwr.common.EventType.format:1 of -msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. The substitutions are identified by braces ('{' and '}')." +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." msgstr "" -#: flwr.common.EventType.format_map:1 of +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." msgstr "" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." -msgstr "" +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +#, fuzzy +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" -#: flwr.common.EventType.isalnum:3 of +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." 
msgstr "" -#: flwr.common.EventType.isalpha:3 of +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." msgstr "" -#: flwr.common.EventType.isascii:3 of -msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." +#: ../../source/ref-api/flwr.client.mod.rst:2 +msgid "mod" msgstr "" -#: flwr.common.EventType.isdecimal:3 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.isdigit:3 of -msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." -msgstr "" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of +#, fuzzy +msgid "Client-side adaptive clipping modifier." +msgstr "Logique côté client" -#: flwr.common.EventType.isidentifier:3 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.islower:3 of -msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." 
-msgstr "" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of +#, fuzzy +msgid "Client-side fixed clipping modifier." +msgstr "Logique côté client" -#: flwr.common.EventType.isnumeric:3 of -msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." -msgstr "" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#, fuzzy +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.EventType.isprintable:3 of -msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." msgstr "" -#: flwr.common.EventType.isspace:3 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.istitle:3 of -msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +msgid "Message size mod." msgstr "" -#: flwr.common.EventType.isupper:3 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.join:3 of -msgid "" -"The string whose method is called is inserted in between each given " -"string. The result is returned as a new string." 
-msgstr "" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +#, fuzzy +msgid "Parameters size mod." +msgstr "Paramètres du modèle." -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" msgstr "" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." msgstr "" -#: flwr.common.EventType.maketrans:3 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.partition:3 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." +"Handle incoming message and return results, following the SecAgg+ " +"protocol." 
msgstr "" -#: flwr.common.EventType.partition:7 of +#: ../../source/ref-api/flwr.client.mod.rst:35::1 msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" msgstr "" -#: flwr.common.EventType.removeprefix:3 of -msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of +#, fuzzy +msgid "Modifier for local differential privacy." +msgstr "Confidentialité différentielle" + +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +msgid "LocalDpMod" msgstr "" -#: flwr.common.EventType.removesuffix:3 of +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." msgstr "" -#: flwr.common.EventType.replace:5 of -msgid "count" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." msgstr "" -#: flwr.common.EventType.replace:4 of -msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." 
msgstr "" -#: flwr.common.EventType.replace:7 of -msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." msgstr "" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." msgstr "" -#: flwr.common.EventType.rpartition:3 of +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." msgstr "" -#: flwr.common.EventType.rpartition:7 of -msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" msgstr "" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" msgstr "" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." 
+#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." msgstr "" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of -msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +msgid "The wrapper sends the clipping_norm value to the client." msgstr "" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." msgstr "" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." msgstr "" -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." -msgstr "" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +#, fuzzy +msgid "Notes" +msgstr "Aucun" -#: flwr.common.EventType.split:13 of -msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. 
With natural text that includes punctuation, consider using " -"the regular expression module." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." msgstr "" -#: flwr.common.EventType.splitlines:3 of -msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." msgstr "" -#: flwr.common.EventType.startswith:1 of -msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +msgid "fixedclipping\\_mod" msgstr "" -#: flwr.common.EventType.title:3 of +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." msgstr "" -#: flwr.common.EventType.translate:5 of -#, fuzzy -msgid "table" -msgstr "Database" - -#: flwr.common.EventType.translate:4 of -msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." msgstr "" -#: flwr.common.EventType.translate:7 of -msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." 
+#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" msgstr "" -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 -msgid "FitIns" +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +#, fuzzy +msgid "parameters\\_size\\_mod" +msgstr "Paramètres du modèle." -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +msgid "" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 -msgid "FitRes" +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`status `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +#, fuzzy +msgid "secaggplus\\_mod" +msgstr "Flux de travail" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`parameters `\\" -msgstr "" +#: ../../source/ref-api/flwr.client.start_client.rst:2 +#, fuzzy +msgid "start\\_client" +msgstr "start_client" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." 
msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 -#, fuzzy -msgid "GetParametersIns" -msgstr ":code:`get_parameters`" +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 -#, fuzzy -msgid "GetParametersRes" -msgstr ":code:`get_parameters`" +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." +msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`status `\\" +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." 
msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +msgid "" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 -msgid "GetPropertiesIns" +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." 
msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 -msgid "GetPropertiesRes" +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`status `\\" +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`properties `\\" +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:2 +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 #, fuzzy -msgid "Message" -msgstr "Côté serveur" - -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of -msgid "A dataclass including information about the message to be executed." -msgstr "" +msgid "start\\_numpy\\_client" +msgstr "start_numpy_client" -#: flwr.common.message.Message:5 of +#: flwr.client.app.start_numpy_client:5 of msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." msgstr "" -#: flwr.common.message.Message:8 of -msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." 
msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "commun" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of -msgid "Construct a reply message indicating an error happened." +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +#, fuzzy +msgid "Create Array from NumPy ndarray." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -msgid "Create a reply to this message with specified content and TTL." +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_content `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "Désérialise le tableau numérique NumPy à partir d'octets." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -msgid "Return True if message has content, else False." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." 
msgstr "" +"Configure la journalisation vers un fichier et/ou un serveur de " +"journalisation distant." -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_error `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -msgid "Return True if message has an error, else False." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`content `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 #: of -#, fuzzy -msgid "The content of this message." -msgstr "Évaluer la réponse d'un client." +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "Enregistre 'msg % args' avec le niveau de sévérité entier 'level'." -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`error `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -msgid "Error captured by this message." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." +msgstr "Sérialise le tableau numérique NumPy en octets." 
+ +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`metadata `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "Convertit les ndarrays NumPy en objets de paramètres." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: flwr.common.message.Message.create_error_reply:3 of -msgid "The error that was encountered." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." msgstr "" +"Construit une date à partir de time.time() avec le fuseau horaire réglé " +"sur UTC." -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: ../../source/ref-api/flwr.common.rst:30::1 msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." + +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Time-to-live for this message in seconds. 
If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation:" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" msgstr "" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." msgstr "" -#: flwr.common.message.Message.create_reply:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." -msgstr "" - -#: flwr.common.message.Message.create_reply:7 of -msgid "The content for the reply message." +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: flwr.common.message.Message.create_reply:16 of -msgid "A new `Message` instance representing the reply." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." msgstr "" +"ClientMessage est un conteneur utilisé pour contenir un message de " +"résultat." -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -msgid "MessageType" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`EVALUATE `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "Codes d'état du client." 
-#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`QUERY `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Config `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`TRAIN `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 -msgid "MessageTypeLegacy" +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PARAMETERS `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy +msgid "Configs record." +msgstr "Configurer les clients" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PROPERTIES `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -msgid "An identifier for the current run." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of +msgid "Context of your run." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of -msgid "An identifier for the current message." 
+#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of -msgid "An identifier for the node sending this message." -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." +msgstr "Message DisconnectRes envoyé par le client au serveur." -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -msgid "An identifier for the node receiving this message." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -msgid "An identifier for the message this message replies to." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." msgstr "" -#: flwr.common.message.Metadata:13 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." -msgstr "" - -#: flwr.common.message.Metadata:16 of -msgid "Time-to-live for this message in seconds." +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of -msgid "A string that encodes the action to be executed on the receiving end." -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "Évaluer les instructions pour un client." 
-#: flwr.common.message.Metadata:21 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -#, fuzzy -msgid ":py:obj:`created_at `\\" -msgstr "serveur.stratégie.Stratégie" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." +msgstr "Évaluer la réponse d'un client." -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`dst_node_id `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "Types d'événements télémétriques." -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`group_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of -msgid "An identifier for grouping messages." -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." +msgstr "Instructions d'ajustement pour un client." 
-#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_type `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "Réponse adaptée d'un client." -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`partition_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of -msgid "An identifier telling which data partition a ClientApp should use." -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "Demande de paramètres pour un client." -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`reply_to_message `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`run_id `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "Réponse lorsqu'on te demande de renvoyer des paramètres." -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`src_node_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`ttl `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "Demande de propriétés pour un client." 
-#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 -#: of -msgid "Time-to-live for this message." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 -msgid "MetricsRecord" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "Réponse des propriétés d'un client." -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" -msgstr "" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." 
msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Metrics `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy +msgid "Metrics recod." +msgstr "Paramètres du modèle." 
+ +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArray `\\" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensor_type `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "ParametersRecord" -msgstr "Paramètres du modèle." +msgid ":py:obj:`NDArrays `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "Paramètres du modèle." 
-#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#, fuzzy +msgid "Parameters record." +msgstr "Paramètres du modèle." -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Properties `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." +msgstr "Message de reconnexion du serveur au client." -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." 
msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 -#, fuzzy -msgid "ReconnectIns" -msgstr "Collecte centralisée des données" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." +msgstr "" +"ServerMessage est un conteneur utilisé pour contenir un message " +"d'instruction." -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 -msgid ":py:obj:`seconds `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 -msgid "RecordSet" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." +msgstr "Statut du client." + +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`configs_records `\\" +#: flwr.common.record.parametersrecord.Array:3 of +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." msgstr "" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of -msgid "Dictionary holding ConfigsRecord instances." +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. 
" +"`np.float32`)" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`metrics_records `\\" +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -msgid "Dictionary holding MetricsRecord instances." +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`parameters_records `\\" +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of -msgid "Dictionary holding ParametersRecord instances." +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#, fuzzy +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +#, fuzzy +msgid "Return the array as a NumPy array." 
+msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid "ServerMessage" -msgstr "Côté serveur" +msgid ":py:obj:`shape `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`evaluate_ins `\\" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#, fuzzy +msgid ":py:obj:`stype `\\" +msgstr "serveur.stratégie.Stratégie" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`fit_ins `\\" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +#, fuzzy +msgid "ClientMessage" +msgstr "Côté client" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 msgid "" -":py:obj:`get_parameters_ins " -"`\\" +":py:obj:`get_parameters_res " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 msgid "" -":py:obj:`get_properties_ins " -"`\\" +":py:obj:`get_properties_res " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:2 -#, fuzzy -msgid "Status" -msgstr "Statut du client." 
+#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" +msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`code `\\" +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`message `\\" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" msgstr "" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 -msgid "array\\_from\\_numpy" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 -msgid "bytes\\_to\\_ndarray" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.common.configure.rst:2 +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" +msgstr "" + +#: ../../source/ref-api/flwr.common.Config.rst:2 #, fuzzy -msgid "configure" +msgid "Config" msgstr "Configurer les clients" -#: ../../source/ref-api/flwr.common.event.rst:2 -msgid "event" +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +#, fuzzy +msgid "ConfigsRecord" +msgstr "Configurer les clients" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" msgstr "" -#: ../../source/ref-api/flwr.common.log.rst:2 -msgid "log" +#: 
flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: logging.Logger.log:3 of +#: flwr.common.record.configsrecord.ConfigsRecord:9 of msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." msgstr "" -"Pour transmettre des informations sur les exceptions, utilise l'argument " -"mot-clé exc_info avec une valeur vraie, par ex." -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" -msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." +msgstr "" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 -msgid "ndarray\\_to\\_bytes" +#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. 
While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." msgstr "" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 -msgid "ndarrays\\_to\\_parameters" +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" msgstr "" -#: ../../source/ref-api/flwr.common.now.rst:2 -msgid "now" +#: flwr.common.record.configsrecord.ConfigsRecord:42 of +msgid "" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" msgstr "" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 -msgid "parameters\\_to\\_ndarrays" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" -msgstr "serveur" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_server_app `\\ \\(\\)" +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of +msgid "Return number of Bytes stored in this object." msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.run_serverapp.run_server_app:1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Run Flower server app." 
-msgstr "Serveur de Flower" - -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_superlink `\\ \\(\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.run_superlink:1 of -#, fuzzy -msgid "Run Flower SuperLink (Driver API and Fleet API)." -msgstr "flower-fleet-api" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`ClientManager `\\ \\(\\)" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.ClientManager:1 of -msgid "Abstract base class for managing Flower clients." +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`Driver `\\ \\(\\)" +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`History `\\ \\(\\)" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of +msgid "" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.history.History:1 of -msgid "History class for training and/or evaluation metrics collection." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Legacy Context." +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +#: flwr.common.context.Context:3 of +msgid "The ID that identifies the node." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#, fuzzy +#: flwr.common.context.Context:5 of msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" -msgstr "serveur.stratégie.Stratégie" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." +msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_app.ServerApp:1 of -#, fuzzy -msgid "Flower ServerApp." -msgstr "Serveur de Flower" - -#: ../../source/ref-api/flwr.server.rst:38::1 -#, fuzzy +#: flwr.common.context.Context:8 of msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" -msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" - -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_config.ServerConfig:1 of -#, fuzzy -msgid "Flower server config." -msgstr "Serveur de Flower" - -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Provides a pool of available clients." +#: flwr.common.context.Context:15 of +msgid "" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. 
across multiple rounds)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid ":py:obj:`flwr.server.strategy `\\" +msgid ":py:obj:`node_id `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." -msgstr "" - -#: ../../source/ref-api/flwr.server.rst:57::1 +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid ":py:obj:`flwr.server.workflow `\\" +msgid ":py:obj:`node_config `\\" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.workflow:1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid "Workflows." -msgstr "Flux de travail" +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid "ClientManager" -msgstr "client" +msgid ":py:obj:`run_config `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "Return all available clients." 
+#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`num_available `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of -msgid "Return the number of available clients." +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`register `\\ \\(client\\)" +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of -msgid "Register Flower ClientProxy instance." +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -msgid "Sample a number of Flower ClientProxy instances." 
+#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`unregister `\\ \\(client\\)" +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of -msgid "Unregister Flower ClientProxy instance." +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +#, fuzzy +msgid "EvaluateIns" +msgstr "Explications" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of -msgid "Wait until at least `num_clients` are available." +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" msgstr "" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of -msgid "**num_available** -- The number of currently available clients." +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of -msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." 
+#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of -msgid "This method is idempotent." +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:2 -#, fuzzy -msgid "Driver" -msgstr "serveur" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" +msgstr "" + +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" msgstr "" "Flower 1.0 : ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "Create a new message with specified parameters." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid ":py:obj:`get_node_ids `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of -msgid "Get node IDs." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" -msgstr "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -msgid "Pull messages based on message IDs." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of -msgid "Push messages to specified node IDs." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." 
msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" -msgstr "" -"Flower 1.0 : ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of -msgid "Push messages to specified node IDs and pull the reply messages." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:3 of -msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:6 of -msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`title `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." 
msgstr "" -#: flwr.server.driver.driver.Driver.create_message:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:12 of -msgid "The ID of the destination node to which the message is being sent." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:14 of -msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." -msgstr "" - -#: flwr.server.driver.driver.Driver.create_message:17 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:23 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." 
+":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:6 of -msgid "An iterable of message IDs for which reply messages are to be retrieved." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:9 of -msgid "**messages** -- An iterable of messages received." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -msgid "An iterable of messages to be sent." -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.driver.driver.Driver.push_messages:9 of -msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. 
It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." -msgstr "" - -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -msgid "**replies** -- An iterable of reply messages received from the SuperLink." +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of -msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." 
msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add one loss entry (from centralized evaluation)." -msgstr "Évaluation centralisée" - -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -#, fuzzy -msgid "Add metrics entries (from centralized evaluation)." -msgstr "Évaluation centralisée" - -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of -msgid "Add metrics entries (from distributed evaluation)." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of -msgid "Add metrics entries (from distributed fit)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 -msgid "LegacyContext" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Bases: :py:class:`~flwr.common.context.Context`" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." 
msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`config `\\" +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`strategy `\\" +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`client_manager `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`history `\\" +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of +msgid "Return a copy of the string with leading and trailing whitespace removed." +msgstr "" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`state `\\" +msgid ":py:obj:`swapcase `\\ \\(\\)" msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" -msgstr "Serveur" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of +msgid "" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." 
+msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`client_manager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of +msgid "Replace each character in the string using the given translation table." msgstr "" -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of -msgid "Return ClientManager." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`upper `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of +msgid "Return a copy of the string converted to uppercase." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -msgid "Send shutdown signal to all clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S starts with the specified prefix, False otherwise." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -msgid "Validate current global model on a number of clients." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of -msgid "Run federated averaging for a number of rounds." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of -msgid "Perform a single round of federated averaging." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of -msgid "Set the max_workers used by ThreadPoolExecutor." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." 
msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Replace server strategy." -msgstr "stratégie.du.serveur" +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -#, fuzzy -msgid "ServerApp" -msgstr "serveur" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." +msgstr "" -#: flwr.server.server_app.ServerApp:5 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Use the `ServerApp` with an existing `Strategy`:" -msgstr "Utilise une stratégie existante" +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.server_app.ServerApp:15 of -msgid "Use the `ServerApp` with a custom main function:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." msgstr "" -#: flwr.server.server_app.ServerApp.main:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`main `\\ \\(\\)" +msgid ":py:obj:`isspace `\\ \\(\\)" msgstr "serveur.stratégie.Stratégie" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid "Return a decorator that registers the main fn with the server app." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "ServerConfig" -msgstr "serveur" +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.server_config.ServerConfig:3 of -msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`num_rounds `\\" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`round_timeout `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 -msgid "SimpleClientManager" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`num_available `\\" -" \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`register `\\ " -"\\(client\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of -msgid "The number of clients to wait for." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of -msgid "The time in seconds to wait for, defaults to 86400 (24h)." -msgstr "" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of -msgid "**success**" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." 
msgstr "" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" +msgid ":py:obj:`maketrans `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -#, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" - -#: ../../source/ref-api/flwr.server.start_server.rst:2 -#, fuzzy -msgid "start\\_server" -msgstr "serveur.start_server" - -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" msgstr "" -#: flwr.server.app.start_server:5 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" msgstr "" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of -msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" msgstr "" -#: flwr.server.app.start_server:12 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" msgstr "" -#: flwr.server.app.start_server:16 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`START_SIMULATION_ENTER " +"`\\" msgstr "" -#: flwr.server.app.start_server:21 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +":py:obj:`START_SIMULATION_LEAVE " +"`\\" msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." -msgstr "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:28 of +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:32 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "CA certificate." 
-msgstr "Certificats" +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:33 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "server certificate." -msgstr "Certificats" +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:34 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "server private key." -msgstr "stratégie.du.serveur" - -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." -msgstr "" +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:42 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Starting an insecure server:" -msgstr "Démarrer le serveur" +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.app.start_server:46 of -#, fuzzy -msgid "Starting an SSL-enabled server:" -msgstr "Démarrer le serveur" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:2 -#, fuzzy -msgid "strategy" -msgstr "stratégie.du.serveur" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Bulyan strategy." 
-msgstr "Stratégies intégrées" +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:3 of msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +"More specifically, make the first character have upper case and the rest " +"lower case." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side adaptive clipping." +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.count:1 of msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side adaptive clipping." +#: flwr.common.EventType.encode:3 of +msgid "encoding" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +#: flwr.common.EventType.encode:4 of +msgid "The encoding in which to encode the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side fixed clipping." +#: flwr.common.EventType.encode:9 of +msgid "errors" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.encode:6 of msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side fixed clipping." +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.endswith:1 of msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -#, fuzzy -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#: flwr.common.EventType.expandtabs:3 of +msgid "If tabsize is not given, a tab size of 8 characters is assumed." msgstr "" -"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " -"sur l'optimisation fédérée adaptative." -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +msgid "Return -1 on failure." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.format:1 of msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -#, fuzzy -msgid "Federated Averaging strategy." -msgstr "Stratégie de moyenne fédérée." - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.format_map:1 of msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -#, fuzzy -msgid "Federated Averaging with Momentum strategy." -msgstr "Stratégie de moyenne fédérée." - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isalnum:3 of msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -#, fuzzy -msgid "Configurable FedMedian strategy implementation." -msgstr "Configuration de l'évaluation fédérée" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isalpha:3 of msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of -#, fuzzy -msgid "Federated Optim strategy." -msgstr "Stratégie de moyenne fédérée." +#: flwr.common.EventType.isascii:3 of +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." +msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isdecimal:3 of msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -#, fuzzy -msgid "Federated Optimization strategy." -msgstr "Stratégie de moyenne fédérée." - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isdigit:3 of msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: flwr.common.EventType.isidentifier:3 of +msgid "" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.islower:3 of msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." +#: flwr.common.EventType.isnumeric:3 of +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isprintable:3 of msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." +#: flwr.common.EventType.isspace:3 of +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.istitle:3 of msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." +#: flwr.common.EventType.isupper:3 of +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.join:3 of msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: flwr.common.EventType.join:6 of +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +msgid "If chars is given and not None, remove characters in chars instead." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: flwr.common.EventType.maketrans:3 of +msgid "" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.partition:3 of msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." 
+#: flwr.common.EventType.partition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.removeprefix:3 of msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: flwr.common.EventType.removesuffix:3 of +msgid "" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#, fuzzy -msgid ":py:obj:`Strategy `\\ \\(\\)" -msgstr "serveur.stratégie.Stratégie" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." +#: flwr.common.EventType.replace:5 of +msgid "count" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 -msgid "Bulyan" +#: flwr.common.EventType.replace:4 of +msgid "" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +#: flwr.common.EventType.replace:7 of +msgid "" +"If the optional argument count is given, only the first count occurrences" +" are replaced." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." +#: flwr.common.EventType.rpartition:3 of +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." +#: flwr.common.EventType.rpartition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +msgid "" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +msgid "" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." -msgstr "" - -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." +#: flwr.common.EventType.split:13 of +msgid "" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. 
With natural text that includes punctuation, consider using " +"the regular expression module." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -#, fuzzy -msgid "Initial global model parameters." -msgstr "Initialise le modèle global" - -#: flwr.server.strategy.bulyan.Bulyan:27 of +#: flwr.common.EventType.splitlines:3 of msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" +#: flwr.common.EventType.startswith:1 of +msgid "" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.title:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.translate:5 of #, fuzzy -msgid "Aggregate evaluation losses using weighted average." -msgstr "Résultats globaux de l'évaluation." +msgid "table" +msgstr "Database" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.translate:4 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "Aggregate fit results using Bulyan." -msgstr "Résultats globaux de l'évaluation." - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.EventType.translate:7 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -#, fuzzy -msgid "Configure the next round of evaluation." 
-msgstr "Configuration de l'évaluation côté serveur" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: 
flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." 
+#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "Initialize global model parameters." 
-msgstr "Initialise le modèle global" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." 
+#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +#, fuzzy +msgid "GetParametersIns" +msgstr ":code:`get_parameters`" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +#, fuzzy +msgid "GetParametersRes" +msgstr ":code:`get_parameters`" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of -msgid "This class is deprecated and will be removed in a future release." 
+#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "Aggregate evaluation losses using the given strategy." -msgstr "Résultats globaux de l'évaluation." 
+#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." +#: ../../source/ref-api/flwr.common.Message.rst:2 +#, fuzzy +msgid "Message" +msgstr "Côté serveur" + +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message:5 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message:8 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A dataclass that captures information about an error that took place when" +" processing another message." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." 
+msgstr "" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -#, fuzzy -msgid "The current round of federated learning." -msgstr "Qu'est-ce que l'apprentissage fédéré ?" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of -msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." 
+#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." +msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 #: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#, fuzzy +msgid "The content of this message." +msgstr "Évaluer la réponse d'un client." + +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." 
+#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. 
This " +"follows the equation:" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_reply:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of -msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of -msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +#: flwr.common.message.Message.create_reply:16 of +msgid "A new `Message` instance representing the reply." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyClientSideAdaptiveClipping" +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of -msgid "Use `adaptiveclipping_mod` modifier at the client side." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of -msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -msgid "The strategy to which DP functionalities will be added by this wrapper." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of -msgid "The noise multiplier for the Gaussian mechanism for model updates." 
+#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of -msgid "The number of clients that are sampled on each round." +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of -msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of -msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of -msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. recommends to set to `expected_num_records/20`" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of -#, fuzzy -msgid "Create a strategy:" -msgstr "stratégie.du.serveur" - -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of -msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Metadata:13 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"An identifier for grouping messages. 
In some settings, this is used as " +"the FL round." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: flwr.common.message.Metadata:16 of +msgid "Time-to-live for this message in seconds." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "" + +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "Aggregate training results and update clip norms." -msgstr "Résultats globaux de l'évaluation." +msgid ":py:obj:`created_at `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +msgid "Unix timestamp when the message was created." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`dst_node_id `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`group_id `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 -#, fuzzy -msgid "DifferentialPrivacyClientSideFixedClipping" -msgstr "Confidentialité différentielle" - -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of -msgid "Use `fixedclipping_mod` modifier at the client side." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_id `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of -msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." 
+#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_type `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of -msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`reply_to_message `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of -msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`run_id `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`src_node_id `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`ttl `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 #: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +msgid "Time-to-live for this message." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of +#: ../../source/ref-api/flwr.common.Metrics.rst:2 #, fuzzy -msgid "Add noise to the aggregated parameters." 
-msgstr "Puis sérialise le résultat agrégé :" +msgid "Metrics" +msgstr "Suivi des mesures" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:1 of msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:3 of msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:9 of msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyServerSideAdaptiveClipping" +#: flwr.common.record.metricsrecord.MetricsRecord:12 of +msgid "" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. recommends to set to " -"`expected_num_records/20`" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:28 of msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:39 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:50 of msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "DifferentialPrivacyServerSideFixedClipping" -msgstr "Confidentialité différentielle" +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy msgid "" -"Wrap the strategy with the 
DifferentialPrivacyServerSideFixedClipping " -"wrapper" -msgstr "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of -msgid "Compute the updates, clip, and pass them for aggregation." 
+#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 +msgid "NDArrays" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#, fuzzy +msgid "ParametersRecord" +msgstr "Paramètres du modèle." + +#: flwr.common.record.parametersrecord.ParametersRecord:1 of msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of -msgid "Afterward, add noise to the aggregated parameters." +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +msgid "" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. 
A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -#, fuzzy -msgid "FaultTolerantFedAvg" -msgstr "server.strategy.FaultTolerantFedAvg" +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." +msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:12 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:23 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +msgid "Let's see some examples:" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:50 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. 
For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:83 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:997 -msgid "FedAdagrad" -msgstr "FedAdagrad" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid "" +":py:obj:`setdefault `\\ 
" +"\\(k\\[\\,d\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " -"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." 
+#: ../../source/ref-api/flwr.common.Properties.rst:2 +#, fuzzy +msgid "Properties" +msgstr "Prérequis" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +#, fuzzy +msgid "ReconnectIns" +msgstr "Collecte centralisée des données" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:24 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.common.record.recordset.RecordSet:29 of +msgid "Let's see an example." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:47 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:66 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -#, fuzzy -msgid "FedAdam" -msgstr "FedAdagrad" - -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +#, fuzzy +msgid "ServerMessage" +msgstr "Côté serveur" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#: ../../source/ref-api/flwr.common.Status.rst:2 #, fuzzy -msgid "FedAvg" -msgstr "DP-FedAvg" +msgid "Status" +msgstr "Statut du client." 
-#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of -msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of -msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:33 of -msgid "Enable (True) or disable (False) in-place aggregation of model updates." 
+#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.common.configure.rst:2 +#, fuzzy +msgid "configure" +msgstr "Configurer les clients" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: logging.Logger.log:3 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." msgstr "" +"Pour transmettre des informations sur les exceptions, utilise l'argument " +"mot-clé exc_info avec une valeur vraie, par ex." 
-#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" + +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -#, fuzzy -msgid "FedAvgAndroid" -msgstr "DPFedAvgAdaptive" +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "serveur" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:22::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower 
server using the gRPC transport layer." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "Deserialize NumPy array from bytes." -msgstr "Désérialise le tableau numérique NumPy à partir d'octets." +msgid ":py:obj:`Driver `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "Serialize NumPy array to bytes." -msgstr "Sérialise le tableau numérique NumPy en octets." +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +#, fuzzy +msgid "Flower ServerApp." +msgstr "Serveur de Flower" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of +#, fuzzy +msgid "Flower server config." +msgstr "Serveur de Flower" + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of +#: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid "Convert parameters object to NumPy weights." -msgstr "Convertit l'objet des paramètres en ndarrays NumPy." +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid "FedAvgM" -msgstr "DP-FedAvg" +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of +#, fuzzy +msgid "Workflows." +msgstr "Flux de travail" + +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +#, fuzzy +msgid "ClientManager" +msgstr "client" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of -msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:2 #, fuzzy -msgid "Aggregate fit results using median." -msgstr "Résultats globaux de l'évaluation." +msgid "Driver" +msgstr "serveur" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid "" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." 
-msgstr "" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" +"Flower 1.0 : ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid ":py:obj:`run `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid "Run information." +msgstr "Simulation de moniteur" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:3 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:6 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The content for the new message. This holds records that are to be sent " +"to the destination node." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:9 of msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"The type of the message, defining the action to be executed on the " +"receiving end." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of +#: flwr.server.driver.driver.Driver.create_message:17 of msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." 
msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of +#: flwr.server.driver.driver.Driver.create_message:23 of msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +"**message** -- A new `Message` instance with the specified content and " +"metadata." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of +#: flwr.server.driver.driver.Driver.pull_messages:3 of msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:30 of +#: flwr.server.driver.driver.Driver.push_messages:3 of msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:65 of -msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:9 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:3 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:19 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. 
A message remains valid until its TTL, " +"which is not affected by `timeout`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy +msgid "Add one loss entry (from centralized evaluation)." +msgstr "Évaluation centralisée" + +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" -msgstr "" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +#, fuzzy +msgid "Add metrics entries (from centralized evaluation)." 
+msgstr "Évaluation centralisée" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`client_manager `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`history `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr "serveur.stratégie.Stratégie" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, 
fuzzy +msgid ":py:obj:`node_config `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "Aggregate evaluation metrics using average." -msgstr "Résultats globaux de l'évaluation." +msgid ":py:obj:`run_config `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" +msgstr "Serveur" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." 
msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." 
msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +#, fuzzy +msgid "Replace server strategy." 
+msgstr "stratégie.du.serveur" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#, fuzzy +msgid "ServerApp" +msgstr "serveur" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server_app.ServerApp:5 of +#, fuzzy +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "Utilise une stratégie existante" + +#: flwr.server.server_app.ServerApp:17 of +msgid "Use the `ServerApp` with a custom main function:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#: flwr.server.server_app.ServerApp.main:1::1 of #, fuzzy -msgid "FedXgbNnAvg" -msgstr "DP-FedAvg" +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of -msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +#, fuzzy +msgid "ServerAppComponents" +msgstr "serveur" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:3 of msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. 
If no instance is provided, one will be created internally." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:9 of +#, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." msgstr "" +"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " +"`flwr.server.strategy.FedAvg`, qui est équivalent)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:13 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" +":py:obj:`client_manager " +"`\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`server `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#, fuzzy +msgid "ServerConfig" +msgstr "serveur" + +#: flwr.server.server_config.ServerConfig:3 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"All attributes have default values which allows users to configure just " +"the ones they care about." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." 
+#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, 
timeout\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" msgstr "" -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." +#: ../../source/ref-api/flwr.server.start_server.rst:2 +#, fuzzy +msgid "start\\_server" +msgstr "serveur.start_server" + +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:5 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:12 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -#, fuzzy -msgid "Aggregate fit results using Krum." -msgstr "Résultats globaux de l'évaluation." - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:21 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. 
The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.app.start_server:32 of +#, fuzzy +msgid "CA certificate." +msgstr "Certificats" + +#: flwr.server.app.start_server:33 of +#, fuzzy +msgid "server certificate." +msgstr "Certificats" + +#: flwr.server.app.start_server:34 of +#, fuzzy +msgid "server private key." +msgstr "stratégie.du.serveur" + +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:42 of +#, fuzzy +msgid "Starting an insecure server:" +msgstr "Démarrer le serveur" + +#: flwr.server.app.start_server:46 of +#, fuzzy +msgid "Starting an SSL-enabled server:" +msgstr "Démarrer le serveur" + +#: ../../source/ref-api/flwr.server.strategy.rst:2 +#, fuzzy +msgid "strategy" +msgstr "stratégie.du.serveur" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of #, fuzzy -msgid "QFedAvg" -msgstr "DP-FedAvg" +msgid "Bulyan strategy." +msgstr "Stratégies intégrées" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." 
msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." 
msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -#, fuzzy -msgid "Strategy" -msgstr "stratégie.du.serveur" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -#, fuzzy -msgid "Aggregate evaluation results." -msgstr "Résultats globaux de l'évaluation." - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 #: of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "Strategy wrapper for central DP with server-side adaptive clipping." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -#, fuzzy -msgid "Aggregate training results." -msgstr "Résultats globaux de l'évaluation." 
- -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -#, fuzzy -msgid "Evaluate the current model parameters." -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." 
+msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of #, fuzzy -msgid "Initialize the (global) model parameters." -msgstr "Initialise le modèle global" +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +msgstr "" +"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " +"sur l'optimisation fédérée adaptative." -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." 
msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#, fuzzy +msgid "Federated Averaging strategy." +msgstr "Stratégie de moyenne fédérée." + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." 
+":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +#, fuzzy +msgid "Federated Averaging with Momentum strategy." +msgstr "Stratégie de moyenne fédérée." + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +#, fuzzy +msgid "Configurable FedMedian strategy implementation." +msgstr "Configuration de l'évaluation fédérée" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +#, fuzzy +msgid "Federated Optim strategy." +msgstr "Stratégie de moyenne fédérée." + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of #, fuzzy -msgid "workflow" -msgstr "Flux de travail" +msgid "Federated Optimization strategy." 
+msgstr "Stratégie de moyenne fédérée." -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -msgid "Default workflow in Flower." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "The workflow for the SecAgg+ protocol." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "The workflow for the SecAgg protocol." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." 
msgstr "" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -#, fuzzy -msgid "DefaultWorkflow" -msgstr "Flux de travail" - -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 -#, fuzzy -msgid "SecAggPlusWorkflow" -msgstr "Flux de travail" - -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of -msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -msgid "key shares." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of -msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of -msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. 
Each " -"client sends one share to each of its neighbors while retaining one." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of -msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 -#: of -msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." 
-msgstr "" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "serveur.stratégie.Stratégie" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of -msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of -msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 -#: of -msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." 
+#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 -#: of -msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of -msgid "Too large `max_weight` may compromise the precision of the quantization." +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 -#: of -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 -#: of -msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of -msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. 
This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 -#: of -msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "Execute the 'collect masked vectors' stage." +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 -#: of -msgid "Execute the 'setup' stage." +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 -#: of -msgid "Execute the 'share keys' stage." 
-msgstr "" +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +#, fuzzy +msgid "Initial global model parameters." +msgstr "Initialise le modèle global" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: flwr.server.strategy.bulyan.Bulyan:27 of msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 -#: of -msgid "Execute the 'unmask' stage." 
+#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 -#, fuzzy -msgid "SecAggWorkflow" -msgstr "Flux de travail" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of -msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." 
-msgstr "" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "Aggregate evaluation losses using weighted average." +msgstr "Résultats globaux de l'évaluation." -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of -msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." -msgstr "" +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "Aggregate fit results using Bulyan." +msgstr "Résultats globaux de l'évaluation." -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of -msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." -msgstr "" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: 
flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +#, fuzzy +msgid "Configure the next round of evaluation." +msgstr "Configuration de l'évaluation côté serveur" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: 
flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "simulation" -msgstr "Simulation de moniteur" +msgid "Initialize global model parameters." +msgstr "Initialise le modèle global" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -#, fuzzy -msgid "Start a Ray-based Flower simulation server." 
-msgstr "Simulation de moniteur" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." +msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." 
msgstr "" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 -#, fuzzy -msgid "run\\_simulation" -msgstr "Simulation de moniteur" - -#: flwr.simulation.run_simulation.run_simulation:3 of -msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." -msgstr "" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" -#: flwr.simulation.run_simulation.run_simulation:6 of -msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:9 of -msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." 
+#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:15 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation losses using the given strategy." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:19 of -msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. 
" -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. " -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -#, fuzzy -msgid "start\\_simulation" -msgstr "démarrer_simulation" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." +msgstr "" -#: flwr.simulation.app.start_simulation:3 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. 
Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:16 of -msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function 
from the strategy." msgstr "" -#: flwr.simulation.app.start_simulation:20 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.simulation.app.start_simulation:25 of -msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." msgstr "" -#: flwr.simulation.app.start_simulation:31 of -msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. 
If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +#, fuzzy +msgid "The current round of federated learning." +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. 
If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." msgstr "" -#: flwr.simulation.app.start_simulation:45 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:48 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:50 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." +"Configure the next round of training incorporating Differential Privacy " +"(DP)." msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. 
You can use this dictionary for such purpose." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:57 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +msgid "" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" -msgstr "Changelog" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +msgid "" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." 
+msgstr "" -#: ../../source/ref-changelog.md:3 -#, fuzzy -msgid "v1.9.0 (2024-06-10)" -msgstr "v1.3.0 (2023-02-06)" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:105 -#: ../../source/ref-changelog.md:169 ../../source/ref-changelog.md:262 -#: ../../source/ref-changelog.md:362 ../../source/ref-changelog.md:446 -#: ../../source/ref-changelog.md:510 ../../source/ref-changelog.md:568 -#: ../../source/ref-changelog.md:637 ../../source/ref-changelog.md:706 -msgid "Thanks to our contributors" -msgstr "Merci à nos contributeurs" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." +msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:107 -#: ../../source/ref-changelog.md:171 ../../source/ref-changelog.md:264 -#: ../../source/ref-changelog.md:364 ../../source/ref-changelog.md:448 -#: ../../source/ref-changelog.md:512 ../../source/ref-changelog.md:570 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." msgstr "" -"Nous tenons à remercier tout particulièrement tous les contributeurs qui " -"ont rendu possible la nouvelle version de Flower (dans l'ordre `git " -"shortlog`) :" -#: ../../source/ref-changelog.md:9 -msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:175 ../../source/ref-changelog.md:268 -#: ../../source/ref-changelog.md:368 ../../source/ref-changelog.md:452 -#: ../../source/ref-changelog.md:516 ../../source/ref-changelog.md:574 -#: ../../source/ref-changelog.md:643 ../../source/ref-changelog.md:772 -#: ../../source/ref-changelog.md:814 ../../source/ref-changelog.md:881 -#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:992 -#: ../../source/ref-changelog.md:1031 ../../source/ref-changelog.md:1064 -#: ../../source/ref-changelog.md:1114 -msgid "What's new?" -msgstr "Quoi de neuf ?" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." 
+msgstr "" -#: ../../source/ref-changelog.md:13 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." 
msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:15 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." msgstr "" -#: ../../source/ref-changelog.md:17 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of msgid "" -"This is the first preview release of the Flower-native authentication " -"system. 
Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" msgstr "" -#: ../../source/ref-changelog.md:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of #, fuzzy +msgid "Create a strategy:" +msgstr "stratégie.du.serveur" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " 
-"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:21 -msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" msgstr "" -#: ../../source/ref-changelog.md:23 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:25 +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:27 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of #, fuzzy +msgid "Aggregate training results and update clip norms." +msgstr "Résultats globaux de l'évaluation." 
+ +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." 
+":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:31 -#, fuzzy +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:33 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:35 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 #, fuzzy -msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "Confidentialité différentielle" + +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:37 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -#: ../../source/ref-changelog.md:39 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." 
msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:41 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" msgstr "" -#: ../../source/ref-changelog.md:43 -#, fuzzy -msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Logs received a substantial update. Not only are logs now much nicer to " -"look at, but they are also more configurable." 
+":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:47 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of #, fuzzy +msgid "Add noise to the aggregated parameters." +msgstr "Puis sérialise le résultat agrégé :" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:49 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. 
One example is a much improved SuperNode shutdown procedure." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:51 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:53 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." 
+":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:55 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " -"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " 
-"[#3271](https://github.com/adap/flower/pull/3271))" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. recommends to set to " +"`expected_num_records/20`" msgstr "" -#: ../../source/ref-changelog.md:57 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" msgstr "" -#: ../../source/ref-changelog.md:59 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " -"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " -"[#3519](https://github.com/adap/flower/pull/3519), " -"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:61 +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " -"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:65 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:67 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:69 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 #, fuzzy +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "Confidentialité différentielle" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: 
of msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " -"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " -"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -"**Mise à jour de la documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:71 -msgid "As always, Flower code examples have received many updates." 
+#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:73 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " -"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " -"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " -"[#3327](https://github.com/adap/flower/pull/3327), " -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " -"[#3245](https://github.com/adap/flower/pull/3245), " 
-"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:75 ../../source/ref-changelog.md:1058 -msgid "Deprecations" -msgstr "Dépréciations" - -#: ../../source/ref-changelog.md:77 -#, fuzzy -msgid "**Deprecate Python 3.8 support**" -msgstr "**Créer le PR**" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." +msgstr "" -#: ../../source/ref-changelog.md:79 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." 
+":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:81 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:83 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:85 -#, fuzzy +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:87 -msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." 
+#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." msgstr "" -#: ../../source/ref-changelog.md:89 ../../source/ref-changelog.md:163 -#: ../../source/ref-changelog.md:238 ../../source/ref-changelog.md:350 -#: ../../source/ref-changelog.md:440 ../../source/ref-changelog.md:504 -#: ../../source/ref-changelog.md:562 ../../source/ref-changelog.md:631 -#: ../../source/ref-changelog.md:693 ../../source/ref-changelog.md:712 -#: ../../source/ref-changelog.md:868 ../../source/ref-changelog.md:939 -#: ../../source/ref-changelog.md:976 ../../source/ref-changelog.md:1019 -msgid "Incompatible changes" -msgstr "Changements incompatibles" +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#, fuzzy +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" -#: ../../source/ref-changelog.md:91 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " -"[#3408](https://github.com/adap/flower/pull/3408))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:93 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:95 -#, fuzzy -msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." msgstr "" -"**Documentation restructurée** " -"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:97 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:99 -#, fuzzy +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:101 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI options for gRPC and REST were merged into one unified set of " -"options. Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:103 -#, fuzzy -msgid "v1.8.0 (2024-04-03)" -msgstr "v1.3.0 (2023-02-06)" - -#: ../../source/ref-changelog.md:109 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:113 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" 
-#: ../../source/ref-changelog.md:115 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:117 -#, fuzzy -msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" -msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" +msgstr "FedAdagrad" -#: ../../source/ref-changelog.md:119 -msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. 
The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" msgstr "" -#: ../../source/ref-changelog.md:121 +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of #, fuzzy -msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " +"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-changelog.md:123 -msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. 
Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." msgstr "" -#: ../../source/ref-changelog.md:125 -#, fuzzy +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:127 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
+":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:129 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:131 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." 
+":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:133 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:135 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:137 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:139 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:141 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 #, fuzzy -msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +msgid "FedAdam" +msgstr "FedAdagrad" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:143 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:145 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:147 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:149 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Introduire une nouvelle fleur Référence : FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:151 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:153 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:155 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:157 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:159 -msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." 
+#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#, fuzzy +msgid "FedAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" msgstr "" -#: ../../source/ref-changelog.md:161 +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " 
-"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " 
-"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:165 ../../source/ref-changelog.md:442 -#: ../../source/ref-changelog.md:506 ../../source/ref-changelog.md:564 -#: ../../source/ref-changelog.md:633 ../../source/ref-changelog.md:695 -msgid "None" -msgstr "Aucun" - -#: ../../source/ref-changelog.md:167 -#, fuzzy -msgid "v1.7.0 (2024-02-05)" -msgstr "v1.3.0 (2023-02-06)" - -#: ../../source/ref-changelog.md:173 +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." 
msgstr "" -#: ../../source/ref-changelog.md:177 -#, fuzzy -msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:179 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:181 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:183 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:185 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " -"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:187 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:189 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:191 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:193 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:195 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +#, fuzzy +msgid "FedAvgAndroid" +msgstr "DPFedAvgAdaptive" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:197 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:199 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-changelog.md:201 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of #, fuzzy +msgid "Deserialize NumPy array from bytes." +msgstr "Désérialise le tableau numérique NumPy à partir d'octets." 
+ +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:203 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:205 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:207 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:209 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:211 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of #, fuzzy +msgid "Serialize NumPy array to bytes." +msgstr "Sérialise le tableau numérique NumPy en octets." 
+ +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:213 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:215 -#, fuzzy +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " 
-"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:217 -msgid "Many Flower code examples received substantial updates." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:312 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of #, fuzzy -msgid "**Update Flower Baselines**" -msgstr "Demande pour une nouvelle Flower Baseline" +msgid "Convert parameters object to NumPy weights." +msgstr "Convertit l'objet des paramètres en ndarrays NumPy." -#: ../../source/ref-changelog.md:221 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 #, fuzzy -msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +msgid "FedAvgM" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:222 -#, fuzzy -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: flwr.server.strategy.fedavgm.FedAvgM:25 of +msgid "" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." 
msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:223 -#, fuzzy -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:224 -#, fuzzy -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" -#: ../../source/ref-changelog.md:225 -#, fuzzy -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-changelog.md:226 -#, fuzzy -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**Renommé stratégie q-FedAvg** " -"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-changelog.md:228 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " 
-"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:230 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " 
-"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:232 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:234 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:236 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " 
-"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:240 -#, fuzzy -msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:242 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:244 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:246 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate fit results using median." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:248 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Log** `Client` **exceptions dans le moteur de client virtuel** " -"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:250 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: 
../../source/ref-changelog.md:252 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:254 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:256 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:258 -msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" msgstr "" -#: ../../source/ref-changelog.md:260 -#, fuzzy -msgid "v1.6.0 (2023-11-28)" -msgstr "v1.4.0 (2023-04-21)" +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "" -#: ../../source/ref-changelog.md:266 -msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." msgstr "" -#: ../../source/ref-changelog.md:270 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " -"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:272 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:274 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " 
-"`xgboost-comprehensive` example that goes more in-depth." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:276 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvel exemple de code CoreML pour iOS** " -"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:278 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:280 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " -"paramètre de `start_simulation` " -"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:282 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:284 -#, fuzzy +#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:286 -msgid "Add gRPC request-response capability to the Android SDK." +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" msgstr "" -#: ../../source/ref-changelog.md:288 -#, fuzzy -msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:290 -msgid "Add gRPC request-response capability to the C++ SDK." +#: flwr.server.strategy.fedprox.FedProx:5 of +msgid "" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. 
A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -#: ../../source/ref-changelog.md:292 -#, fuzzy +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:294 +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." msgstr "" -#: ../../source/ref-changelog.md:296 -msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." 
+#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -#: ../../source/ref-changelog.md:298 -#, fuzzy +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:300 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." 
msgstr "" -#: ../../source/ref-changelog.md:302 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:304 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-changelog.md:306 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:308 ../../source/ref-changelog.md:310 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:314 -#, fuzzy +#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:316 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:318 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:320 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:322 -#, fuzzy -msgid "FedMeta 
[#2438](https://github.com/adap/flower/pull/2438)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "" -#: ../../source/ref-changelog.md:324 -#, fuzzy -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-changelog.md:326 -#, fuzzy -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:328 -#, fuzzy -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:330 -#, fuzzy -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: ../../source/ref-changelog.md:332 -#, fuzzy -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-changelog.md:334 -#, fuzzy -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." 
+msgstr "" -#: ../../source/ref-changelog.md:336 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:338 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:340 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " 
-"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Améliorations générales** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:342 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Mettre à jour les outils de développement " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:344 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " 
-"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:346 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " 
-"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:438 -#: ../../source/ref-changelog.md:502 ../../source/ref-changelog.md:556 -#: ../../source/ref-changelog.md:623 -msgid "Flower received many improvements under the hood, too many to list here." +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -"Flower a reçu de nombreuses améliorations sous le capot, trop nombreuses " -"pour être énumérées ici." -#: ../../source/ref-changelog.md:352 -#, fuzzy +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:354 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: 
flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation metrics using average." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:356 -#, fuzzy -msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:358 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:360 -#, fuzzy -msgid "v1.5.0 (2023-08-31)" -msgstr "v1.4.0 (2023-04-21)" - -#: ../../source/ref-changelog.md:366 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:370 -#, fuzzy +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:372 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:374 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:376 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " 
-"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:378 -msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -#: ../../source/ref-changelog.md:380 -#, fuzzy +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:382 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." 
+":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:384 -#, fuzzy +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:386 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:388 -#, fuzzy +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " 
-"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:390 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:392 -#, fuzzy -msgid "**Deprecate Python 3.7**" -msgstr "**Créer le PR**" - -#: ../../source/ref-changelog.md:394 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:396 -#, fuzzy +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/ref-changelog.md:398 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 #, fuzzy +msgid "FedXgbNnAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." 
-#: ../../source/ref-changelog.md:400 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:402 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:404 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Nouvel exemple de code PyTorch avancé** " -"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:406 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." 
+":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:408 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:410 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:412 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." 
-#: ../../source/ref-changelog.md:414 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/ref-changelog.md:416 -msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -#: ../../source/ref-changelog.md:418 -#, fuzzy -msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:420 -#, fuzzy -msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." msgstr "" -"**Initialise** `start_simulation` **avec une liste d'ID de clients** " -"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/ref-changelog.md:422 -msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. 
`start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." msgstr "" -#: ../../source/ref-changelog.md:424 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:426 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:428 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" - -#: ../../source/ref-changelog.md:430 -#, fuzzy -msgid "There's a new 30min Federated Learning PyTorch tutorial!" -msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" 
-#: ../../source/ref-changelog.md:432 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:434 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:436 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" - -#: ../../source/ref-changelog.md:444 -msgid "v1.4.0 (2023-04-21)" -msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-changelog.md:450 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. 
Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/ref-changelog.md:454 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Introduire la prise en charge de XGBoost (**`FedXgbNnAvg` **stratégie " -"et exemple)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/ref-changelog.md:456 -msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." 
+#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" msgstr "" -"Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " -"(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" et un [exemple de code] " -"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " -"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " -"XGBoost." -#: ../../source/ref-changelog.md:458 +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "" + +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." msgstr "" -"**Introduction du SDK iOS (aperçu)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:460 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"Il s'agit d'une mise à jour majeure pour tous ceux qui souhaitent mettre " -"en œuvre l'apprentissage fédéré sur les appareils mobiles iOS. 
Nous " -"disposons désormais d'un SDK swift iOS présent sous " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" qui facilitera grandement le processus de création d'applications. Pour " -"présenter son utilisation, l'[exemple " -"iOS](https://github.com/adap/flower/tree/main/examples/ios) a également " -"été mis à jour !" -#: ../../source/ref-changelog.md:462 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**Introduire un nouveau tutoriel \"Qu'est-ce que l'apprentissage fédéré ?" -" \"** ([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" -#: ../../source/ref-changelog.md:464 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of #, fuzzy +msgid "Aggregate fit results using Krum." +msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"Un nouveau [tutoriel d'entrée de gamme] " -"(https://flower.ai/docs/tutorial/Flower-0-What-is-FL.html) dans notre " -"documentation explique les bases de l'apprentissage fédéré. Il permet à " -"tous ceux qui ne connaissent pas l'apprentissage fédéré de commencer leur" -" voyage avec Flower. 
Fais-le suivre à tous ceux qui s'intéressent à " -"l'apprentissage fédéré !" -#: ../../source/ref-changelog.md:466 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Introduire une nouvelle fleur Référence : FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:468 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Cette nouvelle ligne de base reproduit la tâche MNIST+CNN de l'article " -"[Federated Optimization in Heterogeneous Networks (Li et al., 2018)] " -"(https://arxiv.org/abs/1812.06127). Elle utilise la stratégie `FedProx`, " -"qui vise à rendre la convergence plus robuste dans des contextes " -"hétérogènes." 
-#: ../../source/ref-changelog.md:470 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " -"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:472 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Cette nouvelle ligne de base reproduit une expérience évaluant les " -"performances de l'algorithme FedAvg sur le jeu de données FEMNIST tiré de" -" l'article [LEAF : A Benchmark for Federated Settings (Caldas et al., " -"2018)] (https://arxiv.org/abs/1812.01097)." 
-#: ../../source/ref-changelog.md:474 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Introduire l'API REST (expérimentale)** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:476 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#, fuzzy +msgid "QFedAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"Une nouvelle API REST a été introduite comme alternative à la pile de " -"communication basée sur gRPC. Dans cette version initiale, l'API REST ne " -"prend en charge que les clients anonymes." -#: ../../source/ref-changelog.md:478 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." 
+":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Remarque : l'API REST est encore expérimentale et est susceptible de " -"changer de manière significative au fil du temps." -#: ../../source/ref-changelog.md:480 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**Améliorer l'API (expérimentale) du pilote** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:482 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. 
This greatly " -"improves the memory efficiency of a long-running Flower server." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"L'API du pilote est encore une fonction expérimentale, mais cette version" -" introduit quelques améliorations majeures. L'une des principales " -"améliorations est l'introduction d'une base de données SQLite pour " -"stocker l'état du serveur sur le disque (au lieu de la mémoire). Une " -"autre amélioration est que les tâches (instructions ou résultats) qui ont" -" été livrées seront désormais supprimées, ce qui améliore " -"considérablement l'efficacité de la mémoire d'un serveur Flower " -"fonctionnant depuis longtemps." -#: ../../source/ref-changelog.md:484 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**Répare les problèmes de déversement liés à Ray pendant les " -"simulations** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/ref-changelog.md:486 -#, fuzzy +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"Lors de l'exécution de longues simulations, `ray` déversait parfois " -"d'énormes quantités de données qui rendaient l'entraînement incapable de " -"continuer. ce problème est maintenant corrigé ! 
🎉" -#: ../../source/ref-changelog.md:488 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**Ajouter un nouvel exemple utilisant** `TabNet` **et Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/ref-changelog.md:490 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"TabNet est un cadre puissant et flexible pour former des modèles " -"d'apprentissage automatique sur des données tabulaires. Nous avons " -"maintenant un exemple fédéré utilisant Flower : [quickstart-" -"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" -"tabnet)." -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#, fuzzy +msgid "Strategy" +msgstr "stratégie.du.serveur" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**Ajouter un nouveau guide pratique pour le suivi des simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:494 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy +msgid "Aggregate evaluation results." 
+msgstr "Résultats globaux de l'évaluation." + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Nous avons maintenant un guide de documentation pour aider les " -"utilisateurs à surveiller leurs performances pendant les simulations." -#: ../../source/ref-changelog.md:496 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate training results." +msgstr "Agréger les résultats d'entraînement." + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**Ajouter des mesures de formation à** `History` **objet pendant les " -"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:498 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"La fonction `fit_metrics_aggregation_fn` peut être utilisée pour agréger " -"les mesures d'entraînement, mais les versions précédentes " -"n'enregistraient pas les résultats dans l'objet `History`. c'est " -"désormais le cas !"
-#: ../../source/ref-changelog.md:500 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " 
-"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " 
-"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/ada" -#: ../../source/ref-changelog.md:508 -msgid "v1.3.0 (2023-02-06)" -msgstr "v1.3.0 (2023-02-06)" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +#, fuzzy +msgid "Evaluate the current model parameters." +msgstr "Évaluer les paramètres actuels du modèle." -#: ../../source/ref-changelog.md:514 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/ref-changelog.md:518 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +#, fuzzy +msgid "Initialize the (global) model parameters." +msgstr "Initialise le modèle global" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`."
msgstr "" -"**Ajouter la prise en charge de** `workload_id` **et** `group_id` **dans " -"l'API du pilote** ([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/ref-changelog.md:520 -msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." msgstr "" -"L'API (expérimentale) Driver prend désormais en charge un `workload_id` " -"qui peut être utilisé pour identifier la charge de travail à laquelle une" -" tâche appartient. Elle prend également en charge un nouveau `group_id` " -"qui peut être utilisé, par exemple, pour indiquer le cycle de formation " -"en cours. Le `workload_id` et le `group_id` permettent tous deux aux " -"nœuds clients de décider s'ils veulent traiter une tâche ou non." -#: ../../source/ref-changelog.md:522 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." 
msgstr "" -"**Faire en sorte que l'adresse de l'API du conducteur et de l'API de la " -"flotte soit configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/ref-changelog.md:524 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" -msgstr "" -"Le serveur Flower (expérimental) de longue durée (Driver API et Fleet " -"API) peut maintenant configurer l'adresse du serveur de Driver API (via " -"`--driver-api-address`) et de Fleet API (via `--fleet-api-address`) lors " -"de son démarrage :" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." +msgstr "" -#: ../../source/ref-changelog.md:526 -#, fuzzy +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." 
msgstr "" -"``flower-superlink --driver-api-address \"0.0.0.0:8081\" --fleet-api-" -"address \"0.0.0.0:8086\" ``" - -#: ../../source/ref-changelog.md:528 -msgid "Both IPv4 and IPv6 addresses are supported." -msgstr "Les adresses IPv4 et IPv6 sont toutes deux prises en charge." -#: ../../source/ref-changelog.md:530 +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." msgstr "" -"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " -"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:532 +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -"Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " -"fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " -"ici : [quickstart-" -"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" -"fastai)." -#: ../../source/ref-changelog.md:534 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." 
msgstr "" -"**Rendre l'exemple Android compatible avec** `flwr >= 1.0.0` **et les " -"dernières versions d'Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/ref-changelog.md:536 +#: ../../source/ref-api/flwr.server.workflow.rst:2 #, fuzzy +msgid "workflow" +msgstr "Flux de travail" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -"L'exemple de code Android a reçu une mise à jour substantielle : le " -"projet est compatible avec Flower 1.0 et les versions ultérieures, " -"l'interface utilisateur a reçu un rafraîchissement complet, et le projet " -"est mis à jour pour être compatible avec les outils Android les plus " -"récents." -#: ../../source/ref-changelog.md:538 -msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." msgstr "" -"**Ajouter une nouvelle stratégie `FedProx`** " -"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:540 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." 
+":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -"Cette " -"[stratégie](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" est presque identique à " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" mais aide les utilisateurs à reproduire ce qui est décrit dans cet " -"[article](https://arxiv.org/abs/1812.06127). Elle ajoute essentiellement " -"un paramètre appelé `proximal_mu` pour régulariser les modèles locaux par" -" rapport aux modèles globaux." -#: ../../source/ref-changelog.md:542 -msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." msgstr "" -"**Ajouter de nouvelles métriques aux événements de télémétrie** " -"([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/ref-changelog.md:544 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -"Une structure d'événements mise à jour permet, par exemple, de regrouper " -"des événements au sein d'une même charge de travail." -#: ../../source/ref-changelog.md:546 -msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." 
msgstr "" -"**Ajouter une nouvelle section de tutoriel sur les stratégies " -"personnalisées** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/ref-changelog.md:548 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#, fuzzy +msgid "DefaultWorkflow" +msgstr "Flux de travail" + +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 #, fuzzy +msgid "SecAggPlusWorkflow" +msgstr "Flux de travail" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." 
msgstr "" -"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " -"traite de la mise en œuvre d'une stratégie personnalisée à partir de zéro" -" : [Ouvrir dans " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a" -"-Strategy-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:550 -msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -"**Ajouter une nouvelle section de tutoriel sur la sérialisation " -"personnalisée** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/ref-changelog.md:552 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." 
msgstr "" -"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " -"traite de la sérialisation personnalisée : [Ouvrir dans " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4" -"-Client-and-NumPyClient-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:554 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " 
-"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." msgstr "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/ada" -#: ../../source/ref-changelog.md:558 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " 
-"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." msgstr "" -"**Mise à jour de la documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:560 ../../source/ref-changelog.md:627 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." msgstr "" -"Comme d'habitude, la documentation s'est beaucoup améliorée. C'est une " -"autre étape dans notre effort pour faire de la documentation de Flower la" -" meilleure documentation de tout projet. Reste à l'écoute et comme " -"toujours, n'hésite pas à nous faire part de tes commentaires !" 
- -#: ../../source/ref-changelog.md:566 -msgid "v1.2.0 (2023-01-13)" -msgstr "v1.2.0 (2023-01-13)" -#: ../../source/ref-changelog.md:572 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -"adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " -"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/ref-changelog.md:576 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." msgstr "" -"**Introduire une nouvelle fleur Référence : FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/ref-changelog.md:578 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. 
They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -"Au cours des prochaines semaines, nous publierons un certain nombre de " -"nouvelles implémentations de référence utiles en particulier pour les " -"nouveaux venus en FL. Elles revisiteront généralement des articles bien " -"connus de la littérature, et seront adaptées à l'intégration dans votre " -"propre application ou à l'expérimentation, afin d'approfondir votre " -"connaissance de FL en général. La publication d'aujourd'hui est la " -"première de cette série. [Lire la " -"suite.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-" -"cnn/)" -#: ../../source/ref-changelog.md:580 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." 
msgstr "" -"**Améliorer la prise en charge des GPU dans les simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:582 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." msgstr "" -"Le moteur client virtuel basé sur Ray (`start_simulation`) a été mis à " -"jour pour améliorer la prise en charge des GPU. La mise à jour inclut " -"certaines des leçons durement apprises lors de la mise à l'échelle des " -"simulations dans des environnements de grappes de GPU. De nouveaux " -"paramètres par défaut rendent l'exécution des simulations basées sur les " -"GPU beaucoup plus robuste." -#: ../../source/ref-changelog.md:584 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." 
msgstr "" -"**Améliorer la prise en charge du GPU dans les tutoriels Jupyter " -"Notebook** ([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/ref-changelog.md:586 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -"Certains utilisateurs ont signalé que les carnets Jupyter n'ont pas " -"toujours été faciles à utiliser sur les instances GPU. Nous les avons " -"écoutés et avons apporté des améliorations à tous nos carnets Jupyter ! " -"Découvre les carnets mis à jour ici :" -#: ../../source/ref-changelog.md:588 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." 
msgstr "" -"[Une introduction à l'apprentissage fédéré] " -"(https://flower.ai/docs/tutorial/Flower-1-Intro-to-FL-PyTorch.html)" -#: ../../source/ref-changelog.md:589 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." msgstr "" -"[Stratégies d'apprentissage fédéré] " -"(https://flower.ai/docs/tutorial/Flower-2-Strategies-in-FL-PyTorch.html)" -#: ../../source/ref-changelog.md:590 -#, fuzzy -msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." msgstr "" -"[Construire une stratégie] " -"(https://flower.ai/docs/tutorial/Flower-3-Building-a-Strategy-" -"PyTorch.html)" -#: ../../source/ref-changelog.md:591 -#, fuzzy -msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." 
msgstr "" -"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" -"and-NumPyClient-PyTorch.html)" -#: ../../source/ref-changelog.md:593 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." msgstr "" -"**Introduire la télémétrie optionnelle** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:595 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." 
msgstr "" -"À la suite d'une [demande de commentaires] " -"(https://github.com/adap/flower/issues/1534) de la part de la communauté," -" le projet open-source Flower introduit la collecte optionnelle de " -"mesures d'utilisation *anonymes* afin de prendre des décisions éclairées " -"pour améliorer Flower. Cela permet à l'équipe de Flower de comprendre " -"comment Flower est utilisé et quels sont les défis auxquels les " -"utilisateurs peuvent être confrontés." -#: ../../source/ref-changelog.md:597 -#, fuzzy +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -"**Flower est un cadre convivial pour l'IA collaborative et la science des" -" données.** Restant fidèle à cette déclaration, Flower permet de " -"désactiver facilement la télémétrie pour les utilisateurs qui ne " -"souhaitent pas partager des métriques d'utilisation anonymes.[Lire la " -"suite.](https://flower.ai/docs/telemetry.html)." 
-#: ../../source/ref-changelog.md:599 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -"**([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-changelog.md:601 -msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." msgstr "" -"Flower dispose désormais d'une nouvelle API de pilote (expérimentale) qui" -" permettra de créer des applications Federated Learning et Federated " -"Analytics entièrement programmables, asynchrones et multi-tenant. 
Ouf, " -"c'est beaucoup ! À l'avenir, l'API de pilote sera l'abstraction sur " -"laquelle de nombreuses fonctionnalités à venir seront construites - et tu" -" peux commencer à construire ces choses dès maintenant, aussi." -#: ../../source/ref-changelog.md:603 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"L'API du pilote permet également un nouveau mode d'exécution dans lequel " -"le serveur s'exécute indéfiniment. Plusieurs charges de travail " -"individuelles peuvent s'exécuter simultanément et démarrer et arrêter " -"leur exécution indépendamment du serveur. Ceci est particulièrement utile" -" pour les utilisateurs qui souhaitent déployer Flower en production." -#: ../../source/ref-changelog.md:605 -msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." msgstr "" -"Pour en savoir plus, consulte l'exemple de code `mt-pytorch`. Nous " -"attendons tes commentaires avec impatience !" 
-#: ../../source/ref-changelog.md:607 -msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" -msgstr "" -"Remarque : *L'API du pilote est encore expérimentale et est susceptible " -"de changer de manière significative au fil du temps.*" - -#: ../../source/ref-changelog.md:609 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/ref-changelog.md:611 -msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." msgstr "" -"Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " -"fédérée avec Pandas et Flower. Tu peux le trouver ici : [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." 
-#: ../../source/ref-changelog.md:613 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**Ajouter de nouvelles stratégies : Krum et MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/ref-changelog.md:615 -msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." msgstr "" -"Edoardo, étudiant en informatique à l'Université Sapienza de Rome, a " -"contribué à une nouvelle stratégie `Krum` qui permet aux utilisateurs " -"d'utiliser facilement Krum et MultiKrum dans leurs charges de travail." 
-#: ../../source/ref-changelog.md:617 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "Flux de travail" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -"**Mettre à jour l'exemple C++ pour qu'il soit compatible avec Flower " -"v1.2.0** ([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/ref-changelog.md:619 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -"L'exemple de code C++ a reçu une mise à jour substantielle pour le rendre" -" compatible avec la dernière version de Flower." 
-#: ../../source/ref-changelog.md:621 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -"**Améliorations générales** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:625 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " 
-"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." msgstr "" -"**Documentation mise à jour** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:629 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." msgstr "" -"L'un des points forts est le nouveau [guide du premier contributeur] " -"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" -" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" 
-#: ../../source/ref-changelog.md:635 -msgid "v1.1.0 (2022-10-31)" -msgstr "v1.1.0 (2022-10-31)" - -#: ../../source/ref-changelog.md:639 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -"Nous aimerions **remercier tout particulièrement** tous les contributeurs" -" qui ont rendu possible la nouvelle version de Flower (dans l'ordre `git " -"shortlog`) :" -#: ../../source/ref-changelog.md:641 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." msgstr "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" -#: ../../source/ref-changelog.md:645 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/ref-changelog.md:647 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -"Le premier aperçu (expérimental) des wrappers enfichables de " -"confidentialité différentielle permet de configurer et d'utiliser " -"facilement la confidentialité différentielle (DP). Les wrappers DP " -"enfichables permettent une utilisation agnostique du cadre **et** de la " -"stratégie à la fois de la DP côté client et de la DP côté serveur. Va " -"voir les documents de Flower, un nouvel explicatif va plus loin dans les " -"détails." 
-#: ../../source/ref-changelog.md:649 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**Nouvel exemple de code CoreML pour iOS** " -"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:651 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"Flower passe à iOS ! Un nouvel exemple de code massif montre comment les " -"clients Flower peuvent être construits pour iOS. L'exemple de code " -"contient à la fois des composants Flower iOS SDK qui peuvent être " -"utilisés pour de nombreuses tâches, et un exemple de tâche fonctionnant " -"sur CoreML." -#: ../../source/ref-changelog.md:653 +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "Simulation de moniteur" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -"**Nouvelle stratégie de FedMedian** " -"([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/ref-changelog.md:655 -msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." 
+#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." msgstr "" -"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " -"(FedMedian) par [Yin et al., 2018] " -"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-changelog.md:657 +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" +msgstr "serveur.stratégie.Stratégie" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" -"**Log** `Client` **exceptions dans le moteur de client virtuel** " -"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-changelog.md:659 +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "Simulation de moniteur" + +#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." msgstr "" -"Toutes les exceptions `Client` qui se produisent dans le VCE sont " -"maintenant enregistrées par défaut et ne sont pas seulement exposées à la" -" `Stratégie` configurée (via l'argument `failures`)." 
-#: ../../source/ref-changelog.md:661 +#: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." msgstr "" -"**Améliorer le moteur du client virtuel** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/ref-changelog.md:663 +#: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." msgstr "" -"Le VCE utilise maintenant Ray 2.0 sous le capot, le type de valeur du " -"dictionnaire `client_resources` a été remplacé par `float` pour permettre" -" l'allocation de fractions de ressources." -#: ../../source/ref-changelog.md:665 -msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +#: flwr.simulation.run_simulation.run_simulation:12 of +msgid "A simulation backend that runs `ClientApp`s." msgstr "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" -#: ../../source/ref-changelog.md:667 +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. 
Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -"Le moteur de client virtuel prend désormais en charge les méthodes " -"optionnelles `Client` (et `NumPyClient`)." -#: ../../source/ref-changelog.md:669 +#: flwr.simulation.run_simulation.run_simulation:21 of msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -"**Fournir des informations de type aux paquets en utilisant** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/ref-changelog.md:671 +#: flwr.simulation.run_simulation.run_simulation:28 of msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." msgstr "" -"Le paquet `flwr` est maintenant accompagné d'un fichier `py.typed` " -"indiquant que le paquet est typé. 
Cela permet de prendre en charge le " -"typage pour les projets ou les paquets qui utilisent `flwr` en leur " -"permettant d'améliorer leur code à l'aide de vérificateurs de types " -"statiques comme `mypy`." -#: ../../source/ref-changelog.md:673 +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "démarrer_simulation" + +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "Changelog" + +#: ../../source/ref-changelog.md:3 +#, fuzzy +msgid "v1.11.1 (2024-09-11)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" +msgstr "Merci à nos contributeurs" + +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -"**Exemple de code mis à jour** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"Nous tenons à remercier tout particulièrement 
tous les contributeurs qui " +"ont rendu possible la nouvelle version de Flower (dans l'ordre `git " +"shortlog`) :" -#: ../../source/ref-changelog.md:675 +#: ../../source/ref-changelog.md:9 msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " -"mis à jour pour fonctionner avec la dernière version de Flower." -#: ../../source/ref-changelog.md:677 +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "Améliorations facultatives" + +#: ../../source/ref-changelog.md:13 +#, fuzzy msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -"**Documentation mise à jour** " -"([#1355](https://github.com/adap/flower/pull/1355), " 
-"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:679 +#: ../../source/ref-changelog.md:15 +#, fuzzy msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -"Il y a eu tellement de mises à jour de la documentation que cela n'a même" -" pas de sens de les énumérer individuellement." 
+"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:681 +#: ../../source/ref-changelog.md:17 +#, fuzzy msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -"**Documentation restructurée** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:683 +#: ../../source/ref-changelog.md:19 +#, fuzzy msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -"La documentation a été restructurée pour faciliter la navigation. 
Ce " -"n'est que la première étape d'un effort plus important visant à faire de " -"la documentation de Flower la meilleure documentation de tous les projets" +"**Nouvel exemple de code pour les Transformers à visage embrassant** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:685 +#: ../../source/ref-changelog.md:21 +#, fuzzy msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -"**Ouvrir dans le bouton Colab** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:687 +#: ../../source/ref-changelog.md:23 +#, fuzzy msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -"Les quatre parties du didacticiel d'apprentissage fédéré Flower sont " -"maintenant accompagnées d'un nouveau bouton \"Ouvrir dans Colab\". Pas " -"besoin d'installer quoi que ce soit sur ta machine locale, tu peux " -"maintenant utiliser et apprendre à connaître Flower dans ton navigateur, " -"il te suffit d'un simple clic." 
+"**Ajouter des mesures de formation à** `History` **objet pendant les " +"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:689 +#: ../../source/ref-changelog.md:25 +#, fuzzy msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"**Ajouter un nouveau guide pratique pour le suivi des simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:691 +#: ../../source/ref-changelog.md:27 +#, fuzzy msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -"Le tutoriel sur l'apprentissage fédéré des fleurs a deux toutes nouvelles" -" parties couvrant les stratégies personnalisées (encore WIP) et la " -"distinction entre `Client` et `NumPyClient`. Les parties un et deux " -"existantes ont également été améliorées (beaucoup de petits changements " -"et de corrections)." 
+"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:697 -msgid "v1.0.0 (2022-07-28)" -msgstr "v1.0.0 (2022-07-28)" +#: ../../source/ref-changelog.md:29 +#, fuzzy +msgid "" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" +msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:699 -msgid "Highlights" -msgstr "Points forts" +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" +msgstr "Changements incompatibles" -#: ../../source/ref-changelog.md:701 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "Moteur de client virtuel stable** (accessible via `start_simulation`)" +#: ../../source/ref-changelog.md:35 +#, fuzzy +msgid "v1.11.0 (2024-08-30)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:702 -msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "Toutes les méthodes `Client`/`NumPyClient` sont maintenant optionnelles" +#: ../../source/ref-changelog.md:41 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" +msgstr "Quoi de neuf ?" -#: ../../source/ref-changelog.md:703 -msgid "Configurable `get_parameters`" -msgstr "`get_parameters` configurable" +#: ../../source/ref-changelog.md:45 +msgid "" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " 
+"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" +msgstr "" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:47 msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -"Des tonnes de petits nettoyages d'API résultant en une expérience plus " -"cohérente pour les développeurs" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:49 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -"Nous tenons à remercier **particulièrement** tous les contributeurs qui " -"ont rendu Flower 1.0 possible (dans l'ordre inverse de [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors)) :" -#: ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." 
+msgstr "" + +#: ../../source/ref-changelog.md:53 +#, fuzzy msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." 
+"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), [@mrinaald](" 
+"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:55 msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -"**Tous les arguments doivent être passés comme des arguments de mot-clé**" -" ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/ref-changelog.md:716 -#, fuzzy -msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." 
+#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -"Le code qui utilise des arguments positionnels (par exemple, " -"``start_client(\"127.0.0.1:8080\", FlowerClient())`) doit ajouter le mot-" -"clé pour chaque argument positionnel (par exemple, " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:59 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -"**Introduire l'objet de configuration** `ServerConfig` **dans** " -"`start_server` **et** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:60 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." msgstr "" -"Au lieu d'un dictionnaire de configuration `{\"num_rounds\" : 3, " -"\"round_timeout\" : 600.0}`, `start_server` et `start_simulation` " -"attendent maintenant un objet de configuration de type " -"`flwr.server.ServerConfig`. `ServerConfig` prend les mêmes arguments que " -"le dict de configuration précédent, mais il rend l'écriture de code " -"sécurisé plus facile et les valeurs des paramètres par défaut plus " -"transparentes." 
-#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:61 msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -"**Renommer les paramètres de la stratégie intégrée pour plus de clarté** " -"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:63 +#, fuzzy msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -"Les paramètres de stratégie intégrés suivants ont été renommés pour " -"améliorer la lisibilité et la cohérence avec d'autres API :" - -#: ../../source/ref-changelog.md:726 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "`fraction_eval` --> `fraction_evaluate`" - -#: ../../source/ref-changelog.md:727 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "`min_eval_clients` --> `min_evaluate_clients`" - -#: ../../source/ref-changelog.md:728 -msgid "`eval_fn` --> `evaluate_fn`" -msgstr "`eval_fn` --> `evaluate_fn`" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " 
+"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:65 msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -"**Mettre à jour les arguments par défaut des stratégies intégrées** " -"([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/ref-changelog.md:732 -msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." msgstr "" -"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et" -" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* " -"les clients actuellement disponibles pour l'entraînement et l'évaluation." -" Les projets qui s'appuyaient sur les valeurs par défaut précédentes " -"peuvent retrouver le comportement antérieur en initialisant la stratégie " -"de la manière suivante :" - -#: ../../source/ref-changelog.md:734 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "`stratégie = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/ref-changelog.md:736 +#: ../../source/ref-changelog.md:68 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. 
`flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -"**Ajouter** `server_round` **à** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:738 +#: ../../source/ref-changelog.md:69 msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -"La méthode `Stratégie` `évaluer` reçoit maintenant le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre." -#: ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:70 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`" -" ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:742 +#: ../../source/ref-changelog.md:72 +#, fuzzy msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." 
+"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend " -"maintenant trois paramètres : (1) le cycle actuel " -"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du" -" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration" -" (`config`)." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:74 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." msgstr "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:76 +#, fuzzy msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." 
+"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, " -"`configure_fit`, `aggregate_fit`, `configure_evaluate`, " -"`aggregate_evaluate`) reçoivent le cycle actuel " -"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer" -" la fiabilité et éviter la confusion avec *random*, ce paramètre a été " -"renommé de `rnd` à `server_round`." +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:748 +#: ../../source/ref-changelog.md:78 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" -"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" - -#: ../../source/ref-changelog.md:750 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." -msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines." 
-#: ../../source/ref-changelog.md:752 +#: ../../source/ref-changelog.md:80 +#, fuzzy msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -"**Supprimer les stratégies expérimentales** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:82 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." msgstr "" -"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, " -"`FedFSv0`, `FedFSv1`)." 
-#: ../../source/ref-changelog.md:756 +#: ../../source/ref-changelog.md:84 +#, fuzzy msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:758 +#: ../../source/ref-changelog.md:86 msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." msgstr "" -"`flwr.common.Weights` a été renommé en `flwr.common.NDArys` pour mieux " -"rendre compte de la nature de ce type." 
-#: ../../source/ref-changelog.md:760 +#: ../../source/ref-changelog.md:88 +#, fuzzy msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -msgstr "" -"**Supprimez l'ancien** `force_final_distributed_eval` **de** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" - -#: ../../source/ref-changelog.md:762 -msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -"Le paramètre `start_server` `force_final_distributed_eval` a longtemps " -"été un artefact historique, dans cette version il a finalement disparu " -"pour de bon." 
+"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:764 -msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" -msgstr "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." 
+msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." 
+msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " 
+"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" +msgstr "Dépréciations" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:102 +#, fuzzy msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -"La méthode `get_parameters` accepte maintenant un dictionnaire de " -"configuration, tout comme `get_properties`, `fit`, et `evaluate`." +"**Supprimer les installations supplémentaires no-op dépréciées** " +"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:104 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. 
If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" -" `config` **paramètre** " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:770 +#: ../../source/ref-changelog.md:113 +#, fuzzy msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -"La fonction `start_simulation` accepte maintenant un dictionnaire de " -"configuration `config` au lieu de l'entier `num_rounds`. Cela améliore la" -" cohérence entre `start_simulation` et `start_server` et facilite la " -"transition entre les deux." +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:774 +#: ../../source/ref-changelog.md:115 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." 
msgstr "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:117 +#, fuzzy msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -"La version précédente de Flower a introduit la prise en charge " -"expérimentale de Python 3.10, cette version déclare la prise en charge de" -" Python 3.10 comme stable." +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:778 -msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." msgstr "" -"**Rendre toutes les **méthodes `Client` **et** `NumPyClient` " -"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/ref-changelog.md:780 +#: ../../source/ref-changelog.md:121 +#, fuzzy msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, " -"`fit`, et `evaluate` sont toutes optionnelles. 
Cela permet d'écrire des " -"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre " -"méthode. Pas besoin d'implémenter `evaluate` quand on utilise " -"l'évaluation centralisée !" +"**Métriques personnalisées pour le serveur et les stratégies** " +"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:123 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:784 -msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -"Comme pour `start_server`, `start_simulation` accepte maintenant une " -"instance complète de `Server`. Cela permet aux utilisateurs de " -"personnaliser fortement l'exécution des expériences et ouvre la porte à " -"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client " -"virtuel." 
-#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:135 +#, fuzzy msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -"**Mettre à jour les exemples de code** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:788 +#: ../../source/ref-changelog.md:137 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." 
msgstr "" -"De nombreux exemples de code ont reçu de petites ou même de grandes mises" -" à jour de maintenance" - -#: ../../source/ref-changelog.md:790 -msgid "`scikit-learn`" -msgstr "`scikit-learn`" - -#: ../../source/ref-changelog.md:791 -msgid "`simulation_pytorch`" -msgstr "`simulation_pytorch`" - -#: ../../source/ref-changelog.md:792 -msgid "`quickstart_pytorch`" -msgstr "`quickstart_pytorch` (démarrage rapide)" - -#: ../../source/ref-changelog.md:793 -msgid "`quickstart_simulation`" -msgstr "`quickstart_simulation`" - -#: ../../source/ref-changelog.md:794 -msgid "`quickstart_tensorflow`" -msgstr "`quickstart_tensorflow`" -#: ../../source/ref-changelog.md:795 -msgid "`advanced_tensorflow`" -msgstr "`advanced_tensorflow` (en anglais)" - -#: ../../source/ref-changelog.md:797 -msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" -msgstr "" -"**Supprime l'exemple de simulation obsolète** " -"([#1328](https://github.com/adap/flower/pull/1328))" +#: ../../source/ref-changelog.md:139 +#, fuzzy +msgid "v1.10.0 (2024-07-24)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/ref-changelog.md:799 +#: ../../source/ref-changelog.md:145 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -"Supprime l'exemple obsolète `simulation` et renomme " -"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" -" au nom de `simulation_pytorch`" -#: ../../source/ref-changelog.md:801 +#: ../../source/ref-changelog.md:149 +#, fuzzy msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -"**Mise à jour de la documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " 
-"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:803 +#: ../../source/ref-changelog.md:151 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." 
msgstr "" -"Une mise à jour substantielle de la documentation corrige plusieurs " -"petits problèmes de rendu, rend les titres plus succincts pour améliorer " -"la navigation, supprime une bibliothèque obsolète, met à jour les " -"dépendances de la documentation, inclut le module `flwr.common` dans la " -"référence de l'API, inclut le support de la documentation basée sur le " -"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain " -"nombre de détails plus petits !" -#: ../../source/ref-changelog.md:805 ../../source/ref-changelog.md:860 -#: ../../source/ref-changelog.md:929 ../../source/ref-changelog.md:968 -msgid "**Minor updates**" -msgstr "**Mises à jour mineures**" - -#: ../../source/ref-changelog.md:807 +#: ../../source/ref-changelog.md:153 +#, fuzzy msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -"Ajoute un chiffre rond pour ajuster et évaluer les messages du journal " -"([#1266](https://github.com/adap/flower/pull/1266))" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), 
" +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:155 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." 
msgstr "" -"Ajouter une connexion gRPC sécurisée à l'exemple de code " -"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/ref-changelog.md:809 +#: ../../source/ref-changelog.md:157 +#, fuzzy msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" "Mettre à jour les outils de développement " "([#1231](https://github.com/adap/flower/pull/1231), " @@ -19637,12748 +17900,20574 @@ msgstr "" "[#1301](https://github.com/adap/flower/pull/1301), " "[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:159 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." 
msgstr "" -"Renomme les messages ProtoBuf pour améliorer la cohérence " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" - -#: ../../source/ref-changelog.md:812 -msgid "v0.19.0 (2022-05-18)" -msgstr "v0.19.0 (2022-05-18)" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:161 msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:163 #, fuzzy msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -"La première version préliminaire de Flower Baselines est arrivée ! 
Nous " -"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " -"l'utilisation de [Flower Baselines](https://flower.ai/docs/using-" -"baselines.html). Avec cette première version préliminaire, nous invitons " -"également la communauté à [contribuer à leurs propres lignes de " -"base](https://flower.ai/docs/baselines/how-to-contribute-baselines.html)." +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:165 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -"**SDK client C++ (aperçu) et exemple de code** " -"([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:167 +#, fuzzy msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." 
+"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " -"code de démarrage rapide qui démontre un client C++ simple utilisant le " -"SDK." +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:169 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " -"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:826 +#: ../../source/ref-changelog.md:171 +#, fuzzy msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." 
+"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -"Python 3.10 est la dernière version stable de Python et Python 3.11 " -"devrait sortir en octobre. Cette version de Flower ajoute une prise en " -"charge expérimentale pour les deux versions de Python." +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:828 +#: ../../source/ref-changelog.md:173 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -"**Agréger des mesures personnalisées grâce à des fonctions fournies par " -"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/ref-changelog.md:830 +#: ../../source/ref-changelog.md:175 +#, fuzzy msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." 
+"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -"Les stratégies intégrées prennent en charge deux nouveaux arguments, " -"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui " -"permettent de passer des fonctions d'agrégation de métriques " -"personnalisées." +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:832 +#: ../../source/ref-changelog.md:177 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." msgstr "" -"**Temps d'attente configurable par l'utilisateur** " -"([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/ref-changelog.md:834 +#: ../../source/ref-changelog.md:179 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." 
+"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -"Si le dictionnaire `config` contient une clé `round_timeout` (avec une " -"valeur `float` en secondes), le serveur attendra *au moins* " -"`round_timeout` secondes avant de fermer la connexion." -#: ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:181 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
msgstr "" -"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " -"l'évaluation centralisée dans toutes les stratégies intégrées** " -"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/ref-changelog.md:838 +#: ../../source/ref-changelog.md:183 +#, fuzzy msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -"Les stratégies intégrées peuvent maintenant effectuer une évaluation " -"fédérée (c'est-à-dire côté client) et une évaluation centralisée " -"(c'est-à-dire côté serveur) dans le même tour. L'évaluation fédérée peut " -"être désactivée en réglant `fraction_eval` sur `0.0`." +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:840 +#: ../../source/ref-changelog.md:185 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." 
msgstr "" -"**Deux nouveaux tutoriels Jupyter Notebook** " -"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/ref-changelog.md:842 +#: ../../source/ref-changelog.md:187 +#, fuzzy msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " -"expliquent les fonctionnalités de base et intermédiaires de Flower :" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:844 +#: ../../source/ref-changelog.md:189 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." 
msgstr "" -"*Introduction à l'apprentissage fédéré* : [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:846 +#: ../../source/ref-changelog.md:191 +#, fuzzy msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:848 +#: ../../source/ref-changelog.md:193 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. 
It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" -" ([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/ref-changelog.md:850 +#: ../../source/ref-changelog.md:195 #, fuzzy msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le " -"momentum du serveur [Hsu et al., 2019]." +"Nouveau thème de documentation " +"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:852 +#: ../../source/ref-changelog.md:197 msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." msgstr "" -"**Nouvel exemple de code PyTorch avancé** " -"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/ref-changelog.md:854 +#: ../../source/ref-changelog.md:199 +#, fuzzy msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts de " -"fleur avancés avec PyTorch." 
+"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:856 +#: ../../source/ref-changelog.md:201 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -"**Nouvel exemple de code JAX** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-changelog.md:858 +#: ../../source/ref-changelog.md:203 +#, fuzzy msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre " -"l'apprentissage fédéré avec JAX et Flower." 
+"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" + +#: ../../source/ref-changelog.md:207 +#, fuzzy +msgid "Documentation improvements" +msgstr "Améliorations facultatives" -#: ../../source/ref-changelog.md:862 +#: ../../source/ref-changelog.md:209 +#, fuzzy msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " -"initialisé dans `start_simulation` " -"([#1177](https://github.com/adap/flower/pull/1177))" +"**Ouvrir dans le bouton Colab** " +"([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:211 +#, fuzzy msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " -"paramètre de `start_simulation` " -"([#1171](https://github.com/adap/flower/pull/1171))" 
+"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:864 +#: ../../source/ref-changelog.md:213 #, fuzzy msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -"Nouvelle documentation pour [mettre en œuvre des " -"stratégies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:215 msgid "" -"New mobile-friendly 
documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -"Nouveau thème de documentation adapté aux mobiles " -"([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/ref-changelog.md:866 -msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " -"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/ref-changelog.md:870 +#: ../../source/ref-changelog.md:221 msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." 
msgstr "" -"**Supprime la prise en charge obsolète de Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:223 msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -"**Supprimez KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:872 +#: ../../source/ref-changelog.md:225 msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." 
msgstr "" -"**Supprimer les installations supplémentaires no-op dépréciées** " -"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:229 +#, fuzzy msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" -" ([#869](https://github.com/adap/flower/pull/869))" +"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:874 +#: ../../source/ref-changelog.md:231 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." 
msgstr "" -"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:233 +#, fuzzy msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -"**Supprime la stratégie DefaultStrategy qui est obsolète** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:876 +#: ../../source/ref-changelog.md:235 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." msgstr "" -"**Supprimer la prise en charge obsolète de la valeur de retour de la " -"précision eval_fn** ([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:237 +#, fuzzy +msgid "v1.9.0 (2024-06-10)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:243 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -"**Supprime la prise en charge obsolète du passage des paramètres initiaux" -" en tant que ndarrays NumPy** " -"([#1142](https://github.com/adap/flower/pull/1142))" - -#: ../../source/ref-changelog.md:879 -msgid "v0.18.0 (2022-02-28)" -msgstr "v0.18.0 (2022-02-28)" -#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:247 +#, fuzzy msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -"**Amélioration de la compatibilité du moteur de client virtuel avec " -"Jupyter Notebook / Google Colab** " -"([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" 
+"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:249 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -"Les simulations (utilisant le moteur de client virtuel via " -"`start_simulation`) fonctionnent maintenant plus facilement sur les " -"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " -"avec l'option `simulation` (`pip install 'flwr[simulation]'`)." -#: ../../source/ref-changelog.md:887 +#: ../../source/ref-changelog.md:251 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"This is the first preview release of the Flower-native authentication " +"system. 
Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -"**Nouvel exemple de code Jupyter Notebook** " -"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:253 +#, fuzzy msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -"Un nouvel exemple de code (`quickstart_simulation`) démontre des " -"simulations de Flower en utilisant le moteur de client virtuel via " -"Jupyter Notebook (y compris Google Colab)." 
+"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:891 +#: ../../source/ref-changelog.md:255 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." msgstr "" -"**Propriétés du client (aperçu des fonctionnalités)** " -"([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/ref-changelog.md:893 +#: ../../source/ref-changelog.md:257 +#, fuzzy msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." 
+"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -"Les clients peuvent implémenter une nouvelle méthode `get_properties` " -"pour permettre aux stratégies côté serveur d'interroger les propriétés du" -" client." +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:895 +#: ../../source/ref-changelog.md:259 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." msgstr "" -"**Support expérimental d'Android avec TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/ref-changelog.md:897 +#: ../../source/ref-changelog.md:261 +#, fuzzy msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." 
+"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" -" la fois agnostique au niveau du client et du cadre de travail. On peut " -"intégrer des plates-formes client arbitraires et avec cette version, " -"l'utilisation de Flower sur Android est devenue beaucoup plus facile." +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:899 +#: ../../source/ref-changelog.md:263 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" -"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " -"`FedAvgAndroid`. 
Le client Android et `FedAvgAndroid` sont encore " -"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " -"part entière et une implémentation unifiée de `FedAvg` intégrant la " -"nouvelle fonctionnalité de `FedAvgAndroid`." -#: ../../source/ref-changelog.md:901 +#: ../../source/ref-changelog.md:265 +#, fuzzy msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -"**Rendre le temps de garde gRPC configurable par l'utilisateur et " -"diminuer le temps de garde par défaut** " -"([#1069](https://github.com/adap/flower/pull/1069))" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:903 +#: ../../source/ref-changelog.md:267 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." -msgstr "" -"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " -"compatibilité de Flower avec davantage d'environnements cloud (par " -"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " -"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " -"spécifiques." +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" +msgstr "" -#: ../../source/ref-changelog.md:905 +#: ../../source/ref-changelog.md:269 +#, fuzzy msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -"**Nouvel exemple de confidentialité différentielle utilisant Opacus et " -"PyTorch** ([#805](https://github.com/adap/flower/pull/805))" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/ref-changelog.md:907 +#: ../../source/ref-changelog.md:271 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." msgstr "" -"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré " -"différentiellement privé avec Opacus, PyTorch et Flower." 
-#: ../../source/ref-changelog.md:909 +#: ../../source/ref-changelog.md:273 +#, fuzzy msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -"**Nouvel exemple de code pour les Transformers à visage embrassant** " -"([#863](https://github.com/adap/flower/pull/863))" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:911 +#: ../../source/ref-changelog.md:275 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -"Un nouvel exemple de code (`quickstart_huggingface`) démontre " -"l'utilisation des transformateurs Hugging Face avec Flower." 
-#: ../../source/ref-changelog.md:913 +#: ../../source/ref-changelog.md:277 +#, fuzzy msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -"**Nouvel exemple de code MLCube** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:915 +#: ../../source/ref-changelog.md:279 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." msgstr "" -"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de" -" MLCube avec Flower." 
-#: ../../source/ref-changelog.md:917 +#: ../../source/ref-changelog.md:281 +#, fuzzy msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -"**([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:919 +#: ../../source/ref-changelog.md:283 msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." +"Flower 1.9 includes reliability improvements across many parts of the " +"system. 
One example is a much improved SuperNode shutdown procedure." msgstr "" -"SSL permet d'établir des connexions cryptées et sécurisées entre les " -"clients et les serveurs. Cette version met en open-source " -"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de " -"communication cryptés accessibles à tous les utilisateurs de Flower." -#: ../../source/ref-changelog.md:921 +#: ../../source/ref-changelog.md:285 +#, fuzzy msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:923 +#: ../../source/ref-changelog.md:287 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." msgstr "" -"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " -"sur l'optimisation fédérée adaptative." 
-#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:289 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -"**Initialise** `start_simulation` **avec une liste d'ID de clients** " -"([#860](https://github.com/adap/flower/pull/860))" -#: 
../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:291 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -"`start_simulation` peut maintenant être appelé avec une liste " -"d'identifiants de clients (`clients_ids`, type : `List[str]`). Ces " -"identifiants seront passés à `client_fn` chaque fois qu'un client doit " -"être initialisé, ce qui peut faciliter le chargement de partitions de " -"données qui ne sont pas accessibles par des identifiants `int`." -#: ../../source/ref-changelog.md:931 +#: ../../source/ref-changelog.md:293 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " 
+"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -"Mettre à jour le calcul de `num_examples` dans les exemples de code " -"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/ref-changelog.md:932 +#: ../../source/ref-changelog.md:295 msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -"Exposer la version de Flower à travers `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:297 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -"`start_server` dans `app.py` renvoie maintenant un objet `History` " -"contenant les métriques de l'entraînement " -"([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/ref-changelog.md:934 +#: ../../source/ref-changelog.md:299 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/ref-changelog.md:935 -msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" -msgstr "" -"Augmente le 
temps de sommeil après le démarrage du serveur à trois " -"secondes dans tous les exemples de code " -"([#1086](https://github.com/adap/flower/pull/1086))" - -#: ../../source/ref-changelog.md:936 +#: ../../source/ref-changelog.md:301 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -"Ajout d'une nouvelle section FAQ à la documentation " -"([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/ref-changelog.md:937 +#: ../../source/ref-changelog.md:303 +#, fuzzy msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" -"Et bien d'autres changements sous le capot, des mises à jour de la " -"bibliothèque, des modifications de la documentation et des améliorations " -"de l'outillage !" 
+"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:941 -msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +#: ../../source/ref-changelog.md:305 +msgid "As always, Flower code examples have received many updates." msgstr "" -"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " -"release build** ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:307 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." 
+"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " -"depuis Flower 
0.12.0 et ils ne sont plus inclus dans les builds de " -"Flower. Les extras associés (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " -"supprimés dans une prochaine version." -#: ../../source/ref-changelog.md:945 -msgid "v0.17.0 (2021-09-24)" -msgstr "v0.17.0 (2021-09-24)" - -#: ../../source/ref-changelog.md:949 -msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" -msgstr "" -"**Moteur expérimental de client virtuel** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +#: ../../source/ref-changelog.md:311 +#, fuzzy +msgid "**Deprecate Python 3.8 support**" +msgstr "**Créer le PR**" -#: ../../source/ref-changelog.md:951 +#: ../../source/ref-changelog.md:313 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" -"L'un des objectifs de Flower est de permettre la recherche à grande " -"échelle. Cette version donne un premier aperçu (expérimental) d'une " -"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " -"client virtuel. 
Les clients virtuels permettent des simulations qui " -"s'étendent à un (très) grand nombre de clients sur une seule machine ou " -"une grappe de calcul. La façon la plus simple de tester la nouvelle " -"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" -" `quickstart_simulation` et `simulation_pytorch`." -#: ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:315 +#, fuzzy msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -"La fonction est encore expérimentale, il n'y a donc aucune garantie de " -"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " -"prime time et s'accompagne de quelques mises en garde connues. Cependant," -" les personnes curieuses sont encouragées à l'essayer et à faire part de " -"leurs réflexions." +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:955 +#: ../../source/ref-changelog.md:317 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." 
msgstr "" -"**Nouvelles stratégies intégrées** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-changelog.md:957 +#: ../../source/ref-changelog.md:319 +#, fuzzy msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " -"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" +"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:321 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." msgstr "" -"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. 
" -"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/ref-changelog.md:960 +#: ../../source/ref-changelog.md:325 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -"**Nouvel exemple de code PyTorch Lightning** " -"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/ref-changelog.md:962 +#: ../../source/ref-changelog.md:327 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." 
msgstr "" -"**Nouvel exemple de code d'autocodage variationnel** " -"([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:329 +#, fuzzy msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" -"**Nouvel exemple de code scikit-learn** " -"([#748](https://github.com/adap/flower/pull/748))" +"**Documentation restructurée** " +"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:966 +#: ../../source/ref-changelog.md:331 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" -"**Nouvelle stratégie expérimentale TensorBoard** " -"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-changelog.md:970 +#: ../../source/ref-changelog.md:333 +#, fuzzy msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -"Amélioration de l'exemple de code TensorFlow avancé " -"([#769](https://github.com/adap/flower/pull/769))" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:971 +#: ../../source/ref-changelog.md:335 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. 
Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -"Avertissement lorsque `min_available_clients` est mal configuré " -"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/ref-changelog.md:972 -msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" -msgstr "" -"Amélioration de la documentation sur le serveur gRPC " -"([#841](https://github.com/adap/flower/pull/841))" +#: ../../source/ref-changelog.md:337 +#, fuzzy +msgid "v1.8.0 (2024-04-03)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:973 +#: ../../source/ref-changelog.md:343 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -"Amélioration du message d'erreur dans `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:347 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" -msgstr "" -"Exemple de code de démarrage rapide PyTorch amélioré " -"([#852](https://github.com/adap/flower/pull/852))" - -#: ../../source/ref-changelog.md:978 -msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " 
+"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -"**Désactivé l'évaluation finale distribuée** " -"([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:349 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. 
" +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -"Le comportement précédent consistait à effectuer un dernier tour " -"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " -"souvent pas nécessaire (par exemple, lors de l'utilisation de " -"l'évaluation côté serveur). Le comportement précédent peut être activé en" -" passant `force_final_distributed_eval=True` à `start_server`." -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:351 +#, fuzzy msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -"**Renommé stratégie q-FedAvg** " -"([#802](https://github.com/adap/flower/pull/802))" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:984 +#: ../../source/ref-changelog.md:353 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). 
Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" -"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " -"refléter la notation donnée dans l'article original (q-FFL est l'objectif" -" d'optimisation, q-FedAvg est le solveur proposé). Notez que la classe " -"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " -"des raisons de compatibilité (elle sera supprimée dans une prochaine " -"version)." 
-#: ../../source/ref-changelog.md:986 +#: ../../source/ref-changelog.md:355 +#, fuzzy msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:988 +#: ../../source/ref-changelog.md:357 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." msgstr "" -"Cet exemple a été remplacé par un nouvel exemple. 
Le nouvel exemple est " -"basé sur le moteur expérimental du client virtuel, qui deviendra la " -"nouvelle méthode par défaut pour effectuer la plupart des types de " -"simulations à grande échelle dans Flower. L'exemple existant a été " -"conservé à des fins de référence, mais il pourrait être supprimé à " -"l'avenir." - -#: ../../source/ref-changelog.md:990 -msgid "v0.16.0 (2021-05-11)" -msgstr "v0.16.0 (2021-05-11)" -#: ../../source/ref-changelog.md:994 +#: ../../source/ref-changelog.md:359 +#, fuzzy msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -"**Nouvelles stratégies intégrées** " -"([#549](https://github.com/adap/flower/pull/549))" - -#: ../../source/ref-changelog.md:996 -msgid "(abstract) FedOpt" -msgstr "(résumé) FedOpt" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:361 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. 
The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -"**Métriques personnalisées pour le serveur et les stratégies** " -"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:1001 +#: ../../source/ref-changelog.md:363 +#, fuzzy msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -"Le serveur Flower est maintenant totalement agnostique, toutes les " -"instances restantes de métriques spécifiques à une tâche (telles que " -"`accuracy`) ont été remplacées par des dictionnaires de métriques " -"personnalisées. Flower 0.15 a introduit la possibilité de passer un " -"dictionnaire contenant des métriques personnalisées du client au serveur." -" À partir de cette version, les métriques personnalisées remplacent les " -"métriques spécifiques à une tâche sur le serveur." 
+"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:1003 -#, fuzzy +#: ../../source/ref-changelog.md:365 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -"Les dictionnaires de métriques personnalisés sont maintenant utilisés " -"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " -"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " -"permettent aux fonctions d'évaluation passées aux stratégies intégrées " -"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " -"stratégies peuvent même renvoyer des dictionnaires de métriques " -"*agrégées* pour que le serveur puisse en garder la trace." 
-#: ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:367 #, fuzzy msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -"Les implémentations de Stratey doivent migrer leurs méthodes " -"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour " -"(par exemple, en renvoyant simplement un `{}` vide), les fonctions " -"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à " -"`return loss, {\"accuracy\" : accuracy}`." +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/ref-changelog.md:1007 +#: ../../source/ref-changelog.md:369 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. 
You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" -"Les types de retour du style Flower 0.15 sont dépréciés (mais toujours " -"pris en charge), la compatibilité sera supprimée dans une prochaine " -"version." -#: ../../source/ref-changelog.md:1009 +#: ../../source/ref-changelog.md:371 +#, fuzzy msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" -msgstr "" -"**Avertissements de migration pour les fonctionnalités obsolètes** " -"([#690](https://github.com/adap/flower/pull/690))" - -#: ../../source/ref-changelog.md:1011 -msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." -msgstr "" -"Les versions antérieures de Flower ont souvent été migrées vers de " -"nouvelles API, tout en maintenant la compatibilité avec les anciennes " -"API. Cette version introduit des messages d'avertissement détaillés si " -"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " -"d'avertissement fournissent souvent des détails sur la façon de migrer " -"vers des API plus récentes, facilitant ainsi la transition d'une version " -"à l'autre." 
- -#: ../../source/ref-changelog.md:1013 -msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -"Amélioration des docs et des docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" - -#: ../../source/ref-changelog.md:1015 -msgid "MXNet example and documentation" -msgstr "Exemple et documentation MXNet" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-changelog.md:1017 +#: ../../source/ref-changelog.md:373 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr 
run`)." msgstr "" -"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" -" fédération ([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/ref-changelog.md:1021 +#: ../../source/ref-changelog.md:375 +#, fuzzy msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -"**Serveur agnostique de sérialisation** " -"([#721](https://github.com/adap/flower/pull/721))" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:1023 +#: ../../source/ref-changelog.md:377 msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -"Le serveur Flower est désormais totalement agnostique en matière de " -"sérialisation. L'utilisation antérieure de la classe `Weights` (qui " -"représente les paramètres sous forme de tableaux NumPy désérialisés) a " -"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). " -"Les objets `Parameters` sont totalement agnostiques en matière de " -"sérialisation et représentent les paramètres sous forme de tableaux " -"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " -"d'octets doivent être interprétés (par exemple, pour la " -"sérialisation/désérialisation)." -#: ../../source/ref-changelog.md:1025 +#: ../../source/ref-changelog.md:379 +#, fuzzy msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -"Les stratégies intégrées mettent en œuvre cette approche en gérant en " -"interne la sérialisation et la désérialisation de `Weights`. Les " -"implémentations de stratégies personnalisées ou tierces doivent être " -"mises à jour avec les définitions de méthodes de stratégie légèrement " -"modifiées. 
Les auteurs de stratégies peuvent consulter le PR " -"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " -"stratégies peuvent facilement migrer vers le nouveau format." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:1027 +#: ../../source/ref-changelog.md:381 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. `--max-" +"retries` will define the number of tentatives the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." 
msgstr "" -"Déclassé `flwr.server.Server.evaluate`, utiliser " -"`flwr.server.Server.evaluate_round` à la place " -"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:1029 -msgid "v0.15.0 (2021-03-12)" -msgstr "v0.15.0 (2021-03-12)" - -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:383 +#, fuzzy msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -"**Initialisation des paramètres côté serveur** " -"([#658](https://github.com/adap/flower/pull/658))" +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/ref-changelog.md:1035 +#: ../../source/ref-changelog.md:385 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." msgstr "" -"Les paramètres du modèle peuvent maintenant être initialisés côté " -"serveur. L'initialisation des paramètres côté serveur fonctionne via une " -"nouvelle méthode `Strategy` appelée `initialize_parameters`." -#: ../../source/ref-changelog.md:1037 +#: ../../source/ref-changelog.md:387 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. 
Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -"Les stratégies intégrées prennent en charge un nouvel argument du " -"constructeur appelé `initial_parameters` pour définir les paramètres " -"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " -"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:389 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
msgstr "" -"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " -"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " -"l'un des clients connectés ses paramètres et les utilisera comme " -"paramètres globaux initiaux)." -#: ../../source/ref-changelog.md:1060 +#: ../../source/ref-changelog.md:391 +#, fuzzy msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " -"`flwr.server.strategy.FedAvg`, qui est équivalent)" - -#: ../../source/ref-changelog.md:1062 -msgid "v0.14.0 (2021-02-18)" -msgstr "v0.14.0 (2021-02-18)" +"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:1066 +#: ../../source/ref-changelog.md:393 msgid "" -"**Generalized** `Client.fit` 
**and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." msgstr "" -"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " -"retour** ([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/ref-changelog.md:1068 +#: ../../source/ref-changelog.md:395 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" -msgstr "" -"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " -"associant les clés `str` aux valeurs des types suivants : `bool`, " -"`bytes`, `float`, `int`, `str`. Cela signifie que l'on peut renvoyer des " -"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " -"du serveur !" 
+"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " 
+"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " 
+"[#2954](https://github.com/adap/flower/pull/2954))" +msgstr "" -#: ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:401 +#, fuzzy +msgid "v1.7.0 (2024-02-05)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:407 msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -"Cette amélioration a également permis de rendre plus cohérents les types " -"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " -"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " -"d'exemples, et un dictionnaire contenant des valeurs arbitraires " -"spécifiques au problème comme la précision." -#: ../../source/ref-changelog.md:1072 +#: ../../source/ref-changelog.md:411 +#, fuzzy msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." 
+"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -"Au cas où tu te poserais la question : cette fonctionnalité est " -"compatible avec les projets existants, la valeur de retour supplémentaire" -" du dictionnaire est facultative. Le nouveau code doit cependant migrer " -"vers les nouveaux types de retour pour être compatible avec les " -"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " -"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " -"ci-dessous pour plus de détails." +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:1074 +#: ../../source/ref-changelog.md:413 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." 
msgstr "" -"*Exemple de code:* note les valeurs de retour du dictionnaire " -"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" -#: ../../source/ref-changelog.md:1089 +#: ../../source/ref-changelog.md:415 +#, fuzzy msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -"**Généralisé** `config` **argument dans** `Client.fit` **et** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/ref-changelog.md:1091 +#: ../../source/ref-changelog.md:417 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " -"signifie que les valeurs du dictionnaire devaient être des chaînes. La " -"nouvelle version généralise cela pour permettre les valeurs des types " -"suivants : `bool`, `bytes`, `float`, `int`, `str`." -#: ../../source/ref-changelog.md:1093 +#: ../../source/ref-changelog.md:419 +#, fuzzy msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" 
+"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -"Cela signifie que l'on peut maintenant passer des valeurs presque " -"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. " -"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " -"du côté client !" +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:1095 +#: ../../source/ref-changelog.md:421 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" -" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" - -#: ../../source/ref-changelog.md:1112 -msgid "v0.13.0 (2021-01-08)" -msgstr "v0.13.0 (2021-01-08)" -#: ../../source/ref-changelog.md:1116 +#: ../../source/ref-changelog.md:423 +#, fuzzy msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -"Nouvel exemple : PyTorch de centralisé à fédéré " -"([#549](https://github.com/adap/flower/pull/549))" - -#: ../../source/ref-changelog.md:1117 -msgid "Improved documentation" -msgstr "Amélioration de la documentation" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:1118 -msgid "New documentation theme 
([#551](https://github.com/adap/flower/pull/551))" +#: ../../source/ref-changelog.md:425 +msgid "" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -"Nouveau thème de documentation " -"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:1119 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" - -#: ../../source/ref-changelog.md:1120 +#: ../../source/ref-changelog.md:427 +#, fuzzy msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -"Mise à jour de la documentation des exemples " -"([#549](https://github.com/adap/flower/pull/549))" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:1121 +#: ../../source/ref-changelog.md:429 msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." 
msgstr "" -"Suppression de la documentation obsolète " -"([#548](https://github.com/adap/flower/pull/548))" - -#: ../../source/ref-changelog.md:1123 -msgid "Bugfix:" -msgstr "Correction de bogues :" -#: ../../source/ref-changelog.md:1125 +#: ../../source/ref-changelog.md:431 +#, fuzzy msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " -"déconnexion des clients est maintenant gérée dans " -"`flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." - -#: ../../source/ref-changelog.md:1127 -msgid "v0.12.0 (2020-12-07)" -msgstr "v0.12.0 (2020-12-07)" - -#: ../../source/ref-changelog.md:1129 ../../source/ref-changelog.md:1145 -msgid "Important changes:" -msgstr "Changements importants :" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:1131 +#: ../../source/ref-changelog.md:433 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" -msgstr "" -"Ajout d'un exemple pour les périphériques embarqués " -"([#507](https://github.com/adap/flower/pull/507))" - -#: ../../source/ref-changelog.md:1132 -msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
msgstr "" -"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" -#: ../../source/ref-changelog.md:1133 +#: ../../source/ref-changelog.md:435 +#, fuzzy msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -"Déclassement du paquet `flwr_example` et migration des exemples dans le " -"répertoire de premier niveau `examples` " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" - -#: ../../source/ref-changelog.md:1135 -msgid "v0.11.0 (2020-11-30)" -msgstr "v0.11.0 (2020-11-30)" - -#: ../../source/ref-changelog.md:1137 -msgid "Incompatible changes:" -msgstr "Changements incompatibles :" +"**([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:1139 +#: ../../source/ref-changelog.md:437 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of 
" -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -"Renommé les méthodes de stratégie " -"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" -" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " -"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" -" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " -"quatre méthodes de Stratégie. Pour migrer, renommez les méthodes de " -"`Strategy` suivantes en conséquence :" - -#: ../../source/ref-changelog.md:1140 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "`on_configure_evaluate` => `configure_evaluate`" - -#: ../../source/ref-changelog.md:1141 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" - -#: ../../source/ref-changelog.md:1142 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "`on_configure_fit` => `configure_fit`" - -#: ../../source/ref-changelog.md:1143 -msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "`on_aggregate_fit` => `aggregate_fit`" -#: ../../source/ref-changelog.md:1147 +#: ../../source/ref-changelog.md:439 +#, fuzzy msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." 
+"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -"Déclassé `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " -"`FedAvg` à la place." +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-changelog.md:1148 +#: ../../source/ref-changelog.md:441 +#, fuzzy msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -"Exemples simplifiés et lignes de base " -"([#484](https://github.com/adap/flower/pull/484))." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:1149 +#: ../../source/ref-changelog.md:443 +#, fuzzy msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " -"de stratégie ([#483](https://github.com/adap/flower/pull/483))." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-changelog.md:1150 +#: ../../source/ref-changelog.md:445 +#, fuzzy msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." 
+"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-changelog.md:1151 +#: ../../source/ref-changelog.md:447 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" -"Amélioration des docstrings `Stratégie` " -"([#470](https://github.com/adap/flower/pull/470))." -#: ../../source/ref-example-projects.rst:2 +#: ../../source/ref-changelog.md:449 #, fuzzy -msgid "Example projects" -msgstr "Exemples de PyTorch" - -#: ../../source/ref-example-projects.rst:4 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." 
+"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " -"montrent comment Flower peut être utilisé pour fédérer différents types " -"de pipelines d'apprentissage automatique existants, qui s'appuient " -"généralement sur des frameworks d'apprentissage automatique populaires " -"tels que `PyTorch `_ ou `TensorFlow " -"`_." - -#: ../../source/ref-example-projects.rst:10 -#, fuzzy -msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" -msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." 
+"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-example-projects.rst:14 -msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +#: ../../source/ref-changelog.md:451 +msgid "Many Flower code examples received substantial updates." msgstr "" -"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " -"d'images CIFAR-10 avec MobileNetV2 :" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 #, fuzzy -msgid "" -"`Quickstart TensorFlow (Code) " -"`_" -msgstr "" -"`Quickstart TensorFlow (Code) " -"`_" +msgid "**Update Flower Baselines**" +msgstr "Demande pour une nouvelle Flower Baseline" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/ref-changelog.md:455 #, fuzzy -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" -msgstr "" -"`Quickstart TensorFlow (Tutorial) `_" - -#: ../../source/ref-example-projects.rst:19 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -"`Quickstart TensorFlow (Blog Post) `_" - -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" -msgstr "Démarrage rapide de PyTorch" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " 
+"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-example-projects.rst:25 -msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +#: ../../source/ref-changelog.md:456 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -"L'exemple de démarrage rapide PyTorch montre la classification d'images " -"CIFAR-10 avec un simple réseau neuronal convolutif :" +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-changelog.md:457 #, fuzzy -msgid "" -"`Quickstart PyTorch (Code) " -"`_" +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -"`Quickstart PyTorch (Code) " -"`_" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-example-projects.rst:29 +#: ../../source/ref-changelog.md:458 #, fuzzy -msgid ":doc:`Quickstart PyTorch (Tutorial) `" -msgstr "" -"`Quickstart PyTorch (Tutorial) `_" +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" -msgstr "PyTorch : De la centralisation à la fédération" +#: ../../source/ref-changelog.md:459 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-example-projects.rst:35 -msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +#: ../../source/ref-changelog.md:460 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" -" 
l'aide de Flower :" +"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-changelog.md:462 #, fuzzy msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" -msgstr "" -"`PyTorch : De la centralisation à la fédération (Code) " -"`_" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" +msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/ref-example-projects.rst:38 -#, fuzzy +#: ../../source/ref-changelog.md:464 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " 
+"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -"`PyTorch : De la centralisation à la fédération (Tutoriel) " -"`_" -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" +#: ../../source/ref-changelog.md:466 +msgid "" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." 
+msgstr "" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-changelog.md:468 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -"Cet exemple montre comment Flower peut être utilisé pour construire un " -"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " -"Jetson :" -#: ../../source/ref-example-projects.rst:46 -#, fuzzy +#: ../../source/ref-changelog.md:470 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " 
+"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " -"`_" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-changelog.md:474 +#, fuzzy msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"**Deprecate** 
`start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " -"`_" +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:476 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -"Cette page rassemble les réponses aux questions les plus fréquemment " -"posées sur l'apprentissage fédéré avec Flower." -#: ../../source/ref-faq.rst +#: ../../source/ref-changelog.md:478 #, fuzzy -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgid "" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -":fa:`eye,mr-1` Flower peut-il fonctionner sur les ordinateurs portables " -"Juptyter / Google Colab ?" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-changelog.md:480 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." 
msgstr "" -"Oui, c'est possible ! Flower est même livré avec quelques optimisations " -"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " -"démarrage rapide :" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-changelog.md:482 #, fuzzy msgid "" -"`Flower simulation PyTorch " -"`_" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" +"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:484 #, fuzzy msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" -msgstr "" -"`Flower Quickstart (TensorFlow/Keras) " -"`_" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " -"sur un Raspberry Pi ?" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-changelog.md:486 #, fuzzy msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -"Trouve le `blog post about federated learning on embedded device ici " -"`_" -" et l'exemple de code GitHub correspondant " -"`_." 
+"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: ../../source/ref-changelog.md:488 +msgid "" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." msgstr "" -":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " -"sur les appareils Android ?" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:490 #, fuzzy msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -"Oui. Jetez un coup d'œil à notre `blog post " -"`_ ou consultez l'`exemple de code Android sur GitHub " -"`_." +"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:492 msgid "" -"`Android Kotlin example `_" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" -msgstr "" +#: ../../source/ref-changelog.md:494 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/ref-changelog.md:500 +msgid "" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" -" ?" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:504 +#, fuzzy msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " -"environnement blockchain est disponible ici :" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:506 +#, fuzzy msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -"`Flower meets Nevermined GitHub Repository `_." 
+"**([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:508 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." msgstr "" -"`Flower rencontre Nevermined vidéo YouTube " -"`_." -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-changelog.md:510 #, fuzzy msgid "" -"`Flower meets KOSMoS `_." +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -"`Flower rencontre KOSMoS `_." +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:512 msgid "" -"`Flower meets Talan blog post `_ ." +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -"`Flower meets Talan blog post `_ ." -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:514 +#, fuzzy msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -"`Flower rencontre Talan Dépôt GitHub " -"`_ ." 
- -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" -msgstr "Télémétrie" +"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:516 +#, fuzzy msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -"Le projet open-source Flower recueille des mesures d'utilisation " -"**anonymes** afin de prendre des décisions éclairées pour améliorer " -"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" -" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " -"confrontés." +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:518 +#, fuzzy msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -"**Flower est un cadre convivial pour l'IA collaborative et la science des" -" données.** En restant fidèle à cette déclaration, Flower permet de " -"désactiver facilement la télémétrie pour les utilisateurs qui ne " -"souhaitent pas partager des mesures d'utilisation anonymes." 
- -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "Principes" +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:520 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -"Nous suivons des principes stricts concernant la collecte de données " -"anonymes sur l'utilisation :" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:522 +#, fuzzy msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " -"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/ref-telemetry.md:12 -msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." msgstr "" -"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " -"contiennent aucune information personnelle identifiable (PII). 
Voir " -"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " -"mesures sont rapportées." -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:526 +#, fuzzy msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " -"sont rapportées ; voir la section \"[Comment inspecter ce qui est " -"rapporté](#how-to-inspect-what-is-being-reported)\"" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-telemetry.md:14 -#, fuzzy +#: ../../source/ref-changelog.md:528 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu " -"as des commentaires ; voir la section \"[Comment nous contacter ](#how-" -"to-contact-us)\" pour plus de détails." 
- -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "Comment se désinscrire" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:530 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -"Lorsque Flower démarre, il vérifie la présence d'une variable " -"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. La télémétrie peut " -"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. En " -"supposant que tu démarres un serveur ou un client Flower, fais-le " -"simplement en faisant précéder ta commande de la façon suivante :" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:532 +#, fuzzy msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, " -"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton " -"environnement) pour désactiver la télémétrie de la fleur de façon " -"permanente." 
- -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "Mesures collectées" - -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" -msgstr "La télémétrie des fleurs recueille les métriques suivantes :" +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:534 msgid "" -"**Flower version.** Understand which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -"**Cela nous aide à décider si nous devons investir des efforts dans la " -"publication d'une version corrective pour une version plus ancienne de " -"Flower ou si nous devons plutôt utiliser la bande passante pour " -"développer de nouvelles fonctionnalités." -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:536 +#, fuzzy msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -"**Système d'exploitation.** Nous permet de répondre à des questions " -"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou " -"Windows ?" 
+"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:538 +#, fuzzy msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." -msgstr "" -"**Version de Python.** Connaître la version de Python nous aide, par " -"exemple, à décider si nous devons investir des efforts dans la prise en " -"charge des anciennes versions de Python ou cesser de les prendre en " -"charge et commencer à tirer parti des nouvelles fonctionnalités de " -"Python." - -#: ../../source/ref-telemetry.md:36 -msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -"**Comprendre l'environnement matériel dans lequel Flower est utilisé " -"permet de décider si nous devrions, par exemple, faire plus d'efforts " -"pour prendre en charge les environnements à faibles ressources." +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:540 +#, fuzzy msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." 
+"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -"**Mode d'exécution** Connaître le mode d'exécution dans lequel Flower " -"démarre nous permet de comprendre à quel point certaines fonctionnalités " -"sont utilisées et de mieux établir les priorités en fonction de cela." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 +#, fuzzy msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " -"aléatoire à chaque fois qu'une charge de travail Flower démarre. Cela " -"nous permet de comprendre quels types d'appareils non seulement démarrent" -" les charges de travail Flower, mais aussi les terminent avec succès." +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:548 +#, fuzzy msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). 
For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -"**Source.** La télémétrie de Flower essaie de stocker un ID de source " -"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " -"télémétrie est généré. L'ID de source est important pour identifier si un" -" problème est récurrent ou si un problème est déclenché par plusieurs " -"clusters fonctionnant simultanément (ce qui arrive souvent en " -"simulation). Par exemple, si un périphérique exécute plusieurs charges de" -" travail en même temps, et que cela entraîne un problème, alors, afin de " -"reproduire le problème, plusieurs charges de travail doivent être " -"démarrées en même temps." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:550 +#, fuzzy msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -"Tu peux supprimer l'identifiant de la source à tout moment. 
Si tu " -"souhaites que tous les événements enregistrés sous un identifiant de " -"source spécifique soient supprimés, tu peux envoyer une demande de " -"suppression mentionnant l'identifiant de source à `telemetry@flower.ai`. " -"Tous les événements liés à cet identifiant de source seront alors " -"définitivement supprimés." +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/ref-telemetry.md:46 +#: ../../source/ref-changelog.md:552 +#, fuzzy msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -"Nous ne collecterons aucune information personnelle identifiable. Si tu " -"penses que l'une des métriques collectées pourrait être utilisée à " -"mauvais escient de quelque manière que ce soit, merci de [nous " -"contacter](#commentnouscontacter). Nous mettrons à jour cette page pour " -"refléter toute modification des métriques collectées et nous publierons " -"les changements dans le journal des modifications (changelog)." +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-telemetry.md:48 +#: ../../source/ref-changelog.md:554 +#, fuzzy msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." 
+"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -"Si tu penses que d'autres mesures nous seraient utiles pour mieux " -"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " -"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " -"la vie privée des utilisateurs, nous pourrons les ajouter." +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" -msgstr "Comment inspecter ce qui est rapporté" +#: ../../source/ref-changelog.md:556 +#, fuzzy +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/ref-telemetry.md:52 -msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +#: ../../source/ref-changelog.md:558 +#, fuzzy +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " -"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " -"informations de télémétrie rapportées en définissant la variable " -"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " -"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " -"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " -"sans envoyer de mesures." 
+"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/ref-telemetry.md:58 -msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" +#: ../../source/ref-changelog.md:560 +#, fuzzy +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -"L'inspecteur Flower telemetry sans envoyer de métriques d'utilisation " -"anonymes, utilise les deux variables d'environnement :" - -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" -msgstr "Comment nous contacter" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-telemetry.md:66 -msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +#: ../../source/ref-changelog.md:562 +#, fuzzy +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -"Si tu as des commentaires ou des idées pour améliorer la façon dont nous " -"traitons les mesures d'utilisation anonymes, contacte-nous via " -"[Slack](https://flower.ai/join-slack/) (canal `#telemetry`) ou par " -"courriel (`telemetry@flower.ai`)." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-android.rst:-1 -msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." 
-msgstr "" +#: ../../source/ref-changelog.md:564 +#, fuzzy +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-quickstart-android.rst:5 +#: ../../source/ref-changelog.md:566 #, fuzzy -msgid "Quickstart Android" -msgstr "Démarrage rapide des Pandas" +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/ref-changelog.md:568 #, fuzzy -msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" -msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/ref-changelog.md:570 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/ref-changelog.md:572 +#, fuzzy msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." -msgstr "" - -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" -msgstr "Démarrage rapide fastai" - -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" 
+"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:574 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." - -#: ../../source/tutorial-quickstart-huggingface.rst:-1 -msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." 
-msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" -msgstr "Démarrage rapide 🤗 Transformateurs" +"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:576 +#, fuzzy msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -"Construisons un système d'apprentissage fédéré à l'aide des " -"transformateurs Hugging Face et de Flower !" +"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:578 +#, fuzzy msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. 
More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " -"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " -"précisément, nous mettrons au point un modèle Transformer pré-entraîné " -"(distilBERT) pour la classification de séquences sur un ensemble de " -"données d'évaluations IMDB. L'objectif final est de détecter si " -"l'évaluation d'un film est positive ou négative." - -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" -msgstr "Dépendances" +"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:580 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. 
This can be done using " -":code:`pip`:" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -"Pour suivre ce tutoriel, tu devras installer les paquets suivants : " -":code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, et " -":code:`transformers`. 
Cela peut être fait en utilisant :code:`pip` :" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "Flux de travail standard pour le visage" - -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" -msgstr "Traitement des données" - -#: ../../source/tutorial-quickstart-huggingface.rst:35 -msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -"Pour récupérer le jeu de données IMDB, nous utiliserons la bibliothèque " -":code:`datasets` de Hugging Face. Nous devons ensuite tokeniser les " -"données et créer des :code:`PyTorch` dataloaders, ce qui est fait dans la" -" fonction :code:`load_data` :" - -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" -msgstr "Former et tester le modèle" +"Flower a reçu de nombreuses améliorations sous le capot, trop nombreuses " +"pour être énumérées ici." -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:586 +#, fuzzy msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. 
This is very similar to any " -":code:`PyTorch` training or testing loop:" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -"Une fois que nous avons trouvé un moyen de créer notre trainloader et " -"notre testloader, nous pouvons nous occuper de l'entraînement et du test." -" C'est très similaire à n'importe quelle boucle d'entraînement ou de test" -" :code:`PyTorch` :" - -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" -msgstr "Créer le modèle lui-même" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:588 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." 
msgstr "" -"Pour créer le modèle lui-même, nous allons simplement charger le modèle " -"distillBERT pré-entraîné en utilisant le " -":code:`AutoModelForSequenceClassification` de Hugging Face :" - -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "Fédérer l'exemple" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" -msgstr "Création du client IMDBC" - -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/ref-changelog.md:590 +#, fuzzy msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -"Pour fédérer notre exemple à plusieurs clients, nous devons d'abord " -"écrire notre classe de client Flower (héritant de " -":code:`flwr.client.NumPyClient`). C'est très facile, car notre modèle est" -" un modèle :code:`PyTorch` standard :" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:592 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." 
msgstr "" -"La fonction :code:`get_parameters` permet au serveur d'obtenir les " -"paramètres du client. Inversement, la fonction :code:`set_parameters` " -"permet au serveur d'envoyer ses paramètres au client. Enfin, la fonction " -":code:`fit` forme le modèle localement pour le client, et la fonction " -":code:`evaluate` teste le modèle localement et renvoie les mesures " -"correspondantes." -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" -msgstr "Démarrer le serveur" +#: ../../source/ref-changelog.md:594 +#, fuzzy +msgid "v1.5.0 (2023-08-31)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:600 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -"Maintenant que nous avons un moyen d'instancier les clients, nous devons " -"créer notre serveur afin d'agréger les résultats. 
Avec Flower, cela peut " -"être fait très facilement en choisissant d'abord une stratégie (ici, nous" -" utilisons :code:`FedAvg`, qui définira les poids globaux comme la " -"moyenne des poids de tous les clients à chaque tour) et en utilisant " -"ensuite la fonction :code:`flwr.server.start_server` :" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:604 +#, fuzzy msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -"La fonction :code:`weighted_average` est là pour fournir un moyen " -"d'agréger les mesures réparties entre les clients (en gros, cela nous " -"permet d'afficher une belle moyenne de précision et de perte pour chaque " -"tour)." - -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" -msgstr "Tout assembler" - -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" -msgstr "Nous pouvons maintenant démarrer des instances de clients en utilisant :" - -#: ../../source/tutorial-quickstart-huggingface.rst:221 -msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "Et ils pourront se connecter au serveur et démarrer la formation fédérée." 
+"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/tutorial-quickstart-huggingface.rst:223 -#, fuzzy +#: ../../source/ref-changelog.md:606 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -"Si tu veux voir tout ce qui est mis ensemble, tu devrais consulter " -"l'exemple de code complet : " -"[https://github.com/adap/flower/tree/main/examples/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/ref-changelog.md:608 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
msgstr "" -"Bien sûr, c'est un exemple très basique, et beaucoup de choses peuvent " -"être ajoutées ou modifiées, il s'agissait juste de montrer avec quelle " -"simplicité on pouvait fédérer un flux de travail Hugging Face à l'aide de" -" Flower." -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/ref-changelog.md:610 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -"Notez 
que dans cet exemple, nous avons utilisé :code:`PyTorch`, mais nous" -" aurions très bien pu utiliser :code:`TensorFlow`." -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-changelog.md:612 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:5 -#, fuzzy -msgid "Quickstart iOS" -msgstr "Démarrage rapide XGBoost" - -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-changelog.md:614 #, fuzzy msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -"Dans ce tutoriel, nous allons apprendre, comment former un réseau " -"neuronal convolutif sur MNIST en utilisant Flower et PyTorch." +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/tutorial-quickstart-ios.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:616 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." 
msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/ref-changelog.md:618 #, fuzzy msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " -"même modèle." +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-quickstart-ios.rst:17 -#, fuzzy +#: ../../source/ref-changelog.md:620 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/ref-changelog.md:622 #, fuzzy msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. 
We first need to install Flower. You" -" can do this by using pip:" -msgstr "" -"Maintenant que nous avons une idée générale de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"exécutant :" - -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" +"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: 
../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "Client de la fleur" - -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/ref-changelog.md:624 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 -msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." -msgstr "" +#: ../../source/ref-changelog.md:626 +#, fuzzy +msgid "**Deprecate Python 3.7**" +msgstr "**Créer le PR**" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#: ../../source/ref-changelog.md:628 +msgid "" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/ref-changelog.md:630 +#, fuzzy msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. 
We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" +"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/ref-changelog.md:632 +#, fuzzy msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018] " +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/ref-changelog.md:634 +#, fuzzy msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." 
+"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-changelog.md:636 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-changelog.md:638 +#, fuzzy msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." 
+"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" +"**Nouvel exemple de code PyTorch avancé** " +"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Serveur de Flower" - -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-changelog.md:640 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -"Pour les charges de travail simples, nous pouvons démarrer un serveur " -"Flower et laisser toutes les possibilités de configuration à leurs " -"valeurs par défaut. Dans un fichier nommé :code:`server.py`, importe " -"Flower et démarre le serveur :" -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" -msgstr "Entraîne le modèle, fédéré !" 
- -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-changelog.md:642 +#, fuzzy msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout " -"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " -"généralement un serveur et plusieurs clients. Nous devons donc commencer " -"par démarrer le serveur :" +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/ref-changelog.md:644 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-changelog.md:646 #, fuzzy msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -"Félicitations ! 
Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-changelog.md:648 +#, fuzzy msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" -msgstr "Démarrage rapide de JAX" - -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-changelog.md:650 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" -msgstr "Démarrage rapide des Pandas" - -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" -msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !" - -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/ref-changelog.md:652 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." 
+"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-changelog.md:654 +#, fuzzy msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/ref-changelog.md:656 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -"Dans ce tutoriel, nous allons apprendre à entraîner un réseau neuronal " -"convolutif sur CIFAR10 à l'aide de Flower et PyTorch." -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/ref-changelog.md:658 #, fuzzy msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." 
+"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -"Tout d'abord, il est recommandé de créer un environnement virtuel et de " -"tout exécuter au sein d'un `virtualenv `_." +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/ref-changelog.md:660 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." msgstr "" -"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " -"même modèle." -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/ref-changelog.md:662 +#, fuzzy msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." 
+"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-quickstart-pytorch.rst:23 -msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" -msgstr "" -"Maintenant que nous avons une idée générale de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"exécutant :" +#: ../../source/ref-changelog.md:664 +#, fuzzy +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/ref-changelog.md:666 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -"Puisque nous voulons utiliser PyTorch pour résoudre une tâche de vision " -"par ordinateur, allons-y et installons PyTorch et la bibliothèque " -"**torchvision** :" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/ref-changelog.md:668 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. 
Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -"Maintenant que nous avons installé toutes nos dépendances, lançons une " -"formation distribuée simple avec deux clients et un serveur. Notre " -"procédure de formation et l'architecture de notre réseau sont basées sur " -"`Deep Learning with PyTorch " -"`_ de" -" PyTorch." -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/ref-changelog.md:670 +#, fuzzy msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" -msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés à PyTorch :" - -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" -msgstr "En outre, nous définissons l'attribution des appareils dans PyTorch avec :" - -#: ../../source/tutorial-quickstart-pytorch.rst:62 -msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." 
+"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"Nous utilisons PyTorch pour charger CIFAR10, un ensemble de données de " -"classification d'images colorées populaire pour l'apprentissage " -"automatique. Le :code:`DataLoader()` de PyTorch télécharge les données " -"d'entraînement et de test qui sont ensuite normalisées." +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" + +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/ref-changelog.md:684 msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -"Définis la perte et l'optimiseur avec PyTorch L'entraînement de " -"l'ensemble de données se fait en bouclant sur l'ensemble de données, en " -"mesurant la perte correspondante et en l'optimisant." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/tutorial-quickstart-pytorch.rst:94 +#: ../../source/ref-changelog.md:688 msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -"Définis ensuite la validation du réseau d'apprentissage automatique. Nous" -" passons en boucle sur l'ensemble de test et mesurons la perte et la " -"précision de l'ensemble de test." 
+"**Introduire la prise en charge de XGBoost (**`FedXgbNnAvg` **stratégie " +"et exemple)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/ref-changelog.md:690 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -"Après avoir défini l'entraînement et le test d'un modèle d'apprentissage " -"automatique PyTorch, nous utilisons les fonctions pour les clients " -"Flower." +"Nous avons ajouté une nouvelle [stratégie] `FedXgbNnAvg` " +"(https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" et un [exemple de code] " +"(https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) " +"qui démontre l'utilisation de cette nouvelle stratégie dans un projet " +"XGBoost." 
-#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/ref-changelog.md:692 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -"Les clients de Flower utiliseront un CNN simple adapté de \"PyTorch : A " -"60 Minute Blitz\" :" +"**Introduction du SDK iOS (aperçu)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/tutorial-quickstart-pytorch.rst:142 +#: ../../source/ref-changelog.md:694 msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -"Après avoir chargé l'ensemble des données avec :code:`load_data()`, nous " -"définissons l'interface Flower." +"Il s'agit d'une mise à jour majeure pour tous ceux qui souhaitent mettre " +"en œuvre l'apprentissage fédéré sur les appareils mobiles iOS. Nous " +"disposons désormais d'un SDK swift iOS présent sous " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" qui facilitera grandement le processus de création d'applications. Pour " +"présenter son utilisation, l'[exemple " +"iOS](https://github.com/adap/flower/tree/main/examples/ios) a également " +"été mis à jour !" 
-#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/ref-changelog.md:696 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour former " -"le réseau neuronal que nous avons défini plus tôt)." +"**Introduire un nouveau tutoriel \"Qu'est-ce que l'apprentissage fédéré ?" +" \"** ([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/ref-changelog.md:698 +#, fuzzy msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. 
It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise PyTorch. Mettre en œuvre :code:`NumPyClient` signifie" -" généralement définir les méthodes suivantes (:code:`set_parameters` est " -"cependant facultatif) :" - -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" - -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" -msgstr ":code:`set_parameters` (optionnel)" +"Un nouveau [tutoriel d'entrée de gamme] " +"(https://flower.ai/docs/tutorial/Flower-0-What-is-FL.html) dans notre " +"documentation explique les bases de l'apprentissage fédéré. Il permet à " +"tous ceux qui ne connaissent pas l'apprentissage fédéré de commencer leur" +" voyage avec Flower. Fais-le suivre à tous ceux qui s'intéressent à " +"l'apprentissage fédéré !" 
-#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/ref-changelog.md:700 msgid "" -"update the local model weights with the parameters received from the " -"server" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -"mettre à jour les poids du modèle local avec les paramètres reçus du " -"serveur" - -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" -msgstr "fixe les poids du modèle local" - -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" -msgstr "entraîne le modèle local" - -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" -msgstr "recevoir les poids du modèle local mis à jour" - -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" -msgstr "teste le modèle local" - -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" -msgstr "qui peut être mis en œuvre de la manière suivante :" +"**Introduire une nouvelle fleur Référence : FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/ref-changelog.md:702 msgid "" -"We can now create 
an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`CifarClient` et ajouter une ligne pour exécuter ce client :" +"Cette nouvelle ligne de base reproduit la tâche MNIST+CNN de l'article " +"[Federated Optimization in Heterogeneous Networks (Li et al., 2018)] " +"(https://arxiv.org/abs/1812.06127). Elle utilise la stratégie `FedProx`, " +"qui vise à rendre la convergence plus robuste dans des contextes " +"hétérogènes." -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 -#, fuzzy +#: ../../source/ref-changelog.md:704 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " -"connecter. 
Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " -":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" -" avec le serveur et les clients fonctionnant sur des machines " -"différentes, tout ce qui doit changer est l'adresse " -":code:`server_address` vers laquelle nous dirigeons le client." +"**Introduire une nouvelle ligne de base pour les fleurs : FedAvg " +"FEMNIST** ([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-changelog.md:706 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -"Une fois que le serveur fonctionne, nous pouvons démarrer les clients " -"dans différents terminaux. Ouvre un nouveau terminal et démarre le " -"premier client :" - -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" -msgstr "Ouvre un autre terminal et démarre le deuxième client :" +"Cette nouvelle ligne de base reproduit une expérience évaluant les " +"performances de l'algorithme FedAvg sur le jeu de données FEMNIST tiré de" +" l'article [LEAF : A Benchmark for Federated Settings (Caldas et al., " +"2018)] (https://arxiv.org/abs/1812.01097)." 
-#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-changelog.md:708 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -"Chaque client aura son propre ensemble de données. Tu devrais maintenant " -"voir comment la formation se déroule dans le tout premier terminal (celui" -" qui a démarré le serveur) :" +"**Introduire l'API REST (expérimentale)** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/tutorial-quickstart-pytorch.rst:271 -#, fuzzy +#: ../../source/ref-changelog.md:710 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -"Félicitations ! 
Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-pytorch`." +"Une nouvelle API REST a été introduite comme alternative à la pile de " +"communication basée sur gRPC. Dans cette version initiale, l'API REST ne " +"prend en charge que les clients anonymes." -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/ref-changelog.md:712 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" +"Remarque : l'API REST est encore expérimentale et est susceptible de " +"changer de manière significative au fil du temps." -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" -msgstr "Démarrage rapide de PyTorch Lightning" - -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 -#, fuzzy +#: ../../source/ref-changelog.md:714 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant PyTorch " -"Lightning et Flower !" 
+"**Améliorer l'API (expérimentale) du pilote** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:716 msgid "" -"Please refer to the `full code example " -"`_ to learn more." -msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ pour en savoir plus." - -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" +"L'API du pilote est encore une fonction expérimentale, mais cette version" +" introduit quelques améliorations majeures. L'une des principales " +"améliorations est l'introduction d'une base de données SQLite pour " +"stocker l'état du serveur sur le disque (au lieu de la mémoire). Une " +"autre amélioration est que les tâches (instructions ou résultats) qui ont" +" été livrées seront désormais supprimées, ce qui améliore " +"considérablement l'efficacité de la mémoire d'un serveur Flower " +"fonctionnant depuis longtemps." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" -msgstr "Démarrage rapide de scikit-learn" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/ref-changelog.md:718 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -"Dans ce tutoriel, nous allons apprendre à former un :code:`modèle de " -"régression logistique` sur MNIST en utilisant Flower et scikit-learn." +"**Répare les problèmes de déversement liés à Ray pendant les " +"simulations** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/ref-changelog.md:720 #, fuzzy msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" -"Il est recommandé de créer un environnement virtuel et de tout exécuter " -"dans ce `virtualenv `_." +"Lors de l'exécution de longues simulations, `ray` déversait parfois " +"d'énormes quantités de données qui rendaient l'entraînement incapable de " +"continuer. ce problème est maintenant corrigé ! 🎉" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-changelog.md:722 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." 
+"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -"*Les clients* sont chargés de générer des mises à jour individuelles des " -"paramètres du modèle en fonction de leurs ensembles de données locales. " -"Ces mises à jour sont ensuite envoyées au *serveur* qui les agrège pour " -"produire un modèle global mis à jour. Enfin, le *serveur* renvoie cette " -"version améliorée du modèle à chaque *client*. Un cycle complet de mises " -"à jour des paramètres s'appelle un *round*." +"**Ajouter un nouvel exemple utilisant** `TabNet` **et Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-changelog.md:724 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -"Maintenant que nous avons une idée approximative de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"lançant :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -#, fuzzy -msgid "Since we want to use scikit-learn, let's go ahead and install it:" -msgstr "Puisque nous voulons utiliser scikt-learn, allons-y et installons-le :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" -msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" +"TabNet est un cadre puissant et flexible pour former des modèles " +"d'apprentissage automatique sur des données tabulaires. 
Nous avons " +"maintenant un exemple fédéré utilisant Flower : [quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)." -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/ref-changelog.md:726 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -"Maintenant que toutes nos dépendances sont installées, exécutons une " -"formation distribuée simple avec deux clients et un serveur. Cependant, " -"avant de configurer le client et le serveur, nous allons définir toutes " -"les fonctionnalités dont nous avons besoin pour notre configuration " -"d'apprentissage fédéré dans :code:`utils.py`. Le :code:`utils.py` " -"contient différentes fonctions définissant toutes les bases de " -"l'apprentissage automatique :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" -msgstr ":code:`get_model_parameters()`" +"**Ajouter un nouveau guide pratique pour le suivi des simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/ref-changelog.md:728 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." 
msgstr "" -"Renvoie les paramètres d'un modèle de régression logistique " -":code:`sklearn`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" -msgstr ":code:`set_model_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -#, fuzzy -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklean`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" -msgstr ":code:`set_initial_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" +"Nous avons maintenant un guide de documentation pour aider les " +"utilisateurs à surveiller leurs performances pendant les simulations." -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/ref-changelog.md:730 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -"Tu peux consulter :code:`utils.py` `ici " -"`_ pour plus de détails. Les fonctions prédéfinies sont " -"utilisées dans :code:`client.py` et importées. 
:code:`client.py` " -"nécessite également d'importer plusieurs paquets tels que Flower et " -"scikit-learn :" +"**Ajouter des mesures de formation à** `History` **objet pendant les " +"simulations** ([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/ref-changelog.md:732 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" +"La fonction `fit_metrics_aggregation_fn` peut être utilisée pour agréger " +"les mesures d'entraînement, mais les versions précédentes " +"n'enregistraient pas les résultats dans l'objet `History`. c'est " +"désormais le cas !" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/ref-changelog.md:734 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." 
+"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " 
+"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -"Ensuite, le modèle de régression logistique est défini et initialisé avec" -" :code:`utils.set_initial_params()`." +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " 
+"[#1705](https://github.com/ada" + +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/ref-changelog.md:748 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" -" la régression logistique que nous avons définie plus tôt)." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/ref-changelog.md:752 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. 
Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise scikit-learn. Mettre en œuvre :code:`NumPyClient` " -"signifie généralement définir les méthodes suivantes " -"(:code:`set_parameters` est cependant facultatif) :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" -msgstr "est directement importé avec :code:`utils.set_model_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" -msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" +"**Ajouter la prise en charge de** `workload_id` **et** `group_id` **dans " +"l'API du pilote** ([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/ref-changelog.md:754 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" -msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 -#, fuzzy -msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. 
In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"0.0.0:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons " -":code:`\"0.0.0:8080\"`. Si nous exécutons une charge de travail " -"véritablement fédérée avec le serveur et les clients s'exécutant sur des " -"machines différentes, tout ce qui doit changer est :code:`server_address`" -" que nous transmettons au client." +"L'API (expérimentale) Driver prend désormais en charge un `workload_id` " +"qui peut être utilisé pour identifier la charge de travail à laquelle une" +" tâche appartient. Elle prend également en charge un nouveau `group_id` " +"qui peut être utilisé, par exemple, pour indiquer le cycle de formation " +"en cours. Le `workload_id` et le `group_id` permettent tous deux aux " +"nœuds clients de décider s'ils veulent traiter une tâche ou non." -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/ref-changelog.md:756 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. 
First, we import " -"again all required libraries such as Flower and scikit-learn." +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -"Le serveur Flower suivant est un peu plus avancé et renvoie une fonction " -"d'évaluation pour l'évaluation côté serveur. Tout d'abord, nous importons" -" à nouveau toutes les bibliothèques requises telles que Flower et scikit-" -"learn." - -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" -msgstr ":code:`server.py`, importe Flower et démarre le serveur :" +"**Faire en sorte que l'adresse de l'API du conducteur et de l'API de la " +"flotte soit configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 -#, fuzzy +#: ../../source/ref-changelog.md:758 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -"Le nombre de tours d'apprentissage fédéré est défini dans " -":code:`fit_round()` et l'évaluation est définie dans " -":code:`get_evaluate_fn()`. La fonction d'évaluation est appelée après " -"chaque tour d'apprentissage fédéré et te donne des informations sur la " -"perte et la précision." 
+"Le serveur Flower (expérimental) de longue durée (Driver API et Fleet " +"API) peut maintenant configurer l'adresse du serveur de Driver API (via " +"`--driver-api-address`) et de Fleet API (via `--fleet-api-address`) lors " +"de son démarrage :" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/ref-changelog.md:760 +#, fuzzy msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -"Le :code:`main` contient l'initialisation des paramètres côté serveur " -":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " -":code:`fl.server.strategy:FedAvg()`. La stratégie est celle par défaut, " -"la moyenne fédérée (ou FedAvg), avec deux clients et une évaluation après" -" chaque tour d'apprentissage fédéré. Le serveur peut être démarré avec la" -" commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"``flower-superlink --driver-api-address \"0.0.0.0:8081\" --fleet-api-" +"address \"0.0.0.0:8086\" ``" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 -msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. 
We, therefore, have to start the server " -"first:" -msgstr "" -"Le client et le serveur étant prêts, nous pouvons maintenant tout lancer " -"et voir l'apprentissage fédéré en action. Les systèmes d'apprentissage " -"fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" -" commencer par lancer le serveur :" +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "Les adresses IPv4 et IPv6 sont toutes deux prises en charge." -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/ref-changelog.md:764 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples/sklearn-logreg-" -"mnist`." +"**Ajouter un nouvel exemple d'apprentissage fédéré utilisant fastai et " +"Flower** ([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/ref-changelog.md:766 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." -msgstr "" - -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" -msgstr "Démarrage rapide de TensorFlow" - -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. 
You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -"Construisons un système d'apprentissage fédéré en moins de 20 lignes de " -"code !" - -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" +"Un nouvel exemple de code (`quickstart-fastai`) démontre l'apprentissage " +"fédéré avec [fastai](https://www.fast.ai/) et Flower. Tu peux le trouver " +"ici : [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)." -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/ref-changelog.md:768 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" -msgstr "" -"Comme nous voulons utiliser l'API Keras de TensorFlow (TF), nous devons " -"également installer TF :" - -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -"Ensuite, dans un fichier appelé :code:`client.py`, importe Flower et " -"TensorFlow :" +"**Rendre l'exemple Android compatible avec** `flwr >= 1.0.0` **et les " +"dernières versions d'Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/ref-changelog.md:770 +#, fuzzy msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." 
+"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -"Nous utilisons les utilitaires Keras de TF pour charger CIFAR10, un " -"ensemble de données de classification d'images colorées populaire pour " -"l'apprentissage automatique. L'appel à " -":code:`tf.keras.datasets.cifar10.load_data()` télécharge CIFAR10, le met " -"en cache localement, puis renvoie l'ensemble d'entraînement et de test " -"sous forme de NumPy ndarrays." +"L'exemple de code Android a reçu une mise à jour substantielle : le " +"projet est compatible avec Flower 1.0 et les versions ultérieures, " +"l'interface utilisateur a reçu un rafraîchissement complet, et le projet " +"est mis à jour pour être compatible avec les outils Android les plus " +"récents." -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/ref-changelog.md:772 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -"Ensuite, nous avons besoin d'un modèle. Pour les besoins de ce tutoriel, " -"nous utilisons MobilNetV2 avec 10 classes de sortie :" +"**Ajouter une nouvelle stratégie `FedProx`** " +"([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/ref-changelog.md:774 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. 
The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise Keras. L'interface :code:`NumPyClient` définit trois " -"méthodes qui peuvent être mises en œuvre de la manière suivante :" - -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." -msgstr "Chaque client aura son propre ensemble de données." +"Cette " +"[stratégie](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" est presque identique à " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" mais aide les utilisateurs à reproduire ce qui est décrit dans cet " +"[article](https://arxiv.org/abs/1812.06127). Elle ajoute essentiellement " +"un paramètre appelé `proximal_mu` pour régulariser les modèles locaux par" +" rapport aux modèles globaux." 
-#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/ref-changelog.md:776 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -"Tu devrais maintenant voir comment la formation se déroule dans le tout " -"premier terminal (celui qui a démarré le serveur) :" +"**Ajouter de nouvelles métriques aux événements de télémétrie** " +"([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 -#, fuzzy +#: ../../source/ref-changelog.md:778 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"An updated event structure allows, for example, the clustering of events " +"within the same workload." msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le `code source complet " -"`_ pour cela se trouve dans :code:`examples" -"/quickstart-tensorflow/client.py`." +"Une structure d'événements mise à jour permet, par exemple, de regrouper " +"des événements au sein d'une même charge de travail." -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/ref-changelog.md:780 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." 
+"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" +"**Ajouter une nouvelle section de tutoriel sur les stratégies " +"personnalisées** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" -msgstr "Démarrage rapide XGBoost" - -#: ../../source/tutorial-quickstart-xgboost.rst:14 +#: ../../source/ref-changelog.md:782 #, fuzzy -msgid "Federated XGBoost" -msgstr "Formation fédérée" - -#: ../../source/tutorial-quickstart-xgboost.rst:16 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" +"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " +"traite de la mise en œuvre d'une stratégie personnalisée à partir de zéro" +" : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-3-Building-a" +"-Strategy-PyTorch.ipynb)" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/ref-changelog.md:784 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." 
+"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" +"**Ajouter une nouvelle section de tutoriel sur la sérialisation " +"personnalisée** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/tutorial-quickstart-xgboost.rst:23 +#: ../../source/ref-changelog.md:786 #, fuzzy -msgid "Why federated XGBoost?" -msgstr "Qu'est-ce que l'apprentissage fédéré ?" - -#: ../../source/tutorial-quickstart-xgboost.rst:25 -msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:27 -msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:30 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." 
-msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" +"Le tutoriel sur les fleurs comporte désormais une nouvelle section qui " +"traite de la sérialisation personnalisée : [Ouvrir dans " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial/Flower-4" +"-Client-and-NumPyClient-PyTorch.ipynb)" -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/ref-changelog.md:788 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " 
+"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/ada" -#: ../../source/tutorial-quickstart-xgboost.rst:47 -#, fuzzy -msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" -msgstr "Puisque nous 
voulons utiliser scikt-learn, allons-y et installons-le :" - -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/ref-changelog.md:792 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" +"**Mise à jour de la documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" -#: ../../source/tutorial-quickstart-xgboost.rst:60 -#, fuzzy +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés à PyTorch :" +"Comme d'habitude, la documentation s'est beaucoup améliorée. 
C'est une " +"autre étape dans notre effort pour faire de la documentation de Flower la" +" meilleure documentation de tout projet. Reste à l'écoute et comme " +"toujours, n'hésite pas à nous faire part de tes commentaires !" -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" -msgstr "" +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" +msgstr "v1.2.0 (2023-01-13)" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/ref-changelog.md:806 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" +"adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. " +"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/ref-changelog.md:810 msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " -"the partition for the given client based on :code:`node_id`:" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" +"**Introduire une nouvelle fleur Référence : FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/ref-changelog.md:812 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. 
They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" +"Au cours des prochaines semaines, nous publierons un certain nombre de " +"nouvelles implémentations de référence utiles en particulier pour les " +"nouveaux venus en FL. Elles revisiteront généralement des articles bien " +"connus de la littérature, et seront adaptées à l'intégration dans votre " +"propre application ou à l'expérimentation, afin d'approfondir votre " +"connaissance de FL en général. La publication d'aujourd'hui est la " +"première de cette série. [Lire la " +"suite.](https://flower.ai/blog/2023-01-12-fl-starter-pack-fedavg-mnist-" +"cnn/)" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/ref-changelog.md:814 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" +"**Améliorer la prise en charge des GPU dans les simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/ref-changelog.md:816 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. 
The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" +"Le moteur client virtuel basé sur Ray (`start_simulation`) a été mis à " +"jour pour améliorer la prise en charge des GPU. La mise à jour inclut " +"certaines des leçons durement apprises lors de la mise à l'échelle des " +"simulations dans des environnements de grappes de GPU. De nouveaux " +"paramètres par défaut rendent l'exécution des simulations basées sur les " +"GPU beaucoup plus robuste." -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" +#: ../../source/ref-changelog.md:818 +msgid "" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" +"**Améliorer la prise en charge du GPU dans les tutoriels Jupyter " +"Notebook** ([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/ref-changelog.md:820 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" +"Certains utilisateurs ont signalé que les carnets Jupyter n'ont pas " +"toujours été faciles à utiliser sur les instances GPU. Nous les avons " +"écoutés et avons apporté des améliorations à tous nos carnets Jupyter ! 
" +"Découvre les carnets mis à jour ici :" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/ref-changelog.md:822 +#, fuzzy msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" +"[Une introduction à l'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-1-Intro-to-FL-PyTorch.html)" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/ref-changelog.md:823 +#, fuzzy msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" +"[Stratégies d'apprentissage fédéré] " +"(https://flower.ai/docs/tutorial/Flower-2-Strategies-in-FL-PyTorch.html)" -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/ref-changelog.md:824 +#, fuzzy msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." 
+"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" +"[Construire une stratégie] " +"(https://flower.ai/docs/tutorial/Flower-3-Building-a-Strategy-" +"PyTorch.html)" -#: ../../source/tutorial-quickstart-xgboost.rst:251 +#: ../../source/ref-changelog.md:825 +#, fuzzy msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" +"[Client et NumPyClient] (https://flower.ai/docs/tutorial/Flower-4-Client-" +"and-NumPyClient-PyTorch.html)" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/ref-changelog.md:827 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" +"**Introduire la télémétrie optionnelle** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/ref-changelog.md:829 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." 
+"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" +"À la suite d'une [demande de commentaires] " +"(https://github.com/adap/flower/issues/1534) de la part de la communauté," +" le projet open-source Flower introduit la collecte optionnelle de " +"mesures d'utilisation *anonymes* afin de prendre des décisions éclairées " +"pour améliorer Flower. Cela permet à l'équipe de Flower de comprendre " +"comment Flower est utilisé et quels sont les défis auxquels les " +"utilisateurs peuvent être confrontés." -#: ../../source/tutorial-quickstart-xgboost.rst:294 +#: ../../source/ref-changelog.md:831 #, fuzzy msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" +"**Flower est un cadre convivial pour l'IA collaborative et la science des" +" données.** Restant fidèle à cette déclaration, Flower permet de " +"désactiver facilement la télémétrie pour les utilisateurs qui ne " +"souhaitent pas partager des métriques d'utilisation anonymes.[Lire la " +"suite.](https://flower.ai/docs/telemetry.html)." -#: ../../source/tutorial-quickstart-xgboost.rst:300 -#, fuzzy +#: ../../source/ref-changelog.md:833 msgid "" -"That's it for the client. 
We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " -":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" -" avec le serveur et les clients fonctionnant sur des machines " -"différentes, tout ce qui doit changer est l'adresse " -":code:`server_address` vers laquelle nous dirigeons le client." 
+"**([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/tutorial-quickstart-xgboost.rst:311 -#, fuzzy +#: ../../source/ref-changelog.md:835 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -"*Les clients* sont chargés de générer des mises à jour de poids " -"individuelles pour le modèle en fonction de leurs ensembles de données " -"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " -"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " -"cette version améliorée du modèle à chaque *client*. Un cycle complet de " -"mises à jour de poids s'appelle un *round*." +"Flower dispose désormais d'une nouvelle API de pilote (expérimentale) qui" +" permettra de créer des applications Federated Learning et Federated " +"Analytics entièrement programmables, asynchrones et multi-tenant. Ouf, " +"c'est beaucoup ! À l'avenir, l'API de pilote sera l'abstraction sur " +"laquelle de nombreuses fonctionnalités à venir seront construites - et tu" +" peux commencer à construire ces choses dès maintenant, aussi." 
-#: ../../source/tutorial-quickstart-xgboost.rst:314 -#, fuzzy +#: ../../source/ref-changelog.md:837 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." -msgstr "" -"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " -"liés au MXNet :" - -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" +"L'API du pilote permet également un nouveau mode d'exécution dans lequel " +"le serveur s'exécute indéfiniment. Plusieurs charges de travail " +"individuelles peuvent s'exécuter simultanément et démarrer et arrêter " +"leur exécution indépendamment du serveur. Ceci est particulièrement utile" +" pour les utilisateurs qui souhaitent déployer Flower en production." -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/ref-changelog.md:839 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:342 -#, fuzzy -msgid "Then, we start the server:" -msgstr "Démarrer le serveur" - -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" msgstr "" +"Pour en savoir plus, consulte l'exemple de code `mt-pytorch`. Nous " +"attendons tes commentaires avec impatience !" 
-#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/ref-changelog.md:841 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" +"Remarque : *L'API du pilote est encore expérimentale et est susceptible " +"de changer de manière significative au fil du temps.*" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/ref-changelog.md:843 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." -" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" +"**Ajouter un nouvel exemple de Federated Analytics avec Pandas** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/ref-changelog.md:845 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" +"Un nouvel exemple de code (`quickstart-pandas`) démontre l'analyse " +"fédérée avec Pandas et Flower. Tu peux le trouver ici : [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." 
-#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/ref-changelog.md:847 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" +"**Ajouter de nouvelles stratégies : Krum et MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/ref-changelog.md:849 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" +"Edoardo, étudiant en informatique à l'Université Sapienza de Rome, a " +"contribué à une nouvelle stratégie `Krum` qui permet aux utilisateurs " +"d'utiliser facilement Krum et MultiKrum dans leurs charges de travail." -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" +#: ../../source/ref-changelog.md:851 +msgid "" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" +"**Mettre à jour l'exemple C++ pour qu'il soit compatible avec Flower " +"v1.2.0** ([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/ref-changelog.md:853 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. 
One can see that the average AUC increases " -"over FL rounds." +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." msgstr "" +"L'exemple de code C++ a reçu une mise à jour substantielle pour le rendre" +" compatible avec la dernière version de Flower." -#: ../../source/tutorial-quickstart-xgboost.rst:590 -#, fuzzy +#: ../../source/ref-changelog.md:855 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." 
+"**Améliorations générales** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" +#: ../../source/ref-changelog.md:859 +msgid "" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" +"**Documentation mise à jour** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: 
../../source/tutorial-quickstart-xgboost.rst:596 +#: ../../source/ref-changelog.md:863 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" +"L'un des points forts est le nouveau [guide du premier contributeur] " +"(https://flower.ai/docs/first-time-contributors.html) : si tu n'as jamais" +" contribué sur GitHub auparavant, c'est l'endroit idéal pour commencer !" -#: ../../source/tutorial-quickstart-xgboost.rst:603 -#, fuzzy -msgid "Cyclic training" -msgstr "Formation centralisée" +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" +msgstr "v1.1.0 (2022-10-31)" -#: ../../source/tutorial-quickstart-xgboost.rst:605 +#: ../../source/ref-changelog.md:873 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." 
+"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" +"Nous aimerions **remercier tout particulièrement** tous les contributeurs" +" qui ont rendu possible la nouvelle version de Flower (dans l'ordre `git " +"shortlog`) :" -#: ../../source/tutorial-quickstart-xgboost.rst:609 +#: ../../source/ref-changelog.md:875 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/ref-changelog.md:879 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." 
+"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" +"**Introduire les enveloppes de confidentialité différentielle (aperçu)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/ref-changelog.md:881 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" +"Le premier aperçu (expérimental) des wrappers enfichables de " +"confidentialité différentielle permet de configurer et d'utiliser " +"facilement la confidentialité différentielle (DP). Les wrappers DP " +"enfichables permettent une utilisation agnostique du cadre **et** de la " +"stratégie à la fois de la DP côté client et de la DP côté serveur. Va " +"voir les documents de Flower, un nouvel explicatif va plus loin dans les " +"détails." 
-#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/ref-changelog.md:883 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" +"**Nouvel exemple de code CoreML pour iOS** " +"([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" +#: ../../source/ref-changelog.md:885 +msgid "" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" +"Flower passe à iOS ! Un nouvel exemple de code massif montre comment les " +"clients Flower peuvent être construits pour iOS. L'exemple de code " +"contient à la fois des composants Flower iOS SDK qui peuvent être " +"utilisés pour de nombreuses tâches, et un exemple de tâche fonctionnant " +"sur CoreML." -#: ../../source/tutorial-quickstart-xgboost.rst:759 +#: ../../source/ref-changelog.md:887 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." 
+"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" +"**Nouvelle stratégie de FedMedian** " +"([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -#, fuzzy -msgid "Customised centralised/distributed evaluation" -msgstr "Évaluation centralisée" - -#: ../../source/tutorial-quickstart-xgboost.rst:792 +#: ../../source/ref-changelog.md:889 msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" +"La nouvelle stratégie `FedMedian` met en œuvre Federated Median " +"(FedMedian) par [Yin et al., 2018]" +"(https://arxiv.org/pdf/1803.01498v1.pdf)." -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/ref-changelog.md:891 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" +"**Log** `Client` **exceptions dans le moteur de client virtuel** " +"([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/tutorial-quickstart-xgboost.rst:827 +#: ../../source/ref-changelog.md:893 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." 
msgstr "" +"Toutes les exceptions `Client` qui se produisent dans le VCE sont " +"maintenant enregistrées par défaut et ne sont pas seulement exposées à la" +" `Stratégie` configurée (via l'argument `failures`)." -#: ../../source/tutorial-quickstart-xgboost.rst:831 -#, fuzzy -msgid "Flower simulation" -msgstr "Simulation de moniteur" - -#: ../../source/tutorial-quickstart-xgboost.rst:832 +#: ../../source/ref-changelog.md:895 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" +"**Améliorer le moteur du client virtuel** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/tutorial-quickstart-xgboost.rst:866 +#: ../../source/ref-changelog.md:897 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" +"Le VCE utilise maintenant Ray 2.0 sous le capot, le type de valeur du " +"dictionnaire `client_resources` a été remplacé par `float` pour permettre" +" l'allocation de fractions de ressources." -#: ../../source/tutorial-quickstart-xgboost.rst:921 +#: ../../source/ref-changelog.md:899 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." 
-msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" -#: ../../source/tutorial-quickstart-xgboost.rst:975 +#: ../../source/ref-changelog.md:901 msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" +"Le moteur de client virtuel prend désormais en charge les méthodes " +"optionnelles `Client` (et `NumPyClient`)." -#: ../../source/tutorial-quickstart-xgboost.rst:995 +#: ../../source/ref-changelog.md:903 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" +"**Fournir des informations de type aux paquets en utilisant** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 +#: ../../source/ref-changelog.md:905 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." 
msgstr "" +"Le paquet `flwr` est maintenant accompagné d'un fichier `py.typed` " +"indiquant que le paquet est typé. Cela permet de prendre en charge le " +"typage pour les projets ou les paquets qui utilisent `flwr` en leur " +"permettant d'améliorer leur code à l'aide de vérificateurs de types " +"statiques comme `mypy`." -#: ../../source/tutorial-quickstart-xgboost.rst:1086 +#: ../../source/ref-changelog.md:907 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" +"**Exemple de code mis à jour** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 +#: ../../source/ref-changelog.md:909 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." msgstr "" +"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " +"mis à jour pour fonctionner avec la dernière version de Flower." 
-#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" +#: ../../source/ref-changelog.md:911 +msgid "" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" +"**Documentation mise à jour** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " 
+"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/ref-changelog.md:913 +msgid "" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" +"Il y a eu tellement de mises à jour de la documentation que cela n'a même" +" pas de sens de les énumérer individuellement." -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -#, fuzzy -msgid "Example commands" -msgstr "Exemples de PyTorch" - -#: ../../source/tutorial-quickstart-xgboost.rst:1231 +#: ../../source/ref-changelog.md:915 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" +"**Documentation restructurée** " +"([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -#, fuzzy -msgid "Then, on each client terminal, we start the clients:" -msgstr "Ouvre un autre terminal et démarre le deuxième client :" - -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/ref-changelog.md:917 +msgid "" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" +"La documentation a été restructurée pour faciliter la navigation. 
Ce " +"n'est que la première étape d'un effort plus important visant à faire de " +"la documentation de Flower la meilleure documentation de tous les projets" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -#, fuzzy +#: ../../source/ref-changelog.md:919 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le code source complet " -"`_ de cet exemple se trouve dans :code:`examples" -"/quickstart-mxnet`." - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -#, fuzzy -msgid "Build a strategy from scratch" -msgstr "Élaborer une stratégie à partir de zéro" +"**Ouvrir dans le bouton Colab** " +"([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 -#, fuzzy +#: ../../source/ref-changelog.md:921 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." msgstr "" -"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" -" Flower. 
Dans les parties précédentes de ce tutoriel, nous avons présenté" -" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__) " -"et nous avons appris comment les stratégies peuvent être utilisées pour " -"personnaliser l'exécution à la fois sur le serveur et sur les clients " -"(`partie 2 `__)." +"Les quatre parties du didacticiel d'apprentissage fédéré Flower sont " +"maintenant accompagnées d'un nouveau bouton \"Ouvrir dans Colab\". Pas " +"besoin d'installer quoi que ce soit sur ta machine locale, tu peux " +"maintenant utiliser et apprendre à connaître Flower dans ton navigateur, " +"il te suffit d'un simple clic." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:923 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -"Dans ce carnet, nous allons continuer à personnaliser le système " -"d'apprentissage fédéré que nous avons construit précédemment en créant " -"une version personnalisée de FedAvg (encore une fois, en utilisant " -"`Flower `__ et `PyTorch `__)." 
+"**Tutoriel amélioré** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/ref-changelog.md:925 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -"`Star Flower on GitHub `__ ⭐️ et " -"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " -"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " -"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" -" le canal ``#questions``." +"Le tutoriel sur l'apprentissage fédéré des fleurs a deux toutes nouvelles" +" parties couvrant les stratégies personnalisées (encore WIP) et la " +"distinction entre `Client` et `NumPyClient`. Les parties un et deux " +"existantes ont également été améliorées (beaucoup de petits changements " +"et de corrections)." 
-#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "Préparation" +#: ../../source/ref-changelog.md:933 +msgid "Highlights" +msgstr "Points forts" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 -msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." -msgstr "" -"Avant de commencer le code proprement dit, assurons-nous que nous " -"disposons de tout ce dont nous avons besoin." 
+#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "**Moteur de client virtuel stable** (accessible via `start_simulation`)" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "Installation des dépendances" +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "Toutes les méthodes `Client`/`NumPyClient` sont maintenant optionnelles" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "Tout d'abord, nous installons les paquets nécessaires :" +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" +msgstr "`get_parameters` configurable" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:938 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -"Maintenant que toutes les dépendances sont installées, nous pouvons " -"importer tout ce dont nous avons besoin pour ce tutoriel :" +"Des tonnes de petits nettoyages d'API résultant en une expérience plus " 
+"cohérente pour les développeurs" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:942 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -"Il est possible de passer à un runtime dont l'accélération GPU est " -"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " -"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " -"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " -"liée à la disponibilité du GPU dans l'une des sections suivantes, " -"envisage de repasser à une exécution basée sur le CPU en définissant " -"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" -" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " -"il dira ``Training on cpu``." 
+"Nous tenons à remercier **particulièrement** tous les contributeurs qui " +"ont rendu Flower 1.0 possible (dans l'ordre inverse de [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors)) :" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "Chargement des données" +#: ../../source/ref-changelog.md:944 +msgid "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " 
+"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." +msgstr "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), [@mrinaald](" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:948 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. 
We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " -"divisons-les en dix ensembles de données plus petits (chacun divisé en " -"ensemble d'entraînement et de validation), et enveloppons le tout dans " -"leur propre ``DataLoader``. Nous introduisons un nouveau paramètre " -"``num_clients`` qui nous permet d'appeler ``load_datasets`` avec " -"différents nombres de clients." +"**Tous les arguments doivent être passés comme des arguments de mot-clé**" +" ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "Formation/évaluation du modèle" +#: ../../source/ref-changelog.md:950 +#, fuzzy +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." +msgstr "" +"Le code qui utilise des arguments positionnels (par exemple, " +"``start_client(\"127.0.0.1:8080\", FlowerClient())`) doit ajouter le mot-" +"clé pour chaque argument positionnel (par exemple, " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." 
-#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:952 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -"Continuons avec la définition habituelle du modèle (y compris " -"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " -"et de test :" +"**Introduire l'objet de configuration** `ServerConfig` **dans** " +"`start_server` **et** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "Client de Flower" +#: ../../source/ref-changelog.md:954 +msgid "" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." +msgstr "" +"Au lieu d'un dictionnaire de configuration `{\"num_rounds\" : 3, " +"\"round_timeout\" : 600.0}`, `start_server` et `start_simulation` " +"attendent maintenant un objet de configuration de type " +"`flwr.server.ServerConfig`. 
`ServerConfig` prend les mêmes arguments que " +"le dict de configuration précédent, mais il rend l'écriture de code " +"sécurisé plus facile et les valeurs des paramètres par défaut plus " +"transparentes." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:956 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" -"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " -"méthodes ``get_parameters``, ``fit`` et ``evaluate``. 
Ici, nous " -"transmettons également le ``cid`` au client et l'utilisons pour consigner" -" des détails supplémentaires :" +"**Renommer les paramètres de la stratégie intégrée pour plus de clarté** " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" +#: ../../source/ref-changelog.md:958 +msgid "" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" +msgstr "" +"Les paramètres de stratégie intégrés suivants ont été renommés pour " +"améliorer la lisibilité et la cohérence avec d'autres API :" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr "Élaborer une stratégie à partir de zéro" +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "`fraction_eval` --> `fraction_evaluate`" + +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" + +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "`eval_fn` --> `evaluate_fn`" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:964 msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." 
+"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -"Remplaçons la méthode ``configure_fit`` de façon à ce qu'elle transmette " -"un taux d'apprentissage plus élevé (potentiellement aussi d'autres " -"hyperparamètres) à l'optimiseur d'une fraction des clients. Nous " -"garderons l'échantillonnage des clients tel qu'il est dans ``FedAvg`` et " -"changerons ensuite le dictionnaire de configuration (l'un des attributs " -"``FitIns``)." +"**Mettre à jour les arguments par défaut des stratégies intégrées** " +"([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:966 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -"Il ne reste plus qu'à utiliser la stratégie personnalisée nouvellement " -"créée ``FedCustom`` lors du démarrage de l'expérience :" +"Toutes les stratégies intégrées utilisent désormais `fraction_fit=1.0` et" +" `fraction_evaluate=1.0`, ce qui signifie qu'elles sélectionnent *tous* " +"les clients actuellement disponibles pour l'entraînement et l'évaluation." 
+" Les projets qui s'appuyaient sur les valeurs par défaut précédentes " +"peuvent retrouver le comportement antérieur en initialisant la stratégie " +"de la manière suivante :" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "Récapitulation" +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "`stratégie = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +#: ../../source/ref-changelog.md:970 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Dans ce carnet, nous avons vu comment mettre en place une stratégie " -"personnalisée. Une stratégie personnalisée permet un contrôle granulaire " -"sur la configuration des nœuds clients, l'agrégation des résultats, et " -"bien plus encore. Pour définir une stratégie personnalisée, il te suffit " -"d'écraser les méthodes abstraites de la classe de base (abstraite) " -"``Strategy``. 
Pour rendre les stratégies personnalisées encore plus " -"puissantes, tu peux passer des fonctions personnalisées au constructeur " -"de ta nouvelle classe (``__init__``) et appeler ensuite ces fonctions à " -"chaque fois que c'est nécessaire." +"**Ajouter** `server_round` **à** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +#: ../../source/ref-changelog.md:972 msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " -"Slack : `Join Slack `__" +"La méthode `evaluate` de `Strategy` reçoit maintenant le cycle actuel " +"d'apprentissage/évaluation fédéré comme premier paramètre." -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/ref-changelog.md:974 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" 
+"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"Il existe un canal dédié aux ``questions`` si vous avez besoin d'aide, " -"mais nous aimerions aussi savoir qui vous êtes dans ``#introductions`` !" +"**Ajouter** `server_round` **et** `config` **paramètres à** `evaluate_fn`" +" ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 -#, fuzzy +#: ../../source/ref-changelog.md:976 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 4 " -"`__ présente ``Client``, l'API flexible qui sous-tend " -"``NumPyClient``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -#, fuzzy -msgid "Customize the client" -msgstr "Création du client IMDBC" +"Le `evaluate_fn` passé aux stratégies intégrées comme `FedAvg` prend " +"maintenant trois paramètres : (1) le cycle actuel " +"d'apprentissage/évaluation fédéré (`server_round`), (2) les paramètres du" +" modèle à évaluer (`parameters`), et (3) un dictionnaire de configuration" +" (`config`)." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -#, fuzzy +#: ../../source/ref-changelog.md:978 msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. 
In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -"Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré" -" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" -" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__), " -"nous avons appris comment les stratégies peuvent être utilisées pour " -"personnaliser l'exécution à la fois sur le serveur et les clients " -"(`partie 2 `__), et nous avons construit notre propre stratégie " -"personnalisée à partir de zéro (`partie 3 - WIP " -"`__)." +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/ref-changelog.md:980 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." 
msgstr "" -"Dans ce carnet, nous revisitons `NumPyClient`` et introduisons une " -"nouvelle classe de base pour construire des clients, simplement appelée " -"`Client``. Dans les parties précédentes de ce tutoriel, nous avons basé " -"notre client sur ``NumPyClient``, une classe de commodité qui facilite le" -" travail avec les bibliothèques d'apprentissage automatique qui ont une " -"bonne interopérabilité NumPy. Avec ``Client``, nous gagnons beaucoup de " -"flexibilité que nous n'avions pas auparavant, mais nous devrons également" -" faire quelques choses que nous n'avions pas à faire auparavant." +"Plusieurs méthodes et fonctions de Flower (`evaluate_fn`, " +"`configure_fit`, `aggregate_fit`, `configure_evaluate`, " +"`aggregate_evaluate`) reçoivent le cycle actuel " +"d'apprentissage/évaluation fédéré comme premier paramètre. Pour améliorer" +" la fiabilité et éviter la confusion avec *random*, ce paramètre a été " +"renommé de `rnd` à `server_round`." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +#: ../../source/ref-changelog.md:982 msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -"Allons plus loin et voyons ce qu'il faut faire pour passer de " -"``NumPyClient`` à ``Client`` !" +"**Déplacer** `flwr.dataset` **vers** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "Étape 0 : Préparation" +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "Le paquet expérimental `flwr.dataset` a été migré vers Flower Baselines." 
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +#: ../../source/ref-changelog.md:986 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " -"divisons-les en dix ensembles de données plus petits (chacun divisé en " -"ensemble d'entraînement et de validation) et enveloppons le tout dans " -"leur propre ``DataLoader``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "Étape 1 : Revoir NumPyClient" +"**Supprimer les stratégies expérimentales** " +"([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +#: ../../source/ref-changelog.md:988 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -"Jusqu'à présent, nous avons implémenté notre client en sous-classant " -"``flwr.client.NumPyClient``. Les trois méthodes que nous avons " -"implémentées sont ``get_parameters``, ``fit`` et ``evaluate``. Enfin, " -"nous enveloppons la création d'instances de cette classe dans une " -"fonction appelée ``client_fn`` :" +"Supprimer les stratégies expérimentales non maintenues (`FastAndSlow`, " +"`FedFSv0`, `FedFSv1`)." 
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +#: ../../source/ref-changelog.md:990 msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"Nous avons déjà vu cela auparavant, il n'y a rien de nouveau jusqu'à " -"présent. La seule *petite* différence par rapport au carnet précédent est" -" le nommage, nous avons changé ``FlowerClient`` en ``FlowerNumPyClient`` " -"et ``client_fn`` en ``numpyclient_fn``. Exécutons-le pour voir la sortie " -"que nous obtenons :" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/ref-changelog.md:992 msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -"Cela fonctionne comme prévu, deux clients s'entraînent pour trois tours " -"d'apprentissage fédéré." +"`flwr.common.Weights` a été renommé en `flwr.common.NDArrays` pour mieux " +"rendre compte de la nature de ce type." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:994 msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. 
Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"Plongeons un peu plus profondément et discutons de la façon dont Flower " -"exécute cette simulation. Chaque fois qu'un client est sélectionné pour " -"effectuer un travail, ``start_simulation`` appelle la fonction " -"``numpyclient_fn`` pour créer une instance de notre ``FlowerNumPyClient``" -" (en même temps qu'il charge le modèle et les données)." +"**Supprimez l'ancien** `force_final_distributed_eval` **de** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +#: ../../source/ref-changelog.md:996 msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." msgstr "" -"Mais voici la partie la plus surprenante : Flower n'utilise pas " -"directement l'objet `FlowerNumPyClient`. Au lieu de cela, il enveloppe " -"l'objet pour le faire ressembler à une sous-classe de " -"`flwr.client.Client`, et non de `flwr.client.NumPyClient`. 
En fait, le " -"noyau de Flower ne sait pas comment gérer les `NumPyClient`, il sait " -"seulement comment gérer les `Client`. `NumPyClient` est juste une " -"abstraction de commodité construite au dessus de `Client`." +"Le paramètre `start_server` `force_final_distributed_eval` a longtemps " +"été un artefact historique, dans cette version il a finalement disparu " +"pour de bon." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +#: ../../source/ref-changelog.md:998 msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -"Au lieu de construire par-dessus `NumPyClient``, nous pouvons construire " -"directement par-dessus `Client``." +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "Étape 2 : Passer de ``NumPyClient`` à ``Client``" +#: ../../source/ref-changelog.md:1000 +msgid "" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." +msgstr "" +"La méthode `get_parameters` accepte maintenant un dictionnaire de " +"configuration, tout comme `get_properties`, `fit`, et `evaluate`." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/ref-changelog.md:1002 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"Essayons de faire la même chose en utilisant ``Client`` au lieu de " -"``NumPyClient``." 
+"**Remplace** `num_rounds` **dans** `start_simulation` **avec le nouveau**" +" `config` **paramètre** " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +#: ../../source/ref-changelog.md:1004 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -"Avant de discuter du code plus en détail, essayons de l'exécuter ! Nous " -"devons nous assurer que notre nouveau client basé sur le ``Client`` " -"fonctionne, n'est-ce pas ?" +"La fonction `start_simulation` accepte maintenant un dictionnaire de " +"configuration `config` au lieu de l'entier `num_rounds`. Cela améliore la" +" cohérence entre `start_simulation` et `start_server` et facilite la " +"transition entre les deux." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/ref-changelog.md:1008 msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -"Voilà, nous utilisons maintenant ``Client``. Cela ressemble probablement " -"à ce que nous avons fait avec ``NumPyClient``. Alors quelle est la " -"différence ?" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:1010 msgid "" -"First of all, it's more code. But why? 
The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." 
+"La version précédente de Flower a introduit la prise en charge " +"expérimentale de Python 3.10, cette version déclare la prise en charge de" +" Python 3.10 comme stable." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +#: ../../source/ref-changelog.md:1012 msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -"La seule *vraie* différence entre Client et NumPyClient est que " -"NumPyClient s'occupe de la sérialisation et de la désérialisation pour " -"toi. Il peut le faire parce qu'il s'attend à ce que tu renvoies des " -"paramètres sous forme de NumPy ndarray, et il sait comment les gérer. " -"Cela permet de travailler avec des bibliothèques d'apprentissage " -"automatique qui ont une bonne prise en charge de NumPy (la plupart " -"d'entre elles) en un clin d'œil." +"**Rendre toutes les **méthodes `Client` **et** `NumPyClient` " +"**facultatives** ([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/ref-changelog.md:1014 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). 
The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "Étape 3 : Sérialisation personnalisée" +"Les méthodes `Client`/`NumPyClient` `get_properties`, `get_parameters`, " +"`fit`, et `evaluate` sont toutes optionnelles. Cela permet d'écrire des " +"clients qui n'implémentent, par exemple, que `fit`, mais aucune autre " +"méthode. Pas besoin d'implémenter `evaluate` quand on utilise " +"l'évaluation centralisée !" 
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +#: ../../source/ref-changelog.md:1016 msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"Nous allons ici explorer comment mettre en œuvre une sérialisation " -"personnalisée à l'aide d'un exemple simple." +"**Autoriser le passage d'une **instance `Server` à** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +#: ../../source/ref-changelog.md:1018 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -"Mais d'abord, qu'est-ce que la sérialisation ? La sérialisation est " -"simplement le processus de conversion d'un objet en octets bruts, et tout" -" aussi important, la désérialisation est le processus de reconversion des" -" octets bruts en objet. Ceci est très utile pour la communication réseau." -" En effet, sans la sérialisation, tu ne pourrais pas faire passer un " -"objet Python par Internet." +"Comme pour `start_server`, `start_simulation` accepte maintenant une " +"instance complète de `Server`. 
Cela permet aux utilisateurs de " +"personnaliser fortement l'exécution des expériences et ouvre la porte à " +"l'exécution, par exemple, de FL asynchrones à l'aide du moteur de client " +"virtuel." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/ref-changelog.md:1020 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -"L'apprentissage fédéré s'appuie fortement sur la communication Internet " -"pour la formation en envoyant des objets Python dans les deux sens entre " -"les clients et le serveur, ce qui signifie que la sérialisation est un " -"élément essentiel de l'apprentissage fédéré." +"**Mettre à jour les exemples de code** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +#: ../../source/ref-changelog.md:1022 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." 
+"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -"Dans la section suivante, nous allons écrire un exemple de base où, au " -"lieu d'envoyer une version sérialisée de nos ``ndarray`` contenant nos " -"paramètres, nous allons d'abord convertir les ``ndarray`` en matrices " -"éparses, avant de les envoyer. Cette technique peut être utilisée pour " -"économiser de la bande passante, car dans certains cas où les poids d'un " -"modèle sont épars (contenant de nombreuses entrées 0), les convertir en " -"une matrice éparse peut grandement améliorer leur taille en octets." +"De nombreux exemples de code ont reçu de petites ou même de grandes mises" +" à jour de maintenance" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" -msgstr "Nos fonctions de sérialisation/désérialisation personnalisées" +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" +msgstr "`scikit-learn`" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 -msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." -msgstr "" -"C'est là que la véritable sérialisation/désérialisation se produira, en " -"particulier dans ``ndarray_to_sparse_bytes`` pour la sérialisation et " -"``sparse_bytes_to_ndarray`` pour la désérialisation." +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" +msgstr "`simulation_pytorch`" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 -msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." -msgstr "" -"Notez que nous avons importé la bibliothèque ``scipy.sparse`` afin de " -"convertir nos tableaux." 
+#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" +msgstr "`quickstart_pytorch` (démarrage rapide)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "Côté client" +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" +msgstr "`quickstart_simulation`" + +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" +msgstr "`quickstart_tensorflow`" + +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" +msgstr "`advanced_tensorflow` (en anglais)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1031 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -"Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " -"suffira d'appeler nos fonctions personnalisées dans notre " -"``flwr.client.Client``." +"**Supprime l'exemple de simulation obsolète** " +"([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/ref-changelog.md:1033 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -"En effet, dans ``get_parameters`` nous devons sérialiser les paramètres " -"que nous avons obtenus de notre réseau en utilisant nos " -"``ndarrays_to_sparse_parameters`` personnalisés définis ci-dessus." 
+"Supprime l'exemple obsolète `simulation` et renomme " +"`quickstart_simulation` en `simulation_tensorflow` pour qu'il corresponde" +" au nom de `simulation_pytorch`" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/ref-changelog.md:1035 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -"Dans ``fit``, nous devons d'abord désérialiser les paramètres provenant " -"du serveur en utilisant notre ``sparse_parameters_to_ndarrays`` " -"personnalisé, puis nous devons sérialiser nos résultats locaux avec " -"``ndarrays_to_sparse_parameters``." 
+"**Mise à jour de la documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/ref-changelog.md:1037 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" -"Dans ``evaluate``, nous n'aurons besoin que de désérialiser les " -"paramètres globaux avec notre fonction personnalisée." +"Une mise à jour substantielle de la documentation corrige plusieurs " +"petits problèmes de rendu, rend les titres plus succincts pour améliorer " +"la navigation, supprime une bibliothèque obsolète, met à jour les " +"dépendances de la documentation, inclut le module `flwr.common` dans la " +"référence de l'API, inclut le support de la documentation basée sur le " +"markdown, migre le changelog de `.rst` vers `.md`, et corrige un certain " +"nombre de détails plus petits !" 
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "Côté serveur" +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" +msgstr "**Mises à jour mineures**" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/ref-changelog.md:1041 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -"Pour cet exemple, nous utiliserons simplement ``FedAvg`` comme stratégie." -" Pour modifier la sérialisation et la désérialisation ici, il suffit de " -"réimplémenter les fonctions ``evaluate`` et ``aggregate_fit`` de " -"``FedAvg``. Les autres fonctions de la stratégie seront héritées de la " -"super-classe ``FedAvg``." 
-
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729
-msgid "As you can see only one line as change in ``evaluate``:"
-msgstr "Comme tu peux le voir, seule une ligne a été modifiée dans ``evaluate`` :"
+"Ajoute le numéro du tour (round) aux messages de journal de fit et evaluate "
+"([#1266](https://github.com/adap/flower/pull/1266))"

-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735
+#: ../../source/ref-changelog.md:1042
 msgid ""
-"And for ``aggregate_fit``, we will first deserialize every result we "
-"received:"
+"Add secure gRPC connection to the `advanced_tensorflow` code example "
+"([#847](https://github.com/adap/flower/pull/847))"
 msgstr ""
-"Et pour ``aggregate_fit``, nous allons d'abord désérialiser chaque "
-"résultat que nous avons reçu :"
-
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744
-msgid "And then serialize the aggregated result:"
-msgstr "Puis sérialise le résultat agrégé :"
+"Ajouter une connexion gRPC sécurisée à l'exemple de code "
+"`advanced_tensorflow` ([#847](https://github.com/adap/flower/pull/847))"

-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903
-msgid "We can now run our custom serialization example!"
+#: ../../source/ref-changelog.md:1043
+msgid ""
+"Update developer tooling "
+"([#1231](https://github.com/adap/flower/pull/1231), "
+"[#1276](https://github.com/adap/flower/pull/1276), "
+"[#1301](https://github.com/adap/flower/pull/1301), "
+"[#1310](https://github.com/adap/flower/pull/1310))"
 msgstr ""
-"Nous pouvons maintenant exécuter notre exemple de sérialisation "
-"personnalisée !"
+"Mettre à jour les outils de développement " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/ref-changelog.md:1044 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." -msgstr "" -"Dans cette partie du tutoriel, nous avons vu comment construire des " -"clients en sous-classant soit ``NumPyClient``, soit ``Client``. " -"``NumPyClient`` est une abstraction de commodité qui facilite le travail " -"avec les bibliothèques d'apprentissage automatique qui ont une bonne " -"interopérabilité NumPy. ``Client`` est une abstraction plus flexible qui " -"nous permet de faire des choses qui ne sont pas possibles dans " -"``NumPyClient``. Pour ce faire, elle nous oblige à gérer nous-mêmes la " -"sérialisation et la désérialisation des paramètres." - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 -msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. 
There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"C'est la dernière partie du tutoriel Flower (pour l'instant !), " -"félicitations ! Tu es maintenant bien équipé pour comprendre le reste de " -"la documentation. Il y a de nombreux sujets que nous n'avons pas abordés " -"dans le tutoriel, nous te recommandons les ressources suivantes :" +"Renomme les messages ProtoBuf pour améliorer la cohérence " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "`Lire les docs sur les fleurs `__" +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" +msgstr "v0.19.0 (2022-05-18)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/ref-changelog.md:1050 msgid "" -"`Check out Flower Code Examples " -"`__" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -"`Check out Flower Code Examples " -"`__" +"**Flower Baselines (preview) : FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +#: ../../source/ref-changelog.md:1052 #, fuzzy msgid "" -"`Use Flower Baselines for your research " -"`__" +"The first preview release of Flower Baselines has arrived! 
We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -"`Utilise les lignes de base des fleurs pour ta recherche " -"`__" +"La première version préliminaire de Flower Baselines est arrivée ! Nous " +"démarrons Flower Baselines avec des implémentations de FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, et FedAvgM. Consultez la documentation sur " +"l'utilisation de [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html). Avec cette première version préliminaire, nous invitons " +"également la communauté à [contribuer à leurs propres lignes de " +"base](https://flower.ai/docs/baselines/how-to-contribute-baselines.html)." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 -#, fuzzy +#: ../../source/ref-changelog.md:1054 msgid "" -"`Watch Flower Summit 2023 videos `__" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -"`Regardez les vidéos du Flower Summit 2022 `__" +"**SDK client C++ (aperçu) et exemple de code** " +"([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" +#: ../../source/ref-changelog.md:1056 +msgid "" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" +"L'aperçu C++ comprend un SDK pour les clients Flower et un exemple de " +"code de démarrage rapide qui démontre un client C++ simple utilisant le " +"SDK." 
-#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré de la fleur !" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 -#, fuzzy +#: ../../source/ref-changelog.md:1058 msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -"Dans ce carnet, nous allons construire un système d'apprentissage fédéré " -"en utilisant Flower et PyTorch. Dans la première partie, nous utilisons " -"PyTorch pour le pipeline d'entraînement des modèles et le chargement des " -"données. Dans la deuxième partie, nous continuons à fédérer le pipeline " -"basé sur PyTorch en utilisant Flower." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -#, fuzzy -msgid "Let's get started!" -msgstr "Allons-y, déclarons-le !" +"**Ajouter la prise en charge expérimentale de Python 3.10 et Python " +"3.11** ([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:1060 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." 
msgstr "" -"Avant de commencer à coder, assurons-nous que nous disposons de tout ce " -"dont nous avons besoin." +"Python 3.10 est la dernière version stable de Python et Python 3.11 " +"devrait sortir en octobre. Cette version de Flower ajoute une prise en " +"charge expérimentale pour les deux versions de Python." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -#, fuzzy +#: ../../source/ref-changelog.md:1062 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -"Ensuite, nous installons les paquets nécessaires pour PyTorch (``torch`` " -"et ``torchvision``) et Flower (``flwr``) :" +"**Agréger des mesures personnalisées grâce à des fonctions fournies par " +"l'utilisateur** ([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -#, fuzzy +#: ../../source/ref-changelog.md:1064 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. 
Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -"Il est possible de passer à un runtime dont l'accélération GPU est " -"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " -"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " -"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " -"liée à la disponibilité du GPU dans l'une des sections suivantes, " -"envisage de repasser à une exécution basée sur le CPU en définissant " -"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" -" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " -"il dira ``Training on cpu``." +"Les stratégies intégrées prennent en charge deux nouveaux arguments, " +"`fit_metrics_aggregation_fn` et `evaluate_metrics_aggregation_fn`, qui " +"permettent de passer des fonctions d'agrégation de métriques " +"personnalisées." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "Chargement des données" +#: ../../source/ref-changelog.md:1066 +msgid "" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" +msgstr "" +"**Temps d'attente configurable par l'utilisateur** " +"([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -#, fuzzy +#: ../../source/ref-changelog.md:1068 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. 
CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -"L'apprentissage fédéré peut être appliqué à de nombreux types de tâches " -"dans différents domaines. Dans ce tutoriel, nous présentons " -"l'apprentissage fédéré en formant un simple réseau neuronal " -"convolutionnel (CNN) sur l'ensemble de données populaire CIFAR-10. " -"CIFAR-10 peut être utilisé pour former des classificateurs d'images qui " -"font la distinction entre les images de dix classes différentes :" +"Si le dictionnaire `config` contient une clé `round_timeout` (avec une " +"valeur `float` en secondes), le serveur attendra *au moins* " +"`round_timeout` secondes avant de fermer la connexion." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +#: ../../source/ref-changelog.md:1070 msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." 
+"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -"Nous simulons le fait d'avoir plusieurs ensembles de données provenant de" -" plusieurs organisations (également appelé le paramètre \"cross-silo\" " -"dans l'apprentissage fédéré) en divisant l'ensemble de données CIFAR-10 " -"original en plusieurs partitions. Chaque partition représentera les " -"données d'une seule organisation. Nous faisons cela purement à des fins " -"d'expérimentation, dans le monde réel, il n'y a pas besoin de diviser les" -" données parce que chaque organisation a déjà ses propres données (les " -"données sont donc naturellement partitionnées)." +"**Permettre l'utilisation simultanée de l'évaluation fédérée et de " +"l'évaluation centralisée dans toutes les stratégies intégrées** " +"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -#, fuzzy +#: ../../source/ref-changelog.md:1072 msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -"Chaque organisation agira comme un client dans le système d'apprentissage" -" fédéré. Ainsi, le fait que dix organisations participent à une " -"fédération signifie que dix clients sont connectés au serveur " -"d'apprentissage fédéré :" +"Les stratégies intégrées peuvent maintenant effectuer une évaluation " +"fédérée (c'est-à-dire côté client) et une évaluation centralisée " +"(c'est-à-dire côté serveur) dans le même tour. 
L'évaluation fédérée peut " +"être désactivée en réglant `fraction_eval` sur `0.0`." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 +#: ../../source/ref-changelog.md:1074 msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" +"**Deux nouveaux tutoriels Jupyter Notebook** " +"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -#, fuzzy +#: ../../source/ref-changelog.md:1076 msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -"Nous avons maintenant une liste de dix ensembles de formation et dix " -"ensembles de validation (``trainloaders`` et ``valloaders``) représentant" -" les données de dix organisations différentes. Chaque paire " -"``trainloader`/``valloader`` contient 4500 exemples de formation et 500 " -"exemples de validation. Il y a également un seul ``testloader`` (nous " -"n'avons pas divisé l'ensemble de test). 
Encore une fois, cela n'est " -"nécessaire que pour construire des systèmes de recherche ou d'éducation, " -"les systèmes d'apprentissage fédérés actuels ont leurs données " -"naturellement distribuées à travers plusieurs partitions." +"Deux tutoriels Jupyter Notebook (compatibles avec Google Colab) " +"expliquent les fonctionnalités de base et intermédiaires de Flower :" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +#: ../../source/ref-changelog.md:1078 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -"Jetons un coup d'œil au premier lot d'images et d'étiquettes du premier " -"ensemble d'entraînement (c'est-à-dire ``trainloaders[0]``) avant de " -"poursuivre :" +"*Introduction à l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 +#: ../../source/ref-changelog.md:1080 msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -"La sortie ci-dessus montre un lot aléatoire d'images provenant du premier" -" ``chargeur de formation`` de notre liste de dix ``chargeurs de " -"formation``. 
Elle imprime également les étiquettes associées à chaque " -"image (c'est-à-dire l'une des dix étiquettes possibles que nous avons " -"vues ci-dessus). Si tu exécutes à nouveau la cellule, tu devrais voir un " -"autre lot d'images." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "Étape 1 : Formation centralisée avec PyTorch" +"*Utiliser des stratégies dans l'apprentissage fédéré* : [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/ref-changelog.md:1082 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -"Ensuite, nous allons utiliser PyTorch pour définir un simple réseau " -"neuronal convolutif. Cette introduction suppose une familiarité de base " -"avec PyTorch, elle ne couvre donc pas en détail les aspects liés à " -"PyTorch. Si tu veux plonger plus profondément dans PyTorch, nous te " -"recommandons `DEEP LEARNING WITH PYTORCH : A 60 MINUTE BLITZ " -"`__." 
- -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "Définir le modèle" +"**Nouvelle stratégie FedAvgM (Federated Averaging with Server Momentum)**" +" ([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/ref-changelog.md:1084 +#, fuzzy msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -"Nous utilisons le CNN simple décrit dans le tutoriel `PyTorch " -"`__ :" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" -msgstr "Poursuivons avec les fonctions habituelles de formation et de test :" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "Entraîne le modèle" +"La nouvelle stratégie `FedAvgM` met en œuvre la moyenne fédérée avec le " +"momentum du serveur [Hsu et al., 2019]." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 +#: ../../source/ref-changelog.md:1086 msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -"Nous avons maintenant tous les éléments de base dont nous avons besoin : " -"un ensemble de données, un modèle, une fonction d'entraînement et une " -"fonction de test. 
Assemblons-les pour entraîner le modèle sur l'ensemble " -"de données de l'une de nos organisations (``trainloaders[0]``). Cela " -"simule la réalité de la plupart des projets d'apprentissage automatique " -"aujourd'hui : chaque organisation possède ses propres données et entraîne" -" les modèles uniquement sur ces données internes :" +"**Nouvel exemple de code PyTorch avancé** " +"([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#: ../../source/ref-changelog.md:1088 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -"L'entraînement du CNN simple sur notre fractionnement CIFAR-10 pendant 5 " -"époques devrait se traduire par une précision de l'ensemble de test " -"d'environ 41 %, ce qui n'est pas bon, mais en même temps, cela n'a pas " -"vraiment d'importance pour les besoins de ce tutoriel. L'intention était " -"juste de montrer un pipeline d'entraînement centralisé simpliste qui " -"prépare le terrain pour ce qui vient ensuite - l'apprentissage fédéré !" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "Étape 2 : Apprentissage fédéré avec Flower" +"Un nouvel exemple de code (`advanced_pytorch`) démontre des concepts de " +"fleur avancés avec PyTorch." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/ref-changelog.md:1090 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. 
All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -"L'étape 1 a montré un simple pipeline de formation centralisé. Toutes les" -" données étaient au même endroit (c'est-à-dire un seul ``trainloader`` et" -" un seul ``valloader``). Ensuite, nous allons simuler une situation où " -"nous avons plusieurs ensembles de données dans plusieurs organisations et" -" où nous formons un modèle sur ces organisations à l'aide de " -"l'apprentissage fédéré." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "Mise à jour des paramètres du modèle" +"**Nouvel exemple de code JAX** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/ref-changelog.md:1092 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -"Dans l'apprentissage fédéré, le serveur envoie les paramètres du modèle " -"global au client, et le client met à jour le modèle local avec les " -"paramètres reçus du serveur. 
Il entraîne ensuite le modèle sur les " -"données locales (ce qui modifie les paramètres du modèle localement) et " -"renvoie les paramètres du modèle mis à jour/changés au serveur (ou, " -"alternativement, il renvoie seulement les gradients au serveur, et non " -"pas les paramètres complets du modèle)." +"Un nouvel exemple de code (`jax_from_centralized_to_federated`) montre " +"l'apprentissage fédéré avec JAX et Flower." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/ref-changelog.md:1096 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -"Nous avons besoin de deux fonctions d'aide pour mettre à jour le modèle " -"local avec les paramètres reçus du serveur et pour obtenir les paramètres" -" mis à jour du modèle local : ``set_parameters`` et ``get_parameters``. " -"Les deux fonctions suivantes font exactement cela pour le modèle PyTorch " -"ci-dessus." +"Nouvelle option pour continuer à faire fonctionner Ray si Ray a déjà été " +"initialisé dans `start_simulation` " +"([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/ref-changelog.md:1097 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
-" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -"Les détails de ce fonctionnement ne sont pas vraiment importants ici " -"(n'hésite pas à consulter la documentation PyTorch si tu veux en savoir " -"plus). En substance, nous utilisons ``state_dict`` pour accéder aux " -"tenseurs de paramètres du modèle PyTorch. Les tenseurs de paramètres sont" -" ensuite convertis en/depuis une liste de ndarray NumPy (que Flower sait " -"sérialiser/désérialiser) :" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "Mise en place d'un client Flower" +"Ajout de la prise en charge d'un `ClientManager` personnalisé comme " +"paramètre de `start_simulation` " +"([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#: ../../source/ref-changelog.md:1098 +#, fuzzy msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -"Ceci étant dit, passons à la partie intéressante. Les systèmes " -"d'apprentissage fédérés se composent d'un serveur et de plusieurs " -"clients. 
Dans Flower, nous créons des clients en mettant en œuvre des " -"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." -" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " -"facile à mettre en œuvre et qu'il nous oblige à rédiger moins de modèles " -"de chaudière." +"Nouvelle documentation pour [mettre en œuvre des " +"stratégies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/ref-changelog.md:1099 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -"Pour mettre en œuvre le client Flower, nous créons une sous-classe de " -"``flwr.client.NumPyClient`` et mettons en œuvre les trois méthodes " -"``get_parameters``, ``fit`` et ``evaluate`` :" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +"Nouveau thème de documentation adapté aux mobiles " +"([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#: ../../source/ref-changelog.md:1100 msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -"``fit`` : reçoit les paramètres du modèle 
du serveur, entraîne les " -"paramètres du modèle sur les données locales et renvoie les paramètres du" -" modèle (mis à jour) au serveur" +"Limite la plage de versions pour la dépendance (optionnelle) `ray` pour " +"n'inclure que les versions compatibles (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#: ../../source/ref-changelog.md:1104 msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -"``evaluate`` : reçoit les paramètres du modèle du serveur, évalue les " -"paramètres du modèle sur les données locales et renvoie le résultat de " -"l'évaluation au serveur" +"**Supprime la prise en charge obsolète de Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/ref-changelog.md:1105 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -"Nous avons mentionné que nos clients utiliseront les composants PyTorch " -"définis précédemment pour la formation et l'évaluation des modèles. 
" -"Voyons une simple mise en œuvre du client Flower qui réunit tout cela :" +"**Supprimez KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#: ../../source/ref-changelog.md:1106 msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." 
+"**Supprimer les installations supplémentaires no-op dépréciées** " +"([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "Utilisation du moteur du client virtuel" +#: ../../source/ref-changelog.md:1107 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" +msgstr "" +"**Supprimez les champs proto obsolètes de** `FitRes` **et** `EvaluateRes`" +" ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +#: ../../source/ref-changelog.md:1108 msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -"Dans ce carnet, nous voulons simuler un système d'apprentissage fédéré " -"avec 10 clients sur une seule machine. Cela signifie que le serveur et " -"les 10 clients vivront sur une seule machine et partageront des " -"ressources telles que le CPU, le GPU et la mémoire. Avoir 10 clients " -"signifierait avoir 10 instances de ``FlowerClient`` en mémoire. Faire " -"cela sur une seule machine peut rapidement épuiser les ressources mémoire" -" disponibles, même si seulement un sous-ensemble de ces clients participe" -" à un seul tour d'apprentissage fédéré." 
+"**Supprime la stratégie QffedAvg (remplacée par QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +#: ../../source/ref-changelog.md:1109 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. 
The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "Commencer la formation" +"**Supprime la stratégie DefaultStrategy qui est obsolète** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/ref-changelog.md:1110 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"Nous avons maintenant la classe ``FlowerClient`` qui définit " -"l'entraînement/évaluation côté client et ``client_fn`` qui permet à " -"Flower de créer des instances de ``FlowerClient`` chaque fois qu'il a " -"besoin d'appeler ``fit`` ou ``evaluate`` sur un client particulier. La " -"dernière étape consiste à démarrer la simulation réelle en utilisant " -"``flwr.simulation.start_simulation``." +"**Supprimer la prise en charge obsolète de la valeur de retour de la " +"précision eval_fn** ([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +#: ../../source/ref-changelog.md:1111 msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. 
The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"La fonction ``start_simulation`` accepte un certain nombre d'arguments, " -"parmi lesquels le ``client_fn`` utilisé pour créer les instances " -"``FlowerClient``, le nombre de clients à simuler (``num_clients``), le " -"nombre de tours d'apprentissage fédéré (``num_rounds``), et la stratégie." -" La stratégie encapsule l'approche/algorithme d'apprentissage fédéré, par" -" exemple, *Federated Averaging* (FedAvg)." +"**Supprime la prise en charge obsolète du passage des paramètres initiaux" +" en tant que ndarrays NumPy** " +"([#1142](https://github.com/adap/flower/pull/1142))" + +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" +msgstr "v0.18.0 (2022-02-28)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +#: ../../source/ref-changelog.md:1117 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. 
The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -"Flower dispose d'un certain nombre de stratégies intégrées, mais nous " -"pouvons également utiliser nos propres implémentations de stratégies pour" -" personnaliser presque tous les aspects de l'approche de l'apprentissage " -"fédéré. Pour cet exemple, nous utilisons l'implémentation intégrée " -"``FedAvg`` et nous la personnalisons en utilisant quelques paramètres de " -"base. La dernière étape est l'appel à ``start_simulation`` qui - tu l'as " -"deviné - démarre la simulation :" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "Dans les coulisses" +"**Amélioration de la compatibilité du moteur de client virtuel avec " +"Jupyter Notebook / Google Colab** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" +#: ../../source/ref-changelog.md:1119 +msgid "" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -"Alors, comment cela fonctionne-t-il ? Comment Flower exécute-t-il cette " -"simulation ?" 
+"Les simulations (utilisant le moteur de client virtuel via " +"`start_simulation`) fonctionnent maintenant plus facilement sur les " +"Notebooks Jupyter (y compris Google Colab) après avoir installé Flower " +"avec l'option `simulation` (`pip install 'flwr[simulation]'`)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/ref-changelog.md:1121 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -"Lorsque nous appelons ``start_simulation``, nous disons à Flower qu'il y " -"a 10 clients (``num_clients=10``). Flower demande alors à la stratégie " -"``FedAvg`` de sélectionner des clients. ``FedAvg` sait qu'il doit " -"sélectionner 100% des clients disponibles (``fraction_fit=1.0``), alors " -"il choisit 10 clients au hasard (c'est à dire 100% de 10)." +"**Nouvel exemple de code Jupyter Notebook** " +"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +#: ../../source/ref-changelog.md:1123 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." 
msgstr "" -"Flower demande ensuite aux 10 clients sélectionnés d'entraîner le modèle." -" Lorsque le serveur reçoit les mises à jour des paramètres du modèle de " -"la part des clients, il les transmet à la stratégie (*FedAvg*) pour " -"qu'elle les agrège. La stratégie agrège ces mises à jour et renvoie le " -"nouveau modèle global, qui est ensuite utilisé dans le prochain cycle " -"d'apprentissage fédéré." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "Où est la précision ?" +"Un nouvel exemple de code (`quickstart_simulation`) démontre des " +"simulations de Flower en utilisant le moteur de client virtuel via " +"Jupyter Notebook (y compris Google Colab)." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/ref-changelog.md:1125 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -"Tu as peut-être remarqué que toutes les mesures, à l'exception de " -"``pertes_distribuées``, sont vides. Où est passée la ``{\"précision\" : " -"float(précision)}`` ?" +"**Propriétés du client (aperçu des fonctionnalités)** " +"([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/ref-changelog.md:1127 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." 
+"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -"Flower peut automatiquement agréger les pertes renvoyées par les clients " -"individuels, mais il ne peut pas faire la même chose pour les mesures " -"dans le dictionnaire de mesures générique (celui avec la clé " -"``accuracy``). Les dictionnaires de mesures peuvent contenir des types de" -" mesures très différents et même des paires clé/valeur qui ne sont pas " -"des mesures du tout, donc le cadre ne sait pas (et ne peut pas) savoir " -"comment les gérer automatiquement." +"Les clients peuvent implémenter une nouvelle méthode `get_properties` " +"pour permettre aux stratégies côté serveur d'interroger les propriétés du" +" client." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/ref-changelog.md:1129 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -"En tant qu'utilisateurs, nous devons indiquer au framework comment " -"gérer/agréger ces métriques personnalisées, et nous le faisons en passant" -" des fonctions d'agrégation de métriques à la stratégie. La stratégie " -"appellera alors ces fonctions chaque fois qu'elle recevra des métriques " -"d'ajustement ou d'évaluation de la part des clients. Les deux fonctions " -"possibles sont ``fit_metrics_aggregation_fn`` et " -"``evaluate_metrics_aggregation_fn``." 
+"**Support expérimental d'Android avec TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/ref-changelog.md:1131 msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -"Créons une simple fonction de calcul de la moyenne pondérée pour agréger " -"la mesure de \"précision\" que nous renvoie ``evaluate`` :" +"La prise en charge d'Android est enfin arrivée dans `main` ! Flower est à" +" la fois agnostique au niveau du client et du cadre de travail. On peut " +"intégrer des plates-formes client arbitraires et avec cette version, " +"l'utilisation de Flower sur Android est devenue beaucoup plus facile." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +#: ../../source/ref-changelog.md:1133 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -"La seule chose qui reste à faire est d'indiquer à la stratégie d'appeler " -"cette fonction chaque fois qu'elle reçoit des dictionnaires de métriques " -"d'évaluation de la part des clients :" +"L'exemple utilise TFLite du côté client, ainsi qu'une nouvelle stratégie " +"`FedAvgAndroid`. 
Le client Android et `FedAvgAndroid` sont encore " +"expérimentaux, mais ils constituent un premier pas vers un SDK Android à " +"part entière et une implémentation unifiée de `FedAvg` intégrant la " +"nouvelle fonctionnalité de `FedAvgAndroid`." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +#: ../../source/ref-changelog.md:1135 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -"Nous avons maintenant un système complet qui effectue la formation " -"fédérée et l'évaluation fédérée. Il utilise la fonction ``moyenne " -"pondérée`` pour agréger les mesures d'évaluation personnalisées et " -"calcule une seule mesure de ``précision`` pour tous les clients du côté " -"du serveur." +"**Rendre le temps de garde gRPC configurable par l'utilisateur et " +"diminuer le temps de garde par défaut** " +"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:1137 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." 
msgstr "" -"Les deux autres catégories de mesures (``pertes_centralisées`` et " -"``métriques_centralisées``) sont toujours vides car elles ne s'appliquent" -" que lorsque l'évaluation centralisée est utilisée. La deuxième partie du" -" tutoriel sur les fleurs couvrira l'évaluation centralisée." - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "Remarques finales" +"Le temps de keepalive gRPC par défaut a été réduit pour augmenter la " +"compatibilité de Flower avec davantage d'environnements cloud (par " +"exemple, Microsoft Azure). Les utilisateurs peuvent configurer le temps " +"de keepalive pour personnaliser la pile gRPC en fonction d'exigences " +"spécifiques." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/ref-changelog.md:1139 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -"Félicitations, tu viens d'entraîner un réseau neuronal convolutif, fédéré" -" sur 10 clients ! Avec ça, tu comprends les bases de l'apprentissage " -"fédéré avec Flower. La même approche que tu as vue peut être utilisée " -"avec d'autres cadres d'apprentissage automatique (pas seulement PyTorch) " -"et d'autres tâches (pas seulement la classification des images CIFAR-10)," -" par exemple le NLP avec Hugging Face Transformers ou la parole avec " -"SpeechBrain." 
+"**Nouvel exemple de confidentialité différentielle utilisant Opacus et " +"PyTorch** ([#805](https://github.com/adap/flower/pull/805))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +#: ../../source/ref-changelog.md:1141 msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -"Dans le prochain cahier, nous allons aborder des concepts plus avancés. " -"Tu veux personnaliser ta stratégie ? Initialiser des paramètres côté " -"serveur ? Ou évaluer le modèle agrégé côté serveur ? Nous aborderons tout" -" cela et bien plus encore dans le prochain tutoriel." +"Un nouvel exemple de code (`opacus`) démontre l'apprentissage fédéré " +"différentiellement privé avec Opacus, PyTorch et Flower." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 -#, fuzzy +#: ../../source/ref-changelog.md:1143 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 2 " -"`__ va plus en profondeur sur les stratégies et toutes les " -"choses avancées que tu peux construire avec elles." - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -#, fuzzy -msgid "Use a federated learning strategy" -msgstr "Stratégie de moyenne fédérée." 
+"**Nouvel exemple de code pour les Transformers à visage embrassant** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 -#, fuzzy +#: ../../source/ref-changelog.md:1145 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -"Bienvenue dans la prochaine partie du tutoriel sur l'apprentissage " -"fédéré. Dans les parties précédentes de ce tutoriel, nous avons présenté " -"l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " -"`__)." +"Un nouvel exemple de code (`quickstart_huggingface`) démontre " +"l'utilisation des transformateurs Hugging Face avec Flower." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1147 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -"Dans ce carnet, nous allons commencer à personnaliser le système " -"d'apprentissage fédéré que nous avons construit dans le carnet " -"d'introduction (toujours en utilisant `Flower `__ et " -"`PyTorch `__)." - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -#, fuzzy -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "Dépassons FedAvg avec les stratégies florales !" 
- -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "Personnalisation de la stratégie" +"**Nouvel exemple de code MLCube** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/ref-changelog.md:1149 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" -"Jusqu'à présent, tout devrait te sembler familier si tu as travaillé sur " -"le cahier d'introduction. Avec cela, nous sommes prêts à présenter un " -"certain nombre de nouvelles fonctionnalités." - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "Paramètres côté serveur **initialisation**" +"Un nouvel exemple de code (`quickstart_mlcube`) démontre l'utilisation de" +" MLCube avec Flower." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/ref-changelog.md:1151 msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. 
Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -"Flower, par défaut, initialise le modèle global en demandant à un client " -"aléatoire les paramètres initiaux. Dans de nombreux cas, nous voulons " -"cependant avoir plus de contrôle sur l'initialisation des paramètres. " -"Flower te permet donc de passer directement les paramètres initiaux à la " -"Stratégie :" +"**Serveur et client compatibles SSL** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/ref-changelog.md:1153 msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -"Le fait de passer ``initial_parameters`` à la stratégie ``FedAvg`` " -"empêche Flower de demander les paramètres initiaux à l'un des clients. 
Si" -" nous regardons de près, nous pouvons voir que les journaux ne montrent " -"aucun appel à la méthode ``FlowerClient.get_parameters``." - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" -msgstr "Commencer par une stratégie personnalisée" +"SSL permet d'établir des connexions cryptées et sécurisées entre les " +"clients et les serveurs. Cette version met en open-source " +"l'implémentation gRPC sécurisée de Flower afin de rendre les canaux de " +"communication cryptés accessibles à tous les utilisateurs de Flower." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/ref-changelog.md:1155 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -"Elle accepte un certain nombre d'arguments, parmi lesquels le " -"``client_fn`` utilisé pour créer les instances de ``FlowerClient``, le " -"nombre de clients à simuler ``num_clients``, le nombre de rounds " -"``num_rounds``, et la stratégie." +"**Mise à jour** `FedAdam` **et** `FedYogi` **stratégies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/ref-changelog.md:1157 msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different " -"strategy this time:" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -"La stratégie englobe l'approche/l'algorithme d'apprentissage fédéré, par " -"exemple, ``FedAvg`` ou ``FedAdagrad``. Essayons d'utiliser une stratégie " -"différente cette fois-ci :" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "Paramètre côté serveur **évaluation**" +"`FedAdam` et `FedAdam` correspondent à la dernière version de l'article " +"sur l'optimisation fédérée adaptative." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/ref-changelog.md:1159 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -"Flower peut évaluer le modèle agrégé côté serveur ou côté client. Les " -"évaluations côté client et côté serveur sont similaires à certains " -"égards, mais différentes à d'autres." +"**Initialise** `start_simulation` **avec une liste d'ID de clients** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +#: ../../source/ref-changelog.md:1161 msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. 
We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -"**L'évaluation centralisée** (ou *évaluation côté serveur*) est " -"conceptuellement simple : elle fonctionne de la même manière que " -"l'évaluation dans l'apprentissage automatique centralisé. S'il existe un " -"ensemble de données côté serveur qui peut être utilisé à des fins " -"d'évaluation, alors c'est parfait. Nous pouvons évaluer le modèle " -"nouvellement agrégé après chaque cycle de formation sans avoir à envoyer " -"le modèle aux clients. Nous avons également la chance que l'ensemble de " -"notre ensemble de données d'évaluation soit disponible à tout moment." +"`start_simulation` peut maintenant être appelé avec une liste " +"d'identifiants de clients (`clients_ids`, type : `List[str]`). Ces " +"identifiants seront passés à `client_fn` chaque fois qu'un client doit " +"être initialisé, ce qui peut faciliter le chargement de partitions de " +"données qui ne sont pas accessibles par des identifiants `int`." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/ref-changelog.md:1165 msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. 
But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -"**L'évaluation fédérée** (ou évaluation côté client) est plus complexe, " -"mais aussi plus puissante : elle ne nécessite pas d'ensemble de données " -"centralisé et nous permet d'évaluer les modèles sur un plus grand " -"ensemble de données, ce qui donne souvent des résultats d'évaluation plus" -" réalistes. En fait, de nombreux scénarios exigent que nous utilisions " -"l'évaluation fédérée** si nous voulons obtenir des résultats d'évaluation" -" représentatifs. Mais cette puissance a un coût : une fois que nous " -"commençons à évaluer côté client, nous devons savoir que notre ensemble " -"de données d'évaluation peut changer au cours des cycles d'apprentissage " -"consécutifs si ces clients ne sont pas toujours disponibles. De plus, " -"l'ensemble de données détenu par chaque client peut également changer au " -"cours des cycles consécutifs. Cela peut conduire à des résultats " -"d'évaluation qui ne sont pas stables, donc même si nous ne changions pas " -"le modèle, nous verrions nos résultats d'évaluation fluctuer au cours des" -" cycles consécutifs." 
+"Mettre à jour le calcul de `num_examples` dans les exemples de code " +"PyTorch dans ([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/ref-changelog.md:1166 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -"Nous avons vu comment l'évaluation fédérée fonctionne du côté client " -"(c'est-à-dire en implémentant la méthode ``evaluate`` dans " -"``FlowerClient``). Voyons maintenant comment nous pouvons évaluer les " -"paramètres du modèle agrégé du côté serveur :" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "Envoi/réception de valeurs arbitraires vers/depuis les clients" +"Exposer la version de Flower à travers `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 -#, fuzzy +#: ../../source/ref-changelog.md:1167 msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. 
In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"`start_server` dans `app.py` renvoie maintenant un objet `History` " +"contenant les métriques de l'entraînement " +"([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +#: ../../source/ref-changelog.md:1168 msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -"Comment pouvons-nous donc envoyer ce dictionnaire de configuration du " -"serveur aux clients ? 
Les stratégies de Flower intégrées fournissent un " -"moyen de le faire, et cela fonctionne de la même façon que l'évaluation " -"côté serveur. Nous fournissons une fonction à la stratégie, et la " -"stratégie appelle cette fonction pour chaque cycle d'apprentissage fédéré" -" :" +"Rendre `max_workers` (utilisé par `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +#: ../../source/ref-changelog.md:1169 msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -"Ensuite, nous allons simplement passer cette fonction à la stratégie " -"FedAvg avant de commencer la simulation :" +"Augmente le temps de sommeil après le démarrage du serveur à trois " +"secondes dans tous les exemples de code " +"([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/ref-changelog.md:1170 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -"Comme nous pouvons le voir, les journaux des clients incluent maintenant " -"le cycle actuel d'apprentissage fédéré (qu'ils lisent depuis le " -"dictionnaire ``config``). 
Nous pouvons également configurer " -"l'apprentissage local pour qu'il s'exécute pendant une époque au cours du" -" premier et du deuxième cycle d'apprentissage fédéré, puis pendant deux " -"époques au cours du troisième cycle." +"Ajout d'une nouvelle section FAQ à la documentation " +"([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1171 msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -"Les clients peuvent également renvoyer des valeurs arbitraires au " -"serveur. Pour ce faire, ils renvoient un dictionnaire depuis ``fit`` " -"et/ou ``evaluate``. Nous avons vu et utilisé ce concept tout au long de " -"ce carnet sans le mentionner explicitement : notre ``FlowerClient`` " -"renvoie un dictionnaire contenant une paire clé/valeur personnalisée en " -"tant que troisième valeur de retour dans ``evaluate``." - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" -msgstr "Mise à l'échelle de l'apprentissage fédéré" +"Et bien d'autres changements sous le capot, des mises à jour de la " +"bibliothèque, des modifications de la documentation et des améliorations " +"de l'outillage !" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/ref-changelog.md:1175 msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." 
+"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"Comme dernière étape de ce carnet, voyons comment nous pouvons utiliser " -"Flower pour expérimenter avec un grand nombre de clients." +"**Supprimé** `flwr_example` **et** `flwr_experimental` **de la version " +"release build** ([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format +#: ../../source/ref-changelog.md:1177 msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -"Nous avons maintenant 1000 partitions, chacune contenant 45 exemples " -"d'entraînement et 5 exemples de validation. Etant donné que le nombre " -"d'exemples d'entraînement sur chaque client est assez faible, nous " -"devrions probablement entraîner le modèle un peu plus longtemps, nous " -"configurons donc les clients pour qu'ils effectuent 3 époques " -"d'entraînement local. 
Nous devrions également ajuster la fraction de " -"clients sélectionnés pour l'entraînement à chaque tour (nous ne voulons " -"pas que les 1000 clients participent à chaque tour), nous ajustons donc " -"``fraction_fit`` à ``0.05``, ce qui signifie que seulement 5% des clients" -" disponibles (donc 50 clients) seront sélectionnés pour l'entraînement à " -"chaque tour :" +"Les paquets `flwr_example` et `flwr_experimental` ont été dépréciés " +"depuis Flower 0.12.0 et ils ne sont plus inclus dans les builds de " +"Flower. Les extras associés (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) sont maintenant no-op et seront " +"supprimés dans une prochaine version." -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 -msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" -msgstr "" -"Dans ce carnet, nous avons vu comment nous pouvons progressivement " -"améliorer notre système en personnalisant la stratégie, en initialisant " -"les paramètres côté serveur, en choisissant une stratégie différente et " -"en évaluant les modèles côté serveur. C'est une sacrée flexibilité avec " -"si peu de code, n'est-ce pas ?" +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" +msgstr "v0.17.0 (2021-09-24)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +#: ../../source/ref-changelog.md:1183 msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. 
" -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -"Dans les sections ultérieures, nous avons vu comment nous pouvons " -"communiquer des valeurs arbitraires entre le serveur et les clients pour " -"personnaliser entièrement l'exécution côté client. Grâce à cette " -"capacité, nous avons construit une simulation d'apprentissage fédéré à " -"grande échelle en utilisant le moteur de client virtuel Flower et nous " -"avons mené une expérience impliquant 1000 clients dans la même charge de " -"travail - le tout dans un carnet Jupyter !" +"**Moteur expérimental de client virtuel** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 -#, fuzzy +#: ../../source/ref-changelog.md:1185 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." 
msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 3 [WIP] " -"`__ montre comment construire une ``Stratégie`` entièrement " -"personnalisée à partir de zéro." - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "Qu'est-ce que l'apprentissage fédéré ?" +"L'un des objectifs de Flower est de permettre la recherche à grande " +"échelle. Cette version donne un premier aperçu (expérimental) d'une " +"nouvelle fonctionnalité majeure, connue sous le nom de code de moteur de " +"client virtuel. Les clients virtuels permettent des simulations qui " +"s'étendent à un (très) grand nombre de clients sur une seule machine ou " +"une grappe de calcul. La façon la plus simple de tester la nouvelle " +"fonctionnalité est de regarder les deux nouveaux exemples de code appelés" +" `quickstart_simulation` et `simulation_pytorch`." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 -#, fuzzy +#: ../../source/ref-changelog.md:1187 msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -"Dans ce tutoriel, tu apprendras ce qu'est l'apprentissage fédéré, tu " -"construiras ton premier système dans Flower, et tu l'étendras " -"progressivement. Si tu travailles sur toutes les parties du tutoriel, tu " -"seras capable de construire des systèmes d'apprentissage fédéré avancés " -"qui se rapprochent de l'état actuel de l'art dans le domaine." 
+"La fonction est encore expérimentale, il n'y a donc aucune garantie de " +"stabilité pour l'API. Elle n'est pas non plus tout à fait prête pour le " +"prime time et s'accompagne de quelques mises en garde connues. Cependant," +" les personnes curieuses sont encouragées à l'essayer et à faire part de " +"leurs réflexions." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/ref-changelog.md:1189 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -"🧑‍🏫 Ce tutoriel part de zéro et n'attend aucune familiarité avec " -"l'apprentissage fédéré. Seule une compréhension de base de la science des" -" données et de la programmation Python est supposée." +"**Nouvelles stratégies intégrées** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 -#, fuzzy +#: ../../source/ref-changelog.md:1191 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -"`Star Flower on GitHub `__ ⭐️ et " -"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " -"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " -"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" -" le canal ``#questions``." 
- -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "Apprentissage automatique classique" +"FedYogi - Stratégie d'apprentissage fédéré utilisant Yogi côté serveur. " +"Mise en oeuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/ref-changelog.md:1192 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -"Avant de commencer à discuter de l'apprentissage fédéré, récapitulons " -"rapidement la façon dont la plupart des apprentissages automatiques " -"fonctionnent aujourd'hui." +"FedAdam - Stratégie d'apprentissage fédéré utilisant Adam côté serveur. " +"Mise en œuvre basée sur https://arxiv.org/abs/2003.00295" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/ref-changelog.md:1194 msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -"Dans l'apprentissage automatique, nous avons un modèle et des données. Le" -" modèle peut être un réseau neuronal (comme illustré ici), ou quelque " -"chose d'autre, comme la régression linéaire classique." 
+"**Nouvel exemple de code PyTorch Lightning** " +"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#: ../../source/ref-changelog.md:1196 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" +"**Nouvel exemple de code d'autocodage variationnel** " +"([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "Modèle et données" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/ref-changelog.md:1198 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -"Nous entraînons le modèle en utilisant les données pour effectuer une " -"tâche utile. Une tâche peut consister à détecter des objets dans des " -"images, à transcrire un enregistrement audio ou à jouer à un jeu comme le" -" Go." 
+"**Nouvel exemple de code scikit-learn** " +"([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|01471150fd5144c080a176b43e92a3ff|" +#: ../../source/ref-changelog.md:1200 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" +"**Nouvelle stratégie expérimentale TensorBoard** " +"([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "Entraîne le modèle à l'aide des données" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 -#, fuzzy +#: ../../source/ref-changelog.md:1204 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -"Dans la pratique, les données d'entraînement avec lesquelles nous " -"travaillons ne proviennent pas de la machine sur laquelle nous entraînons" -" le modèle. Elles sont créées ailleurs." +"Amélioration de l'exemple de code TensorFlow avancé " +"([#769](https://github.com/adap/flower/pull/769))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 -#, fuzzy +#: ../../source/ref-changelog.md:1205 msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." 
+"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -"Elle prend naissance sur un smartphone par l'interaction de l'utilisateur" -" avec une application, une voiture qui collecte des données de capteurs, " -"un ordinateur portable qui reçoit des entrées via le clavier, ou un haut-" -"parleur intelligent qui écoute quelqu'un qui essaie de chanter une " -"chanson." +"Avertissement lorsque `min_available_clients` est mal configuré " +"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#: ../../source/ref-changelog.md:1206 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" +"Amélioration de la documentation sur le serveur gRPC " +"([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "Données sur un téléphone" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +#: ../../source/ref-changelog.md:1207 msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -"Il est également important de mentionner que cet \"ailleurs\" n'est " -"généralement pas un seul endroit, mais plusieurs. Il peut s'agir de " -"plusieurs appareils fonctionnant tous avec la même application. Mais il " -"peut également s'agir de plusieurs organisations, qui génèrent toutes des" -" données pour la même tâche." 
+"Amélioration du message d'erreur dans `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|3047bbce54b34099ae559963d0420d79|" +#: ../../source/ref-changelog.md:1208 +msgid "" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" +"Exemple de code de démarrage rapide PyTorch amélioré " +"([#852](https://github.com/adap/flower/pull/852))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "Les données se trouvent sur de nombreux appareils" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#: ../../source/ref-changelog.md:1212 msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -"Ainsi, pour utiliser l'apprentissage automatique, ou tout autre type " -"d'analyse de données, l'approche utilisée par le passé consistait à " -"collecter toutes les données sur un serveur central. Ce serveur peut se " -"trouver quelque part dans un centre de données, ou quelque part dans le " -"cloud." +"**Désactivé l'évaluation finale distribuée** " +"([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#: ../../source/ref-changelog.md:1214 +msgid "" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." 
msgstr "" +"Le comportement précédent consistait à effectuer un dernier tour " +"d'évaluation distribuée sur tous les clients connectés, ce qui n'est " +"souvent pas nécessaire (par exemple, lors de l'utilisation de " +"l'évaluation côté serveur). Le comportement précédent peut être activé en" +" passant `force_final_distributed_eval=True` à `start_server`." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "Collecte centralisée des données" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 -#, fuzzy +#: ../../source/ref-changelog.md:1216 msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -"Une fois que toutes les données sont rassemblées en un seul endroit, nous" -" pouvons enfin utiliser des algorithmes d'apprentissage automatique pour " -"entraîner notre modèle sur les données. C'est l'approche d'apprentissage " -"automatique sur laquelle nous nous sommes fondamentalement toujours " -"appuyés." +"**Renommé stratégie q-FedAvg** " +"([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|c24c1478b30e4f74839208628a842d1e|" +#: ../../source/ref-changelog.md:1218 +msgid "" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." 
msgstr "" +"La stratégie nommée `QffedAvg` a été renommée en `QFedAvg` pour mieux " +"refléter la notation donnée dans l'article original (q-FFL est l'objectif" +" d'optimisation, q-FedAvg est le solveur proposé). Notez que la classe " +"`QffedAvg` originale (maintenant obsolète) est toujours disponible pour " +"des raisons de compatibilité (elle sera supprimée dans une prochaine " +"version)." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "Formation au modèle central" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "Les défis de l'apprentissage automatique classique" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#: ../../source/ref-changelog.md:1220 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -"L'approche classique de l'apprentissage automatique que nous venons de " -"voir peut être utilisée dans certains cas. Parmi les grands exemples, on " -"peut citer la catégorisation des photos de vacances, ou l'analyse du " -"trafic web. Des cas, où toutes les données sont naturellement disponibles" -" sur un serveur centralisé." +"**Exemple de code déprécié et renommé** `simulation_pytorch` **en** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#: ../../source/ref-changelog.md:1222 +msgid "" +"This example has been replaced by a new example. 
The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" +"Cet exemple a été remplacé par un nouvel exemple. Le nouvel exemple est " +"basé sur le moteur expérimental du client virtuel, qui deviendra la " +"nouvelle méthode par défaut pour effectuer la plupart des types de " +"simulations à grande échelle dans Flower. L'exemple existant a été " +"conservé à des fins de référence, mais il pourrait être supprimé à " +"l'avenir." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "Possibilité de centralisation" +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" +msgstr "v0.16.0 (2021-05-11)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +#: ../../source/ref-changelog.md:1228 msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -"Mais cette approche ne peut pas être utilisée dans de nombreux autres cas" -" : lorsque les données ne sont pas disponibles sur un serveur centralisé," -" ou lorsque les données disponibles sur un serveur ne sont pas " -"suffisantes pour former un bon modèle." 
+"**Nouvelles stratégies intégrées** "
+"([#549](https://github.com/adap/flower/pull/549))"
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150
-msgid "|9980b5213db547d0b8024a50992b9e3f|"
+#: ../../source/ref-changelog.md:1230
+msgid "(abstract) FedOpt"
+msgstr "(abstrait) FedOpt"
+
+#: ../../source/ref-changelog.md:1233
+msgid ""
+"**Custom metrics for server and strategies** "
+"([#717](https://github.com/adap/flower/pull/717))"
msgstr ""
+"**Métriques personnalisées pour le serveur et les stratégies** "
+"([#717](https://github.com/adap/flower/pull/717))"
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175
-msgid "Centralized impossible"
-msgstr "Impossible de centraliser"
+#: ../../source/ref-changelog.md:1235
+msgid ""
+"The Flower server is now fully task-agnostic, all remaining instances of "
+"task-specific metrics (such as `accuracy`) have been replaced by custom "
+"metrics dictionaries. Flower 0.15 introduced the capability to pass a "
+"dictionary containing custom metrics from client to server. As of this "
+"release, custom metrics replace task-specific metrics on the server."
+msgstr ""
+"Le serveur Flower est maintenant totalement agnostique, toutes les "
+"instances restantes de métriques spécifiques à une tâche (telles que "
+"`accuracy`) ont été remplacées par des dictionnaires de métriques "
+"personnalisées. Flower 0.15 a introduit la possibilité de passer un "
+"dictionnaire contenant des métriques personnalisées du client au serveur."
+" À partir de cette version, les métriques personnalisées remplacent les "
+"métriques spécifiques à une tâche sur le serveur."
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156
+#: ../../source/ref-changelog.md:1237
 #, fuzzy
 msgid ""
-"There are many reasons why the classic centralized machine learning "
-"approach does not work for a large number of highly important real-world "
-"use cases. 
Those reasons include:" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -"Il existe de nombreuses raisons pour lesquelles l'approche classique " -"centralisée de l'apprentissage automatique ne fonctionne pas pour un " -"grand nombre de cas d'utilisation très importants dans le monde réel, " -"notamment :" +"Les dictionnaires de métriques personnalisés sont maintenant utilisés " +"dans deux API orientées vers l'utilisateur : ils sont renvoyés par les " +"méthodes de stratégie `aggregate_fit`/`aggregate_evaluate` et ils " +"permettent aux fonctions d'évaluation passées aux stratégies intégrées " +"(via `eval_fn`) de renvoyer plus de deux métriques d'évaluation. Les " +"stratégies peuvent même renvoyer des dictionnaires de métriques " +"*agrégées* pour que le serveur puisse en garder la trace." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#: ../../source/ref-changelog.md:1239 #, fuzzy msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." 
+"Strategy implementations should migrate their `aggregate_fit` and "
+"`aggregate_evaluate` methods to the new return type (e.g., by simply "
+"returning an empty `{}`), server-side evaluation functions should migrate"
+" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`."
msgstr ""
-"**Réglementations** : GDPR (Europe), CCPA (Californie), PIPEDA (Canada), "
-"LGPD (Brésil), PDPL (Argentine), KVKK (Turquie), POPI (Afrique du Sud), "
-"FSS (Russie), CDPR (Chine), PDPB (Inde), PIPA (Corée), APPI (Japon), PDP "
-"(Indonésie), PDPA (Singapour), APP (Australie), et d'autres "
-"réglementations protègent les données sensibles contre le déplacement. En"
-" fait, ces réglementations empêchent même parfois des organisations "
-"individuelles de combiner les données de leurs propres utilisateurs pour "
-"la formation à l'intelligence artificielle parce que ces utilisateurs "
-"vivent dans différentes parties du monde, et que leurs données sont "
-"régies par des réglementations différentes en matière de protection des "
-"données."
+"Les implémentations de Strategy doivent migrer leurs méthodes "
+"`aggregate_fit` et `aggregate_evaluate` vers le nouveau type de retour "
+"(par exemple, en renvoyant simplement un `{}` vide), les fonctions "
+"d'évaluation côté serveur doivent migrer de `return loss, accuracy` à "
+"`return loss, {\"accuracy\": accuracy}`."
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160
+#: ../../source/ref-changelog.md:1241
 msgid ""
-"**User preference**: In addition to regulation, there are use cases where"
-" users just expect that no data leaves their device, ever. If you type "
-"your passwords and credit card info into the digital keyboard of your "
-"phone, you don't expect those passwords to end up on the server of the "
-"company that developed that keyboard, do you? In fact, that use case was "
-"the reason federated learning was invented in the first place."
+"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." msgstr "" -"**Préférence de l'utilisateur** : En plus de la réglementation, il existe" -" des cas d'utilisation où les utilisateurs s'attendent tout simplement à " -"ce qu'aucune donnée ne quitte leur appareil, jamais. Si tu tapes tes mots" -" de passe et tes informations de carte de crédit sur le clavier numérique" -" de ton téléphone, tu ne t'attends pas à ce que ces mots de passe " -"finissent sur le serveur de l'entreprise qui a développé ce clavier, n" -"'est-ce pas ? En fait, ce cas d'utilisation est la raison pour laquelle " -"l'apprentissage fédéré a été inventé en premier lieu." +"Les types de retour du style Flower 0.15 sont dépréciés (mais toujours " +"pris en charge), la compatibilité sera supprimée dans une prochaine " +"version." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -#, fuzzy +#: ../../source/ref-changelog.md:1243 msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -"**volume de données** : certains capteurs, comme les caméras, produisent " -"un volume de données si important qu'il n'est ni possible ni économique " -"de collecter toutes les données (en raison, par exemple, de la bande " -"passante ou de l'efficacité des communications). 
Pensez à un service " -"ferroviaire national comptant des centaines de gares à travers le pays. " -"Si chacune de ces gares est équipée d'un certain nombre de caméras de " -"sécurité, le volume de données brutes sur les appareils qu'elles " -"produisent nécessite une infrastructure incroyablement puissante et " -"excessivement coûteuse pour les traiter et les stocker. Et la plupart de " -"ces données ne sont même pas utiles." +"**Avertissements de migration pour les fonctionnalités obsolètes** " +"([#690](https://github.com/adap/flower/pull/690))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" +#: ../../source/ref-changelog.md:1245 +msgid "" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" -"Voici quelques exemples où l'apprentissage automatique centralisé ne " -"fonctionne pas :" +"Les versions antérieures de Flower ont souvent été migrées vers de " +"nouvelles API, tout en maintenant la compatibilité avec les anciennes " +"API. Cette version introduit des messages d'avertissement détaillés si " +"l'utilisation d'API obsolètes est détectée. Les nouveaux messages " +"d'avertissement fournissent souvent des détails sur la façon de migrer " +"vers des API plus récentes, facilitant ainsi la transition d'une version " +"à l'autre." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 -#, fuzzy +#: ../../source/ref-changelog.md:1247 msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -"Des dossiers médicaux sensibles provenant de plusieurs hôpitaux pour " -"former des modèles de détection du cancer" +"Amélioration des docs et des docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" +msgstr "Exemple et documentation MXNet" + +#: ../../source/ref-changelog.md:1251 msgid "" -"Financial information from different organizations to detect financial " -"fraud" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -"Informations financières provenant de différentes organisations pour " -"détecter les fraudes financières" +"Mise en œuvre de FedBN dans l'exemple PyTorch : De la centralisation à la" +" fédération ([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +#: ../../source/ref-changelog.md:1255 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -"Les données de localisation de ta 
voiture électrique pour mieux prédire " -"l'autonomie" +"**Serveur agnostique de sérialisation** " +"([#721](https://github.com/adap/flower/pull/721))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +#: ../../source/ref-changelog.md:1257 +msgid "" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -"Messages cryptés de bout en bout pour former de meilleurs modèles " -"d'autocomplétion" +"Le serveur Flower est désormais totalement agnostique en matière de " +"sérialisation. L'utilisation antérieure de la classe `Weights` (qui " +"représente les paramètres sous forme de tableaux NumPy désérialisés) a " +"été remplacée par la classe `Parameters` (par exemple, dans `Strategy`). " +"Les objets `Parameters` sont totalement agnostiques en matière de " +"sérialisation et représentent les paramètres sous forme de tableaux " +"d'octets, les attributs `tensor_type` indiquent comment ces tableaux " +"d'octets doivent être interprétés (par exemple, pour la " +"sérialisation/désérialisation)." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 -#, fuzzy +#: ../../source/ref-changelog.md:1259 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. 
But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -"La popularité des systèmes améliorant la confidentialité comme le " -"navigateur `Brave `__ ou le messager `Signal " -"`__ montre que les utilisateurs se soucient de la " -"confidentialité. En fait, ils choisissent la version améliorant la " -"confidentialité plutôt que d'autres alternatives, si une telle " -"alternative existe. Mais que pouvons-nous faire pour appliquer " -"l'apprentissage automatique et la science des données à ces cas afin " -"d'utiliser les données privées ? Après tout, ce sont tous des domaines " -"qui bénéficieraient de manière significative des récentes avancées en " -"matière d'IA." - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "Apprentissage fédéré" +"Les stratégies intégrées mettent en œuvre cette approche en gérant en " +"interne la sérialisation et la désérialisation de `Weights`. Les " +"implémentations de stratégies personnalisées ou tierces doivent être " +"mises à jour avec les définitions de méthodes de stratégie légèrement " +"modifiées. Les auteurs de stratégies peuvent consulter le PR " +"[#721](https://github.com/adap/flower/pull/721) pour voir comment les " +"stratégies peuvent facilement migrer vers le nouveau format." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#: ../../source/ref-changelog.md:1261 msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -"L'apprentissage fédéré inverse simplement cette approche. Il permet " -"l'apprentissage automatique sur des données distribuées en déplaçant la " -"formation vers les données, au lieu de déplacer les données vers la " -"formation. Voici l'explication en une seule phrase :" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "Apprentissage automatique central : déplace les données vers le calcul" +"Déclassé `flwr.server.Server.evaluate`, utiliser " +"`flwr.server.Server.evaluate_round` à la place " +"([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "Apprentissage (machine) fédéré : déplacer le calcul vers les données" +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" +msgstr "v0.15.0 (2021-03-12)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#: ../../source/ref-changelog.md:1267 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. 
We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -"Ce faisant, il nous permet d'utiliser l'apprentissage automatique (et " -"d'autres approches de science des données) dans des domaines où cela " -"n'était pas possible auparavant. Nous pouvons désormais former " -"d'excellents modèles d'IA médicale en permettant à différents hôpitaux de" -" travailler ensemble. Nous pouvons résoudre les fraudes financières en " -"formant des modèles d'IA sur les données de différentes institutions " -"financières. Nous pouvons créer de nouvelles applications d'amélioration " -"de la confidentialité (telles que la messagerie sécurisée) qui ont une " -"meilleure IA intégrée que leurs alternatives d'amélioration de la " -"confidentialité. Et ce ne sont là que quelques exemples qui me viennent à" -" l'esprit. Au fur et à mesure que nous déployons l'apprentissage fédéré, " -"nous découvrons de plus en plus de domaines qui peuvent soudainement être" -" réinventés parce qu'ils ont maintenant accès à de vastes quantités de " -"données auparavant inaccessibles." +"**Initialisation des paramètres côté serveur** " +"([#658](https://github.com/adap/flower/pull/658))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +#: ../../source/ref-changelog.md:1269 msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." +"Model parameters can now be initialized on the server-side. 
Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -"Comment fonctionne l'apprentissage fédéré ? Commençons par une " -"explication intuitive." - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "L'apprentissage fédéré en cinq étapes" +"Les paramètres du modèle peuvent maintenant être initialisés côté " +"serveur. L'initialisation des paramètres côté serveur fonctionne via une " +"nouvelle méthode `Strategy` appelée `initialize_parameters`." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "Étape 0 : Initialisation du modèle global" +#: ../../source/ref-changelog.md:1271 +msgid "" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." +msgstr "" +"Les stratégies intégrées prennent en charge un nouvel argument du " +"constructeur appelé `initial_parameters` pour définir les paramètres " +"initiaux. Les stratégies intégrées fourniront ces paramètres initiaux au " +"serveur au démarrage et les supprimeront ensuite pour libérer la mémoire." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +#: ../../source/ref-changelog.md:1290 msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." 
msgstr "" -"Nous commençons par initialiser le modèle sur le serveur. C'est " -"exactement la même chose dans l'apprentissage centralisé classique : nous" -" initialisons les paramètres du modèle, soit de façon aléatoire, soit à " -"partir d'un point de contrôle précédemment sauvegardé." +"Si aucun paramètre initial n'est fourni à la stratégie, le serveur " +"continuera à utiliser le comportement actuel (à savoir qu'il demandera à " +"l'un des clients connectés ses paramètres et les utilisera comme " +"paramètres globaux initiaux)." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#: ../../source/ref-changelog.md:1294 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" +"Déclasser `flwr.server.strategy.DefaultStrategy` (migrer vers " +"`flwr.server.strategy.FedAvg`, qui est équivalent)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "Initialise le modèle global" +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" +msgstr "v0.14.0 (2021-02-18)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +#: ../../source/ref-changelog.md:1300 msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -"Étape 1 : envoyer le modèle à un certain nombre d'organisations/appareils" -" connectés (nœuds clients)" +"**Généralisé** `Client.fit` **et** `Client.evaluate` **valeurs de " +"retour** ([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" -#: 
../../source/tutorial-series-what-is-federated-learning.ipynb:219 -#, fuzzy +#: ../../source/ref-changelog.md:1302 msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -"Ensuite, nous envoyons les paramètres du modèle global aux nœuds clients " -"connectés (par exemple, les appareils périphériques comme les smartphones" -" ou les serveurs appartenant à des organisations). Cela permet de " -"s'assurer que chaque nœud participant commence sa formation locale en " -"utilisant les mêmes paramètres de modèle. Nous n'utilisons souvent que " -"quelques-uns des nœuds connectés au lieu de tous les nœuds. La raison en " -"est que la sélection d'un nombre croissant de nœuds clients a des " -"rendements décroissants." +"Les clients peuvent maintenant renvoyer un dictionnaire supplémentaire " +"associant les clés `str` aux valeurs des types suivants : `bool`, " +"`bytes`, `float`, `int`, `str`. Cela signifie que l'on peut renvoyer des " +"valeurs presque arbitraires de `fit`/`evaluate` et les utiliser du côté " +"du serveur !" 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|032eb6fed6924ac387b9f13854919196|" +#: ../../source/ref-changelog.md:1304 +msgid "" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" +"Cette amélioration a également permis de rendre plus cohérents les types " +"de retour entre `fit` et `evaluate` : `evaluate` devrait maintenant " +"retourner un tuple `(float, int, dict)` représentant la perte, le nombre " +"d'exemples, et un dictionnaire contenant des valeurs arbitraires " +"spécifiques au problème comme la précision." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "Envoyer le modèle global" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +#: ../../source/ref-changelog.md:1306 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." msgstr "" -"Étape 2 : Entraîne le modèle localement sur les données de chaque " -"organisation/appareil (nœud client)" +"Au cas où tu te poserais la question : cette fonctionnalité est " +"compatible avec les projets existants, la valeur de retour supplémentaire" +" du dictionnaire est facultative. 
Le nouveau code doit cependant migrer " +"vers les nouveaux types de retour pour être compatible avec les " +"prochaines versions de Flower (`fit` : `List[np.ndarray], int, Dict[str, " +"Scalar]`, `evaluate` : `float, int, Dict[str, Scalar]`). Voir l'exemple " +"ci-dessous pour plus de détails." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +#: ../../source/ref-changelog.md:1308 msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -"Maintenant que tous les nœuds clients (sélectionnés) disposent de la " -"dernière version des paramètres du modèle global, ils commencent " -"l'entraînement local. Ils utilisent leur propre ensemble de données " -"locales pour entraîner leur propre modèle local. Ils n'entraînent pas le " -"modèle jusqu'à la convergence totale, mais ils ne s'entraînent que " -"pendant un petit moment. Il peut s'agir d'une seule époque sur les " -"données locales, ou même de quelques étapes (mini-batchs)." 
+"*Exemple de code:* note les valeurs de retour du dictionnaire " +"supplémentaires dans `FlwrClient.fit` et `FlwrClient.evaluate` :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#: ../../source/ref-changelog.md:1323 +msgid "" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" +"**Généralisé** `config` **argument dans** `Client.fit` **et** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "Forme-toi aux données locales" +#: ../../source/ref-changelog.md:1325 +msgid "" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." +msgstr "" +"L'argument `config` était auparavant de type `Dict[str, str]`, ce qui " +"signifie que les valeurs du dictionnaire devaient être des chaînes. La " +"nouvelle version généralise cela pour permettre les valeurs des types " +"suivants : `bool`, `bytes`, `float`, `int`, `str`." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "Étape 3 : Renvoyer les mises à jour du modèle au serveur" +#: ../../source/ref-changelog.md:1327 +msgid "" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" +msgstr "" +"Cela signifie que l'on peut maintenant passer des valeurs presque " +"arbitraires à `fit`/`evaluate` en utilisant le dictionnaire `config`. 
" +"Yay, plus de `str(epochs)` du côté serveur et `int(config[\"epochs\"])` " +"du côté client !" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +#: ../../source/ref-changelog.md:1329 msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -"Après l'entraînement local, chaque nœud client possède une version " -"légèrement différente des paramètres du modèle qu'il a reçus à l'origine." -" Les paramètres sont tous différents parce que chaque nœud client a des " -"exemples différents dans son ensemble de données local. Les nœuds clients" -" renvoient ensuite ces mises à jour du modèle au serveur. Les mises à " -"jour du modèle qu'ils envoient peuvent être soit les paramètres complets " -"du modèle, soit seulement les gradients qui ont été accumulés au cours de" -" l'entraînement local." 
+"*Exemple de code:* Notez que le dictionnaire `config` contient maintenant" +" des valeurs autres que `str` dans `Client.fit` et `Client.evaluate` :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|7efbe3d29d8349b89594e8947e910525|" +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" +msgstr "v0.13.0 (2021-01-08)" + +#: ../../source/ref-changelog.md:1350 +msgid "" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" +"Nouvel exemple : PyTorch de centralisé à fédéré " +"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "Envoyer les mises à jour du modèle" +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" +msgstr "Amélioration de la documentation" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -"Étape 4 : Agréger les mises à jour des modèles dans un nouveau modèle " -"global" +"Nouveau thème de documentation " +"([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "Nouvelle référence API ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:1354 msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" 
+"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -"Le serveur reçoit les mises à jour du modèle des nœuds clients " -"sélectionnés. S'il a sélectionné 100 nœuds clients, il dispose maintenant" -" de 100 versions légèrement différentes du modèle global original, " -"chacune ayant été formée sur les données locales d'un client. Mais ne " -"voulions-nous pas avoir un seul modèle qui contienne les apprentissages " -"des données de l'ensemble des 100 nœuds clients ?" +"Mise à jour de la documentation des exemples " +"([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#: ../../source/ref-changelog.md:1355 msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. 
The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." +"Suppression de la documentation obsolète " +"([#548](https://github.com/adap/flower/pull/548))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|329fb3c04c744eda83bb51fa444c2266|" +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" +msgstr "Correction de bogues :" + +#: ../../source/ref-changelog.md:1359 +msgid "" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." msgstr "" +"`Server.fit` ne déconnecte pas les clients lorsqu'il est terminé, la " +"déconnexion des clients est maintenant gérée dans " +"`flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "Mises à jour globales du modèle" +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" +msgstr "v0.12.0 (2020-12-07)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "Étape 5 : répète les étapes 1 à 4 jusqu'à ce que le modèle converge" +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" +msgstr "Changements importants :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +#: ../../source/ref-changelog.md:1365 msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -"Les étapes 1 à 4 constituent ce que nous appelons un cycle unique " -"d'apprentissage fédéré. Les paramètres du modèle global sont envoyés aux " -"nœuds clients participants (étape 1), les nœuds clients s'entraînent sur " -"leurs données locales (étape 2), ils envoient leurs modèles mis à jour au" -" serveur (étape 3), et le serveur agrège ensuite les mises à jour du " -"modèle pour obtenir une nouvelle version du modèle global (étape 4)." +"Ajout d'un exemple pour les périphériques embarqués " +"([#507](https://github.com/adap/flower/pull/507))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -#, fuzzy +#: ../../source/ref-changelog.md:1366 msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. 
This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -"Au cours d'un seul tour, chaque nœud client qui participe à cette " -"itération ne s'entraîne que pendant un petit moment. Cela signifie " -"qu'après l'étape d'agrégation (étape 4), nous avons un modèle qui a été " -"entraîné sur toutes les données de tous les nœuds clients participants, " -"mais seulement pendant un petit moment. Nous devons ensuite répéter ce " -"processus d'entraînement encore et encore pour finalement arriver à un " -"modèle entièrement entraîné qui fonctionne bien sur l'ensemble des " -"données de tous les nœuds clients." +"Ajout d'un nouveau NumPyClient (en plus du KerasClient existant) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +#: ../../source/ref-changelog.md:1367 msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" 
+"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -"Félicitations, tu comprends maintenant les bases de l'apprentissage " -"fédéré. Il y a bien sûr beaucoup plus à discuter, mais c'était " -"l'apprentissage fédéré en quelques mots. Dans les parties suivantes de ce" -" tutoriel, nous irons plus en détail. Les questions intéressantes " -"comprennent : comment pouvons-nous sélectionner les meilleurs nœuds " -"clients qui devraient participer au prochain tour ? Quelle est la " -"meilleure façon d'agréger les mises à jour du modèle ? Comment pouvons-" -"nous gérer les nœuds clients qui échouent (stragglers) ?" +"Déclassement du paquet `flwr_example` et migration des exemples dans le " +"répertoire de premier niveau `examples` " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -#, fuzzy +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" +msgstr "v0.11.0 (2020-11-30)" + +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" +msgstr "Changements incompatibles :" + +#: ../../source/ref-changelog.md:1373 msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. 
Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -"Tout comme nous pouvons former un modèle sur les données décentralisées " -"de différents nœuds clients, nous pouvons également évaluer le modèle sur" -" ces données pour recevoir des mesures précieuses. C'est ce qu'on appelle" -" l'évaluation fédérée, parfois abrégée en FE. En fait, l'évaluation " -"fédérée fait partie intégrante de la plupart des systèmes d'apprentissage" -" fédéré." +"Renommé les méthodes de stratégie " +"([#486](https://github.com/adap/flower/pull/486)) pour unifier le nommage" +" des API publiques de Flower. D'autres méthodes/fonctions publiques (par " +"exemple, toutes les méthodes de `Client`, mais aussi `Strategy.evaluate`)" +" n'utilisent pas le préfixe `on_`, c'est pourquoi nous le supprimons des " +"quatre méthodes de Stratégie. 
Pour migrer, renommez les méthodes de " +"`Strategy` suivantes en conséquence :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "Analyses fédérées" +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "`on_configure_evaluate` => `configure_evaluate`" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" + +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "`on_configure_fit` => `configure_fit`" + +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "`on_aggregate_fit` => `aggregate_fit`" + +#: ../../source/ref-changelog.md:1381 msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" -"Dans de nombreux cas, l'apprentissage automatique n'est pas nécessaire " -"pour tirer de la valeur des données. L'analyse des données peut donner " -"des indications précieuses, mais là encore, il n'y a souvent pas assez de" -" données pour obtenir une réponse claire. Quel est l'âge moyen auquel les" -" gens développent un certain type de problème de santé ? 
L'analyse " -"fédérée permet de telles requêtes sur plusieurs nœuds clients. Elle est " -"généralement utilisée en conjonction avec d'autres technologies de " -"renforcement de la confidentialité, comme l'agrégation sécurisée, pour " -"empêcher le serveur de voir les résultats soumis par les nœuds clients " -"individuels." +"Déclassé `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). Pour migrer, utilisez " +"`FedAvg` à la place." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +#: ../../source/ref-changelog.md:1382 msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" +"Exemples simplifiés et lignes de base " +"([#484](https://github.com/adap/flower/pull/484))." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "Fleur" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +#: ../../source/ref-changelog.md:1383 msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. 
It allows the user to " -"federate any workload, any ML framework, and any programming language." +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -"L'apprentissage fédéré, l'évaluation fédérée et l'analyse fédérée " -"nécessitent une infrastructure pour déplacer les modèles d'apprentissage " -"automatique dans les deux sens, les entraîner et les évaluer sur des " -"données locales, puis agréger les modèles mis à jour. Flower fournit " -"l'infrastructure pour faire exactement cela de manière simple, évolutive " -"et sécurisée. En bref, Flower présente une approche unifiée de " -"l'apprentissage, de l'analyse et de l'évaluation fédérés. Il permet à " -"l'utilisateur de fédérer n'importe quelle charge de travail, n'importe " -"quel cadre de ML et n'importe quel langage de programmation." +"Suppression de `on_conclude_round` actuellement inutilisé de l'interface " +"de stratégie ([#483](https://github.com/adap/flower/pull/483))." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#: ../../source/ref-changelog.md:1384 +msgid "" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" +"Fixe la version minimale de Python à 3.6.1 au lieu de 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +#: ../../source/ref-changelog.md:1385 msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" -"Serveur d'apprentissage fédéré de Flower et nœuds clients (voiture, " -"scooter, ordinateur personnel, roomba et téléphone)" +"Amélioration des docstrings `Stratégie` " +"([#470](https://github.com/adap/flower/pull/470))." 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +#: ../../source/ref-example-projects.rst:2 +#, fuzzy +msgid "Example projects" +msgstr "Exemples de PyTorch" + +#: ../../source/ref-example-projects.rst:4 msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -"Félicitations, tu viens d'apprendre les bases de l'apprentissage fédéré " -"et son rapport avec l'apprentissage automatique classique (centralisé) !" +"Flower est livré avec un certain nombre d'exemples d'utilisation, qui " +"montrent comment Flower peut être utilisé pour fédérer différents types " +"de pipelines d'apprentissage automatique existants, qui s'appuient " +"généralement sur des frameworks d'apprentissage automatique populaires " +"tels que `PyTorch `_ ou `TensorFlow " +"`_." -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +#: ../../source/ref-example-projects.rst:9 +#, fuzzy +msgid "The following examples are available as standalone projects." +msgstr "Les exemples suivants sont disponibles sous forme de projets autonomes." + +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/ref-example-projects.rst:14 msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -"Dans la prochaine partie de ce tutoriel, nous allons construire un " -"premier système d'apprentissage fédéré avec Flower." 
+"L'exemple de démarrage rapide TensorFlow/Keras montre la classification " +"d'images CIFAR-10 avec MobileNetV2 :" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +#: ../../source/ref-example-projects.rst:17 #, fuzzy msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -"Le `Tutoriel d'apprentissage fédéré Flower - Partie 1 " -"`__ " -"montre comment construire un système d'apprentissage fédéré simple avec " -"PyTorch et Flower." +"`Quickstart TensorFlow (Code) " +"`_" -#~ msgid "Flower CLI commands" -#~ msgstr "Commandes CLI Flower" +#: ../../source/ref-example-projects.rst:19 +#, fuzzy +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" +"`Quickstart TensorFlow (Tutorial) `_" -#~ msgid "Contributor guide" -#~ msgstr "Guide pour les contributeurs" +#: ../../source/ref-example-projects.rst:20 +msgid "" +"`Quickstart TensorFlow (Blog Post) `_" +msgstr "" +"`Quickstart TensorFlow (Blog Post) `_" -#~ msgid "API Reference - Flower CLI commands" -#~ msgstr "Référence API - Commandes CLI pour Flower" +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" +msgstr "Démarrage rapide de PyTorch" -#~ msgid "API Reference - flwr (Python package)" -#~ msgstr "Référence API - flwr (paquetage Python)" +#: ../../source/ref-example-projects.rst:26 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" +msgstr "" +"L'exemple de démarrage rapide PyTorch montre la classification d'images " +"CIFAR-10 avec un simple réseau neuronal convolutif :" -#~ msgid "Flower client." 
-#~ msgstr "Client de Flower" +#: ../../source/ref-example-projects.rst:29 +#, fuzzy +msgid "" +"`Quickstart PyTorch (Code) " +"`_" +msgstr "" +"`Quickstart PyTorch (Code) " +"`_" -#~ msgid "Abstract base class for Flower clients." -#~ msgstr "" +#: ../../source/ref-example-projects.rst:31 +#, fuzzy +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" +"`Quickstart PyTorch (Tutorial) `_" -#~ msgid "Evaluate the provided parameters using the locally held dataset." -#~ msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" +msgstr "PyTorch : De la centralisation à la fédération" -#~ msgid "Parameters" -#~ msgstr "Paramètres du modèle." +#: ../../source/ref-example-projects.rst:36 +msgid "" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "" +"Cet exemple montre comment un projet PyTorch ordinaire peut être fédéré à" +" l'aide de Flower :" -#~ msgid "" -#~ "The evaluation instructions containing " -#~ "(global) model parameters received from " -#~ "the server and a dictionary of " -#~ "configuration values used to customize " -#~ "the local evaluation process." -#~ msgstr "" +#: ../../source/ref-example-projects.rst:38 +#, fuzzy +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" +msgstr "" +"`PyTorch : De la centralisation à la fédération (Code) " +"`_" -#~ msgid "Returns" -#~ msgstr "Ressources" +#: ../../source/ref-example-projects.rst:40 +#, fuzzy +msgid "" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +msgstr "" +"`PyTorch : De la centralisation à la fédération (Tutoriel) " +"`_" -#~ msgid "" -#~ "The evaluation result containing the " -#~ "loss on the local dataset and " -#~ "other details such as the number " -#~ "of local data examples used for " -#~ "evaluation." 
-#~ msgstr "" +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "Apprentissage fédéré sur Raspberry Pi et Nvidia Jetson" -#~ msgid "Return type" -#~ msgstr "" +#: ../../source/ref-example-projects.rst:46 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "" +"Cet exemple montre comment Flower peut être utilisé pour construire un " +"système d'apprentissage fédéré qui fonctionne sur Raspberry Pi et Nvidia " +"Jetson :" -#~ msgid "Refine the provided parameters using the locally held dataset." -#~ msgstr "" +#: ../../source/ref-example-projects.rst:49 +#, fuzzy +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" +msgstr "" +"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Code) " +"`_" -#~ msgid "" -#~ "The training instructions containing (global)" -#~ " model parameters received from the " -#~ "server and a dictionary of configuration" -#~ " values used to customize the local" -#~ " training process." -#~ msgstr "" +#: ../../source/ref-example-projects.rst:51 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" +msgstr "" +"`L'apprentissage fédéré sur Raspberry Pi et Nvidia Jetson (Blog Post) " +"`_" -#~ msgid "" -#~ "The training result containing updated " -#~ "parameters and other details such as " -#~ "the number of local training examples" -#~ " used for training." -#~ msgstr "" +#: ../../source/ref-faq.rst:4 +msgid "" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "" +"Cette page rassemble les réponses aux questions les plus fréquemment " +"posées sur l'apprentissage fédéré avec Flower." -#~ msgid "Return the current local model parameters." 
-#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr "" +":fa:`eye,mr-1` Flower peut-il fonctionner sur les notebooks " +"Jupyter / Google Colab ?" -#~ msgid "" -#~ "The get parameters instructions received " -#~ "from the server containing a dictionary" -#~ " of configuration values." -#~ msgstr "" +#: ../../source/ref-faq.rst:9 +msgid "" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "" +"Oui, c'est possible ! Flower est même livré avec quelques optimisations " +"pour qu'il fonctionne encore mieux sur Colab. Voici un exemple de " +"démarrage rapide :" -#~ msgid "The current local model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/ref-faq.rst:11 +#, fuzzy +msgid "" +"`Flower simulation PyTorch " +"`_" +msgstr "" +"`Flower Quickstart (TensorFlow/Keras) " +"`_" -#~ msgid "Return set of client's properties." -#~ msgstr "" +#: ../../source/ref-faq.rst:12 +#, fuzzy +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" +msgstr "" +"`Flower Quickstart (TensorFlow/Keras) " +"`_" -#~ msgid "" -#~ "The get properties instructions received " -#~ "from the server containing a dictionary" -#~ " of configuration values." -#~ msgstr "" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr "" +":fa:`eye,mr-1` Comment puis-je faire fonctionner l'apprentissage fédéré " +"sur un Raspberry Pi ?" -#~ msgid "The current client properties." -#~ msgstr "" +#: ../../source/ref-faq.rst:16 +#, fuzzy +msgid "" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_."
+msgstr "" +"Trouve le `blog post about federated learning on embedded device ici " +"`_" +" et l'exemple de code GitHub correspondant " +"`_." -#~ msgid "Start a Flower client node which connects to a Flower server." -#~ msgstr "" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr "" +":fa:`eye,mr-1` Est-ce que Flower prend en charge l'apprentissage fédéré " +"sur les appareils Android ?" -#~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " server. If the Flower server runs" -#~ " on the same machine on port " -#~ "8080, then `server_address` would be " -#~ "`\"[::]:8080\"`." -#~ msgstr "" +#: ../../source/ref-faq.rst:20 +#, fuzzy +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" +msgstr "" +"Oui. Jetez un coup d'œil à notre `blog post " +"`_ ou consultez l'`exemple de code Android sur GitHub " +"`_." -#~ msgid "An implementation of the abstract base class `flwr.client.Client`." -#~ msgstr "" +#: ../../source/ref-faq.rst:22 +msgid "" +"`Android Kotlin example `_" +msgstr "" -#~ msgid "" -#~ "The maximum length of gRPC messages " -#~ "that can be exchanged with the " -#~ "Flower server. The default should be " -#~ "sufficient for most models. Users who" -#~ " train very large models might need" -#~ " to increase this value. Note that" -#~ " the Flower server needs to be " -#~ "started with the same value (see " -#~ "`flwr.server.start_server`), otherwise it will " -#~ "not know about the increased limit " -#~ "and block larger messages." -#~ msgstr "" +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" +msgstr "" -#~ msgid "" -#~ "The PEM-encoded root certificates as " -#~ "a byte string or a path string." -#~ " If provided, a secure connection " -#~ "using the certificates will be " -#~ "established to an SSL-enabled Flower " -#~ "server." 
-#~ msgstr "" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr "" +":fa:`eye,mr-1` Puis-je combiner l'apprentissage fédéré avec la blockchain" +" ?" -#~ msgid "" -#~ "DEPRECATED - USE 'transport' INSTEAD. " -#~ "Defines whether or not the client " -#~ "is interacting with the server using " -#~ "the experimental REST API. This feature" -#~ " is experimental, it might change " -#~ "considerably in future versions of " -#~ "Flower." -#~ msgstr "" -#~ "DÉPRÉCIÉ - UTILISER 'transport' À LA " -#~ "PLACE Définit si le client interagit " -#~ "ou non avec le serveur à l'aide" -#~ " de l'API REST expérimentale. Cette " -#~ "fonctionnalité est expérimentale, elle " -#~ "pourrait changer considérablement dans les " -#~ "futures versions de Flower." +#: ../../source/ref-faq.rst:27 +msgid "" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "" +"Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " +"environnement blockchain est disponible ici :" -#~ msgid "" -#~ "Configure the transport layer. Allowed " -#~ "values: - 'grpc-bidi': gRPC, " -#~ "bidirectional streaming - 'grpc-rere': " -#~ "gRPC, request-response (experimental) - " -#~ "'rest': HTTP (experimental)" -#~ msgstr "" -#~ "Valeurs autorisées : - 'grpc-bidi' " -#~ ": gRPC, flux bidirectionnel - 'grpc-" -#~ "rere' : gRPC, requête-réponse " -#~ "(expérimental) - 'rest' : HTTP " -#~ "(expérimental)" +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." +msgstr "" -#~ msgid "Starting a gRPC client with an insecure server connection:" -#~ msgstr "" +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" -#~ msgid "Starting an SSL-enabled gRPC client:" -#~ msgstr "" +#: ../../source/ref-faq.rst:31 +#, fuzzy +msgid "Local blockchain with federated learning simulation." 
+msgstr "Mise à l'échelle de l'apprentissage fédéré" -#~ msgid "Abstract base class for Flower clients using NumPy." -#~ msgstr "" +#: ../../source/ref-faq.rst:32 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." +msgstr "" +"`Flower meets Nevermined GitHub Repository `_." -#~ msgid "The current (global) model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/ref-faq.rst:33 +msgid "" +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" +"`Flower rencontre Nevermined vidéo YouTube " +"`_." -#~ msgid "" -#~ "Configuration parameters which allow the " -#~ "server to influence evaluation on the" -#~ " client. It can be used to " -#~ "communicate arbitrary values from the " -#~ "server to the client, for example, " -#~ "to influence the number of examples " -#~ "used for evaluation." -#~ msgstr "" +#: ../../source/ref-faq.rst:34 +#, fuzzy +msgid "" +"`Flower meets KOSMoS `_." +msgstr "" +"`Flower rencontre KOSMoS `_." -#~ msgid "" -#~ "* **loss** (*float*) -- The evaluation" -#~ " loss of the model on the local" -#~ " dataset. * **num_examples** (*int*) -- " -#~ "The number of examples used for " -#~ "evaluation. * **metrics** (*Dict[str, " -#~ "Scalar]*) -- A dictionary mapping " -#~ "arbitrary string keys to values of " -#~ "type bool, bytes, float, int, or " -#~ "str. It can be used to " -#~ "communicate arbitrary values back to the" -#~ " server." -#~ msgstr "" +#: ../../source/ref-faq.rst:35 +msgid "" +"`Flower meets Talan blog post `_ ." +msgstr "" +"`Flower meets Talan blog post `_ ." -#~ msgid "" -#~ "**loss** (*float*) -- The evaluation " -#~ "loss of the model on the local " -#~ "dataset." -#~ msgstr "" +#: ../../source/ref-faq.rst:36 +msgid "" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" +"`Flower rencontre Talan Dépôt GitHub " +"`_ ." -#~ msgid "**num_examples** (*int*) -- The number of examples used for evaluation." 
-#~ msgstr "" +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "Télémétrie" -#~ msgid "" -#~ "**metrics** (*Dict[str, Scalar]*) -- A " -#~ "dictionary mapping arbitrary string keys " -#~ "to values of type bool, bytes, " -#~ "float, int, or str. It can be " -#~ "used to communicate arbitrary values " -#~ "back to the server." -#~ msgstr "" +#: ../../source/ref-telemetry.md:3 +msgid "" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." +msgstr "" +"Le projet open-source Flower recueille des mesures d'utilisation " +"**anonymes** afin de prendre des décisions éclairées pour améliorer " +"Flower. Cela permet à l'équipe de Flower de comprendre comment Flower est" +" utilisé et quels sont les défis auxquels les utilisateurs peuvent être " +"confrontés." -#~ msgid "" -#~ "The previous return type format (int," -#~ " float, float) and the extended " -#~ "format (int, float, float, Dict[str, " -#~ "Scalar]) have been deprecated and " -#~ "removed since Flower 0.19." -#~ msgstr "" +#: ../../source/ref-telemetry.md:5 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "" +"**Flower est un cadre convivial pour l'IA collaborative et la science des" +" données.** En restant fidèle à cette déclaration, Flower permet de " +"désactiver facilement la télémétrie pour les utilisateurs qui ne " +"souhaitent pas partager des mesures d'utilisation anonymes." -#~ msgid "Train the provided parameters using the locally held dataset." 
-#~ msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "Principes" -#~ msgid "" -#~ "Configuration parameters which allow the " -#~ "server to influence training on the " -#~ "client. It can be used to " -#~ "communicate arbitrary values from the " -#~ "server to the client, for example, " -#~ "to set the number of (local) " -#~ "training epochs." -#~ msgstr "" +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "" +"Nous suivons des principes stricts concernant la collecte de données " +"anonymes sur l'utilisation :" -#~ msgid "" -#~ "* **parameters** (*NDArrays*) -- The " -#~ "locally updated model parameters. * " -#~ "**num_examples** (*int*) -- The number " -#~ "of examples used for training. * " -#~ "**metrics** (*Dict[str, Scalar]*) -- A " -#~ "dictionary mapping arbitrary string keys " -#~ "to values of type bool, bytes, " -#~ "float, int, or str. It can be " -#~ "used to communicate arbitrary values " -#~ "back to the server." -#~ msgstr "" +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." +msgstr "" +"**Optionnel:** Tu pourras toujours désactiver la télémétrie ; lis la " +"suite pour apprendre \"[Comment se désengager](#how-to-opt-out)\"." -#~ msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." 
+msgstr "" +"**Anonyme:** Les mesures d'utilisation rapportées sont anonymes et ne " +"contiennent aucune information personnelle identifiable (PII). Voir " +"\"[Collected metrics](#collected-metrics)\" pour comprendre quelles " +"mesures sont rapportées." -#~ msgid "**num_examples** (*int*) -- The number of examples used for training." -#~ msgstr "" +#: ../../source/ref-telemetry.md:13 +msgid "" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" +msgstr "" +"**Transparent:** Tu peux facilement inspecter les métriques anonymes qui " +"sont rapportées ; voir la section \"[Comment inspecter ce qui est " +"rapporté](#how-to-inspect-what-is-being-reported)\"" -#~ msgid "" -#~ "Configuration parameters requested by the " -#~ "server. This can be used to tell" -#~ " the client which parameters are " -#~ "needed along with some Scalar " -#~ "attributes." -#~ msgstr "" +#: ../../source/ref-telemetry.md:14 +#, fuzzy +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." +msgstr "" +"**Ouvert pour les commentaires:** Tu peux toujours nous contacter si tu " +"as des commentaires ; voir la section \"[Comment nous contacter ](#how-" +"to-contact-us)\" pour plus de détails." -#~ msgid "" -#~ "**parameters** -- The local model " -#~ "parameters as a list of NumPy " -#~ "ndarrays." -#~ msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "Comment se désinscrire" -#~ msgid "Return a client's set of properties." -#~ msgstr "Renvoie l'ensemble des propriétés d'un client." +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. 
Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" +msgstr "" +"Lorsque Flower démarre, il vérifie la présence d'une variable " +"d'environnement appelée `FLWR_TELEMETRY_ENABLED`. La télémétrie peut " +"facilement être désactivée en réglant `FLWR_TELEMETRY_ENABLED=0`. En " +"supposant que tu démarres un serveur ou un client Flower, fais-le " +"simplement en faisant précéder ta commande de la façon suivante :" -#~ msgid "" -#~ "Configuration parameters requested by the " -#~ "server. This can be used to tell" -#~ " the client which properties are " -#~ "needed along with some Scalar " -#~ "attributes." -#~ msgstr "" +#: ../../source/ref-telemetry.md:24 +msgid "" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." +msgstr "" +"Tu peux aussi exporter `FLWR_TELEMETRY_ENABLED=0` dans, par exemple, " +"`.bashrc` (ou tout autre fichier de configuration qui s'applique à ton " +"environnement) pour désactiver la télémétrie de Flower de façon " +"permanente." -#~ msgid "" -#~ "**properties** -- A dictionary mapping " -#~ "arbitrary string keys to values of " -#~ "type bool, bytes, float, int, or " -#~ "str. It can be used to communicate" -#~ " arbitrary property values back to " -#~ "the server." -#~ msgstr "" +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "Mesures collectées" -#~ msgid "Start a Flower NumPyClient which connects to a gRPC server." -#~ msgstr "" +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "La télémétrie de Flower recueille les métriques suivantes :" -#~ msgid "An implementation of the abstract base class `flwr.client.NumPyClient`."
-#~ msgstr "" +#: ../../source/ref-telemetry.md:30 +msgid "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." +msgstr "" +"**Cela nous aide à décider si nous devons investir des efforts dans la " +"publication d'une version corrective pour une version plus ancienne de " +"Flower ou si nous devons plutôt utiliser la bande passante pour " +"développer de nouvelles fonctionnalités." -#~ msgid "Starting a client with an insecure server connection:" -#~ msgstr "" +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "" +"**Système d'exploitation.** Nous permet de répondre à des questions " +"telles que : *Faudrait-il créer plus de guides pour Linux, macOS ou " +"Windows ?" + +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "" +"**Version de Python.** Connaître la version de Python nous aide, par " +"exemple, à décider si nous devons investir des efforts dans la prise en " +"charge des anciennes versions de Python ou cesser de les prendre en " +"charge et commencer à tirer parti des nouvelles fonctionnalités de " +"Python." + +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." 
+msgstr "" +"**Comprendre l'environnement matériel dans lequel Flower est utilisé " +"permet de décider si nous devrions, par exemple, faire plus d'efforts " +"pour prendre en charge les environnements à faibles ressources." + +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." +msgstr "" +"**Mode d'exécution** Connaître le mode d'exécution dans lequel Flower " +"démarre nous permet de comprendre à quel point certaines fonctionnalités " +"sont utilisées et de mieux établir les priorités en fonction de cela." + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." +msgstr "" +"**Cluster.** La télémétrie Flower attribue un ID de cluster en mémoire " +"aléatoire à chaque fois qu'une charge de travail Flower démarre. Cela " +"nous permet de comprendre quels types d'appareils non seulement démarrent" +" les charges de travail Flower, mais aussi les terminent avec succès." + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." 
+msgstr "" +"**Source.** La télémétrie de Flower essaie de stocker un ID de source " +"aléatoire dans `~/.flwr/source` la première fois qu'un événement de " +"télémétrie est généré. L'ID de source est important pour identifier si un" +" problème est récurrent ou si un problème est déclenché par plusieurs " +"clusters fonctionnant simultanément (ce qui arrive souvent en " +"simulation). Par exemple, si un périphérique exécute plusieurs charges de" +" travail en même temps, et que cela entraîne un problème, alors, afin de " +"reproduire le problème, plusieurs charges de travail doivent être " +"démarrées en même temps." + +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." +msgstr "" +"Tu peux supprimer l'identifiant de la source à tout moment. Si tu " +"souhaites que tous les événements enregistrés sous un identifiant de " +"source spécifique soient supprimés, tu peux envoyer une demande de " +"suppression mentionnant l'identifiant de source à `telemetry@flower.ai`. " +"Tous les événements liés à cet identifiant de source seront alors " +"définitivement supprimés." + +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." +msgstr "" +"Nous ne collecterons aucune information personnelle identifiable. Si tu " +"penses que l'une des métriques collectées pourrait être utilisée à " +"mauvais escient de quelque manière que ce soit, merci de [nous " +"contacter](#commentnouscontacter). 
Nous mettrons à jour cette page pour " +"refléter toute modification des métriques collectées et nous publierons " +"les changements dans le journal des modifications (changelog)." + +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." +msgstr "" +"Si tu penses que d'autres mesures nous seraient utiles pour mieux " +"orienter nos décisions, fais-le nous savoir ! Nous les examinerons " +"attentivement ; si nous sommes convaincus qu'elles ne compromettent pas " +"la vie privée des utilisateurs, nous pourrons les ajouter." + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "Comment inspecter ce qui est rapporté" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" +"Nous avons voulu qu'il soit très facile pour toi d'inspecter les mesures " +"d'utilisation anonymes qui sont rapportées. Tu peux voir toutes les " +"informations de télémétrie rapportées en définissant la variable " +"d'environnement `FLWR_TELEMETRY_LOGGING=1`. La journalisation est " +"désactivée par défaut. Tu peux utiliser la journalisation indépendamment " +"de `FLWR_TELEMETRY_ENABLED` afin d'inspecter la fonction de télémétrie " +"sans envoyer de mesures." 
+ +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "" +"Pour inspecter la télémétrie de Flower sans envoyer de métriques " +"d'utilisation anonymes, utilise les deux variables d'environnement :" + +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "Comment nous contacter" + +#: ../../source/ref-telemetry.md:66 +msgid "" +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." +msgstr "" +"Si tu as des commentaires ou des idées pour améliorer la façon dont nous " +"traitons les mesures d'utilisation anonymes, contacte-nous via " +"[Slack](https://flower.ai/join-slack/) (canal `#telemetry`) ou par " +"courriel (`telemetry@flower.ai`)." + +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:4 +#, fuzzy +msgid "Quickstart Android" +msgstr "Démarrage rapide Android" + +#: ../../source/tutorial-quickstart-android.rst:9 +#, fuzzy +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "" +"Construisons un système d'apprentissage fédéré en utilisant fastai et " +"Flower !" + +#: ../../source/tutorial-quickstart-android.rst:11 +#, fuzzy +msgid "" +"Please refer to the `full code example " +"`_ to learn " +"more." +msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus."
+ +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" +msgstr "Démarrage rapide fastai" + +#: ../../source/tutorial-quickstart-fastai.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:18 +msgid "" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "et active l'environnement virtuel avec :" + +#: ../../source/tutorial-quickstart-fastai.rst:41 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." 
+" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:108 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" +msgstr "Démarrage rapide 🤗 Transformateurs" + +#: ../../source/tutorial-quickstart-huggingface.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. 
It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-huggingface.rst:12 +msgid "" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 +#, fuzzy +msgid "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" +msgstr "" +"Maintenant que nous avons une idée approximative de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"lançant :" + +#: ../../source/tutorial-quickstart-huggingface.rst:25 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. 
It should have the following structure:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 +msgid "" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:109 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +msgid "" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#, fuzzy +msgid "The Data" +msgstr "Chargement des données" + +#: ../../source/tutorial-quickstart-huggingface.rst:126 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. 
To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +#, fuzzy +msgid "The Model" +msgstr "Entraîne le modèle" + +#: ../../source/tutorial-quickstart-huggingface.rst:173 +#, fuzzy +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" +msgstr "" +"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " +"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " +"précisément, nous mettrons au point un modèle Transformer pré-entraîné " +"(distilBERT) pour la classification de séquences sur un ensemble de " +"données d'évaluations IMDB. L'objectif final est de détecter si " +"l'évaluation d'un film est positive ou négative." + +#: ../../source/tutorial-quickstart-huggingface.rst:185 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:188 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. 
" +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#, fuzzy +msgid "The ClientApp" +msgstr "client" + +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function is the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this is fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." 
+msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:283 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#, fuzzy +msgid "The ServerApp" +msgstr "serveur" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_. In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will serve as the global model to be federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:356 +msgid "" +"Congratulations! 
You've successfully built and run your first federated " +"learning system for an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:361 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:4 +#, fuzzy +msgid "Quickstart iOS" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/tutorial-quickstart-ios.rst:9 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "" +"Dans ce tutoriel, nous allons apprendre, comment former un réseau " +"neuronal convolutif sur MNIST en utilisant Flower et PyTorch." + +#: ../../source/tutorial-quickstart-ios.rst:12 +#, fuzzy +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-ios.rst:17 +#, fuzzy +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " +"même modèle." 
+ +#: ../../source/tutorial-quickstart-ios.rst:20 +#, fuzzy +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-ios.rst:26 +#, fuzzy +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" +msgstr "" +"Maintenant que nous avons une idée générale de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"exécutant :" + +#: ../../source/tutorial-quickstart-ios.rst:33 +msgid "Or Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 +msgid "Flower Client" +msgstr "Client de la fleur" + +#: ../../source/tutorial-quickstart-ios.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. 
The client implementation can be seen below:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:80 +msgid "" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:86 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:94 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:112 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:118 +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." 
+msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:133 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:141 +msgid "" +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "Flower Server" +msgstr "Serveur de Flower" + +#: ../../source/tutorial-quickstart-ios.rst:150 +#, fuzzy +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" +msgstr "" +"Pour les charges de travail simples, nous pouvons démarrer un serveur " +"Flower et laisser toutes les possibilités de configuration à leurs " +"valeurs par défaut. Dans un fichier nommé :code:`server.py`, importe " +"Flower et démarre le serveur :" + +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 +msgid "Train the model, federated!" +msgstr "Entraîne le modèle, fédéré !" + +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. 
We therefore have to start the server first:" +msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout " +"exécuter et voir l'apprentissage fédéré en action. Les systèmes FL ont " +"généralement un serveur et plusieurs clients. Nous devons donc commencer " +"par démarrer le serveur :" + +#: ../../source/tutorial-quickstart-ios.rst:171 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:177 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" +msgstr "Démarrage rapide de JAX" + +#: ../../source/tutorial-quickstart-jax.rst:9 +#, fuzzy +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. 
Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" +"Ce tutoriel te montrera comment utiliser Flower pour construire une " +"version fédérée d'une charge de travail JAX existante. Nous utilisons JAX" +" pour entraîner un modèle de régression linéaire sur un ensemble de " +"données scikit-learn. Nous structurerons l'exemple de la même manière que" +" notre présentation `PyTorch - De la centralisation à la fédération " +"`_. Tout d'abord, nous construisons une approche" +" d'entraînement centralisée basée sur le tutoriel `Régression linéaire " +"avec JAX " +"`_." +" Ensuite, nous nous appuyons sur le code d'entraînement centralisé pour " +"exécuter l'entraînement de manière fédérée." + +#: ../../source/tutorial-quickstart-jax.rst:20 +#, fuzzy +msgid "" +"Before we start building our JAX example, we need install the packages " +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" +msgstr "" +"Avant de commencer à construire notre exemple JAX, nous devons installer " +"les paquets :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, et " +":code:`flwr` :" + +#: ../../source/tutorial-quickstart-jax.rst:28 +msgid "Linear Regression with JAX" +msgstr "Régression linéaire avec JAX" + +#: ../../source/tutorial-quickstart-jax.rst:30 +#, fuzzy +msgid "" +"We begin with a brief description of the centralized training code based " +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." +msgstr "" +"Nous commençons par une brève description du code d'entraînement " +"centralisé basé sur un modèle :code:`Régression linéaire`. Si tu veux une" +" explication plus approfondie de ce qui se passe, jette un coup d'œil à " +"la documentation officielle `JAX `_." 
+ +#: ../../source/tutorial-quickstart-jax.rst:34 +#, fuzzy +msgid "" +"Let's create a new file called ``jax_training.py`` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." +msgstr "" +"Créons un nouveau fichier appelé :code:`jax_training.py` avec tous les " +"composants nécessaires pour un apprentissage traditionnel (centralisé) de" +" la régression linéaire. Tout d'abord, les paquets JAX :code:`jax` et " +":code:`jaxlib` doivent être importés. En outre, nous devons importer " +":code:`sklearn` puisque nous utilisons :code:`make_regression` pour le " +"jeu de données et :code:`train_test_split` pour diviser le jeu de données" +" en un jeu d'entraînement et un jeu de test. Tu peux voir que nous " +"n'avons pas encore importé le paquet :code:`flwr` pour l'apprentissage " +"fédéré, ce qui sera fait plus tard." + +#: ../../source/tutorial-quickstart-jax.rst:51 +#, fuzzy +msgid "The ``load_data()`` function loads the mentioned training and test sets." +msgstr "" +"La fonction :code:`load_data()` charge les ensembles d'entraînement et de" +" test mentionnés." + +#: ../../source/tutorial-quickstart-jax.rst:63 +#, fuzzy +msgid "" +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." +msgstr "" +"L'architecture du modèle (un modèle :code:`Régression linéaire` très " +"simple) est définie dans :code:`load_model()`." 
+ +#: ../../source/tutorial-quickstart-jax.rst:73 +#, fuzzy +msgid "" +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." +msgstr "" +"Nous devons maintenant définir l'entraînement (fonction :code:`train()`)," +" qui boucle sur l'ensemble d'entraînement et mesure la perte (fonction " +":code:`loss_fn()`) pour chaque lot d'exemples d'entraînement. La fonction" +" de perte est séparée puisque JAX prend des dérivés avec une fonction " +":code:`grad()` (définie dans la fonction :code:`main()` et appelée dans " +":code:`train()`)." + +#: ../../source/tutorial-quickstart-jax.rst:95 +#, fuzzy +msgid "" +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." +msgstr "" +"L'évaluation du modèle est définie dans la fonction :code:`evaluation()`." +" La fonction prend tous les exemples de test et mesure la perte du modèle" +" de régression linéaire." + +#: ../../source/tutorial-quickstart-jax.rst:107 +#, fuzzy +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." +msgstr "" +"Après avoir défini le chargement des données, l'architecture du modèle, " +"l'entraînement et l'évaluation, nous pouvons tout assembler et entraîner " +"notre modèle à l'aide de JAX. Comme nous l'avons déjà mentionné, la " +"fonction :code:`jax.grad()` est définie dans :code:`main()` et transmise " +"à :code:`train()`." 
+ +#: ../../source/tutorial-quickstart-jax.rst:126 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "" +"Tu peux maintenant exécuter ta charge de travail (centralisée) de " +"régression linéaire JAX :" + +#: ../../source/tutorial-quickstart-jax.rst:132 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." +msgstr "" +"Jusqu'à présent, tout cela devrait te sembler assez familier si tu as " +"déjà utilisé JAX. Passons à l'étape suivante et utilisons ce que nous " +"avons construit pour créer un simple système d'apprentissage fédéré " +"composé d'un serveur et de deux clients." + +#: ../../source/tutorial-quickstart-jax.rst:137 +msgid "JAX meets Flower" +msgstr "JAX rencontre Flower" + +#: ../../source/tutorial-quickstart-jax.rst:139 +#, fuzzy +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." +msgstr "" +"Le concept de fédération d'une charge de travail existante est toujours " +"le même et facile à comprendre. 
Nous devons démarrer un *serveur*, puis " +"utiliser le code dans :code:`jax_training.py` pour les *clients* qui sont" +" connectés au *serveur*.Le *serveur* envoie les paramètres du modèle aux " +"clients.Les *clients* exécutent la formation et mettent à jour les " +"paramètres.Les paramètres mis à jour sont renvoyés au *serveur*, qui fait" +" la moyenne de toutes les mises à jour de paramètres reçues.Ceci décrit " +"un tour du processus d'apprentissage fédéré, et nous répétons cette " +"opération pour plusieurs tours." + +#: ../../source/tutorial-quickstart-jax.rst:167 +#, fuzzy +msgid "" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" +msgstr "" +"Enfin, nous allons définir la logique de notre *client* dans " +":code:`client.py` et nous appuyer sur la formation JAX définie " +"précédemment dans :code:`jax_training.py`. Notre *client* doit importer " +":code:`flwr`, mais aussi :code:`jax` et :code:`jaxlib` pour mettre à jour" +" les paramètres de notre modèle JAX :" + +#: ../../source/tutorial-quickstart-jax.rst:182 +#, fuzzy +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. 
``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" +msgstr "" +"L'implémentation d'un *client* Flower signifie essentiellement " +"l'implémentation d'une sous-classe de :code:`flwr.client.Client` ou " +":code:`flwr.client.NumPyClient`. Notre implémentation sera basée sur " +":code:`flwr.client.NumPyClient` et nous l'appellerons " +":code:`FlowerClient`. :code:`NumPyClient` est légèrement plus facile à " +"implémenter que :code:`Client` si vous utilisez un framework avec une " +"bonne interopérabilité NumPy (comme JAX) parce qu'il évite une partie du " +"boilerplate qui serait autrement nécessaire. :code:`FlowerClient` doit " +"implémenter quatre méthodes, deux méthodes pour obtenir/régler les " +"paramètres du modèle, une méthode pour former le modèle, et une méthode " +"pour tester le modèle :" + +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "``set_parameters (optional)``" +msgstr ":code:`set_parameters (optional)`" + +#: ../../source/tutorial-quickstart-jax.rst:193 +#, fuzzy +msgid "transform parameters to NumPy ``ndarray``'s" +msgstr "transforme les paramètres en NumPy :code:`ndarray`'s" + +#: ../../source/tutorial-quickstart-jax.rst:203 +msgid "get the updated local model parameters and return them to the server" +msgstr "" +"récupère les paramètres du modèle local mis à jour et les renvoie au " +"serveur" + +#: ../../source/tutorial-quickstart-jax.rst:208 +msgid "return the local loss to the server" +msgstr "renvoie la perte locale au serveur" + +#: ../../source/tutorial-quickstart-jax.rst:210 +#, fuzzy +msgid "" +"The challenging part is to transform the JAX model parameters from " +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." 
+msgstr "" +"La partie la plus difficile consiste à transformer les paramètres du " +"modèle JAX de :code:`DeviceArray` en :code:`NumPy ndarray` pour les " +"rendre compatibles avec `NumPyClient`." + +#: ../../source/tutorial-quickstart-jax.rst:213 +#, fuzzy +msgid "" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." +msgstr "" +"Les deux méthodes :code:`NumPyClient` :code:`fit` et :code:`evaluate` " +"utilisent les fonctions :code:`train()` et :code:`evaluate()` définies " +"précédemment dans :code:`jax_training.py`. Ce que nous faisons vraiment " +"ici, c'est que nous indiquons à Flower, par le biais de notre sous-classe" +" :code:`NumPyClient`, laquelle de nos fonctions déjà définies doit être " +"appelée pour l'entraînement et l'évaluation. Nous avons inclus des " +"annotations de type pour te donner une meilleure compréhension des types " +"de données qui sont transmis." + +#: ../../source/tutorial-quickstart-jax.rst:286 +msgid "Having defined the federation process, we can run it." +msgstr "Après avoir défini le processus de fédération, nous pouvons l'exécuter." + +#: ../../source/tutorial-quickstart-jax.rst:315 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "" +"dans chaque fenêtre (assure-toi que le serveur est toujours en cours " +"d'exécution avant de le faire) et tu verras que ton projet JAX exécute " +"l'apprentissage fédéré sur deux clients. Félicitations !" 
+ +#: ../../source/tutorial-quickstart-jax.rst:321 +#, fuzzy +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" +"Le code source de cet exemple a été amélioré au fil du temps et peut être" +" trouvé ici : `Quickstart JAX " +"`_. " +"Notre exemple est quelque peu simplifié à l'extrême car les deux clients " +"chargent le même jeu de données." + +#: ../../source/tutorial-quickstart-jax.rst:325 +msgid "" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" +msgstr "" +"Tu es maintenant prêt à approfondir ce sujet. Pourquoi ne pas utiliser un" +" modèle plus sophistiqué ou un ensemble de données différent ? Pourquoi " +"ne pas ajouter d'autres clients ?" + +#: ../../source/tutorial-quickstart-mlx.rst:4 +#, fuzzy +msgid "Quickstart MLX" +msgstr "Démarrage rapide de JAX" + +#: ../../source/tutorial-quickstart-mlx.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-mlx.rst:10 +msgid "" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:25 +msgid "" +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:102 +msgid "" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:116 +msgid "" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:157 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:180 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:201 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:219 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:228 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. 
We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:243 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:259 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:272 +msgid "" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:277 +#, fuzzy +msgid "Putting everything together we have:" +msgstr "Tout assembler" + +#: ../../source/tutorial-quickstart-mlx.rst:331 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." 
+msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:363 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:390 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "Construisons un système d'analyse fédéré à l'aide de Pandas et de Flower !" + +#: ../../source/tutorial-quickstart-pandas.rst:11 +#, fuzzy +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" +"Réfère-toi à l'exemple de code complet " +"`_ " +"pour en savoir plus." + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-pytorch.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:117 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:152 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:177 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. 
``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:226 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:282 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:309 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. 
To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:348 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#, fuzzy +msgid "Video tutorial" +msgstr "Tutoriel" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" +msgstr "Démarrage rapide de PyTorch Lightning" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." 
+ +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +#, fuzzy +msgid "" +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." +msgstr "" +"Dans ce tutoriel, nous allons apprendre à former un :code:`modèle de " +"régression logistique` sur MNIST en utilisant Flower et scikit-learn." 
+ +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" +"Il est recommandé de créer un environnement virtuel et de tout exécuter " +"dans ce `virtualenv `_." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" +"Notre exemple consiste en un *serveur* et deux *clients* ayant tous le " +"même modèle." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "" +"*Les clients* sont chargés de générer des mises à jour individuelles des " +"paramètres du modèle en fonction de leurs ensembles de données locales. " +"Ces mises à jour sont ensuite envoyées au *serveur* qui les agrège pour " +"produire un modèle global mis à jour. Enfin, le *serveur* renvoie cette " +"version améliorée du modèle à chaque *client*. Un cycle complet de mises " +"à jour des paramètres s'appelle un *round*." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" +msgstr "" +"Maintenant que nous avons une idée approximative de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. 
Tu peux le faire en " +"lançant :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "Puisque nous voulons utiliser scikit-learn, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 +msgid "Or simply install all dependencies using Poetry:" +msgstr "Ou installe simplement toutes les dépendances à l'aide de Poetry :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#, fuzzy +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within ``utils.py``. The " +"``utils.py`` contains different functions defining all the machine " +"learning basics:" +msgstr "" +"Maintenant que toutes nos dépendances sont installées, exécutons une " +"formation distribuée simple avec deux clients et un serveur. Cependant, " +"avant de configurer le client et le serveur, nous allons définir toutes " +"les fonctionnalités dont nous avons besoin pour notre configuration " +"d'apprentissage fédéré dans :code:`utils.py`. 
Le :code:`utils.py` " +"contient différentes fonctions définissant toutes les bases de " +"l'apprentissage automatique :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +#, fuzzy +msgid "``get_model_parameters()``" +msgstr ":code:`get_model_parameters()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#, fuzzy +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" +msgstr "" +"Renvoie les paramètres d'un modèle de régression logistique " +":code:`sklearn`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +#, fuzzy +msgid "``set_model_params()``" +msgstr ":code:`set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +#, fuzzy +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklearn`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +#, fuzzy +msgid "``set_initial_params()``" +msgstr ":code:`set_initial_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "Initialise les paramètres du modèle que le serveur de Flower demandera" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#, fuzzy +msgid "" +"Please check out ``utils.py`` `here " +"`_ for more details. The pre-defined functions are used in" +" the ``client.py`` and imported. The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" +msgstr "" +"Tu peux consulter :code:`utils.py` `ici " +"`_ pour plus de détails. Les fonctions prédéfinies sont " +"utilisées dans :code:`client.py` et importées. 
:code:`client.py` " +"nécessite également d'importer plusieurs paquets tels que Flower et " +"scikit-learn :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " +"argument." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#, fuzzy +msgid "" +"Next, the logistic regression model is defined and initialized with " +"``utils.set_initial_params()``." +msgstr "" +"Ensuite, le modèle de régression logistique est défini et initialisé avec" +" :code:`utils.set_initial_params()`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#, fuzzy +msgid "" +"The Flower server interacts with clients through an interface called " +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." +msgstr "" +"Le serveur Flower interagit avec les clients par le biais d'une interface" +" appelée :code:`Client`. Lorsque le serveur sélectionne un client " +"particulier pour la formation, il envoie des instructions de formation " +"sur le réseau. Le client reçoit ces instructions et appelle l'une des " +"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour ajuster" +" la régression logistique que nous avons définie plus tôt)." 
+ +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#, fuzzy +msgid "" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" +msgstr "" +"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " +"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" +" de travail utilise scikit-learn. Mettre en œuvre :code:`NumPyClient` " +"signifie généralement définir les méthodes suivantes " +"(:code:`set_parameters` est cependant facultatif) :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#, fuzzy +msgid "``set_parameters`` (optional)" +msgstr ":code:`set_parameters` (optionnel)" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "" +"mettre à jour les poids du modèle local avec les paramètres reçus du " +"serveur" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +#, fuzzy +msgid "is directly imported with ``utils.set_model_params()``" +msgstr "est directement importé avec :code:`utils.set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 +msgid "set the local model weights" +msgstr "fixe les poids du modèle local" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "train the local model" +msgstr "entraîne le modèle local" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 +#, fuzzy +msgid "return the updated local model weights" +msgstr "renvoyer les poids du modèle local mis à jour" + +#: 
../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "test the local model" +msgstr "teste le modèle local" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 +msgid "The methods can be implemented in the following way:" +msgstr "Les méthodes peuvent être mises en œuvre de la manière suivante :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 +#, fuzzy +msgid "" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" +msgstr "" +"Nous pouvons maintenant créer une instance de notre classe " +":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." +msgstr "" +"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" +" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " +"chaîne :code:`\"0.0.0.0:8080\"` indique au client à quel serveur se " +"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " +"sur la même machine, c'est pourquoi nous utilisons " +":code:`\"0.0.0.0:8080\"`. Si nous exécutons une charge de travail " +"véritablement fédérée avec le serveur et les clients s'exécutant sur des " +"machines différentes, tout ce qui doit changer est :code:`server_address`" +" que nous transmettons au client." 
+ +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" +"Le serveur Flower suivant est un peu plus avancé et renvoie une fonction " +"d'évaluation pour l'évaluation côté serveur. Tout d'abord, nous importons" +" à nouveau toutes les bibliothèques requises telles que Flower et scikit-" +"learn." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#, fuzzy +msgid "``server.py``, import Flower and start the server:" +msgstr ":code:`server.py`, importe Flower et démarre le serveur :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +#, fuzzy +msgid "" +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." +msgstr "" +"Le nombre de tours d'apprentissage fédéré est défini dans " +":code:`fit_round()` et l'évaluation est définie dans " +":code:`get_evaluate_fn()`. La fonction d'évaluation est appelée après " +"chaque tour d'apprentissage fédéré et te donne des informations sur la " +"perte et la précision." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +#, fuzzy +msgid "" +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. 
The server can be started with the command" +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +msgstr "" +"Le :code:`main` contient l'initialisation des paramètres côté serveur " +":code:`utils.set_initial_params()` ainsi que la stratégie d'agrégation " +":code:`fl.server.strategy:FedAvg()`. La stratégie est celle par défaut, " +"la moyenne fédérée (ou FedAvg), avec deux clients et une évaluation après" +" chaque tour d'apprentissage fédéré. Le serveur peut être démarré avec la" +" commande :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" +msgstr "" +"Le client et le serveur étant prêts, nous pouvons maintenant tout lancer " +"et voir l'apprentissage fédéré en action. Les systèmes d'apprentissage " +"fédéré ont généralement un serveur et plusieurs clients. Nous devons donc" +" commencer par lancer le serveur :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "" +"Une fois que le serveur fonctionne, nous pouvons démarrer les clients " +"dans différents terminaux. 
Ouvre un nouveau terminal et démarre le " +"premier client :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "Open another terminal and start the second client:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "" +"Chaque client aura son propre ensemble de données. Tu devrais maintenant " +"voir comment la formation se déroule dans le tout premier terminal (celui" +" qui a démarré le serveur) :" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples/sklearn-logreg-" +"mnist`." + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/tutorial-quickstart-tensorflow.rst:6 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." 
+msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-tensorflow.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:141 +msgid "" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:170 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. 
Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:203 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:234 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:270 +#, fuzzy +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-tensorflow.rst:282 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. 
A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:13 +#, fuzzy +msgid "Federated XGBoost" +msgstr "Formation fédérée" + +#: ../../source/tutorial-quickstart-xgboost.rst:15 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:21 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +#, fuzzy +msgid "Why federated XGBoost?" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. 
Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:36 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:46 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:48 +#, fuzzy +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." + +#: ../../source/tutorial-quickstart-xgboost.rst:51 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +#, fuzzy +msgid "" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" +msgstr "Puisque nous voulons utiliser scikit-learn, allons-y et installons-le :" + +#: ../../source/tutorial-quickstart-xgboost.rst:67 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:71 +#, fuzzy +msgid "" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés à PyTorch :" + +#: ../../source/tutorial-quickstart-xgboost.rst:99 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:101 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:115 +msgid "" +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " +"partition for the given client based on ``partition_id``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:135 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for ``xgboost`` package." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:149 +msgid "" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "Finally, we define the hyper-parameters used for XGBoost training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:190 +msgid "" +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:195 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:197 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:219 +msgid "" +"All required parameters defined above are passed to ``XgbClient``'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:221 +msgid "" +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:236 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:278 +msgid "" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:298 +msgid "" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:330 +msgid "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:333 +#, fuzzy +msgid "" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" +msgstr "" +"Nous pouvons maintenant créer une instance de notre classe " +":code:`MnistClient` et ajouter une ligne pour exécuter ce client :" + +#: ../../source/tutorial-quickstart-xgboost.rst:350 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." +msgstr "" +"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" +" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " +"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " +"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " +"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " +":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" +" avec le serveur et les clients fonctionnant sur des machines " +"différentes, tout ce qui doit changer est l'adresse " +":code:`server_address` vers laquelle nous dirigeons le client." + +#: ../../source/tutorial-quickstart-xgboost.rst:360 +#, fuzzy +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." 
+msgstr "" +"*Les clients* sont chargés de générer des mises à jour de poids " +"individuelles pour le modèle en fonction de leurs ensembles de données " +"locales. Ces mises à jour sont ensuite envoyées au *serveur* qui les " +"agrège pour produire un meilleur modèle. Enfin, le *serveur* renvoie " +"cette version améliorée du modèle à chaque *client*. Un cycle complet de " +"mises à jour de poids s'appelle un *round*." + +#: ../../source/tutorial-quickstart-xgboost.rst:364 +#, fuzzy +msgid "" +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." +msgstr "" +"Dans un fichier appelé :code:`client.py`, importe Flower et les paquets " +"liés au MXNet :" + +#: ../../source/tutorial-quickstart-xgboost.rst:367 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:401 +msgid "" +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:406 +#, fuzzy +msgid "Then, we start the server:" +msgstr "Démarrer le serveur" + +#: ../../source/tutorial-quickstart-xgboost.rst:418 +msgid "Tree-based bagging aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:420 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:422 +msgid "" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. 
Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:519 +msgid "" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:579 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:584 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:664 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:668 +#, fuzzy +msgid "" +"The full `source code `_ for this example can be found in ``examples" +"/xgboost-quickstart``." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." 
+ +#: ../../source/tutorial-quickstart-xgboost.rst:673 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:675 +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:685 +#, fuzzy +msgid "Cyclic training" +msgstr "Formation centralisée" + +#: ../../source/tutorial-quickstart-xgboost.rst:687 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:733 +msgid "" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:775 +msgid "" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:778 +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:840 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:842 +msgid "" +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:873 +#, fuzzy +msgid "Customised centralised/distributed evaluation" +msgstr "Évaluation centralisée" + +#: ../../source/tutorial-quickstart-xgboost.rst:875 +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_utils.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:907 +msgid "" +"This function returns a evaluation function which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:911 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:916 +#, fuzzy +msgid "Flower simulation" +msgstr "Simulation de moniteur" + +#: ../../source/tutorial-quickstart-xgboost.rst:918 +msgid "" +"We also provide an example code (``sim.py``) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:954 +msgid "" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1010 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1014 +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1065 +msgid "" +"After that, we start the simulation by calling " +"``fl.simulation.start_simulation``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1085 +msgid "" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1126 +msgid "Arguments parser" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1128 +msgid "" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. 
" +"Let's first see the sever side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1175 +msgid "" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1180 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1234 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1239 +msgid "We also have an argument parser for simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1317 +msgid "This integrates all arguments for both client and server sides." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1320 +#, fuzzy +msgid "Example commands" +msgstr "Exemples de PyTorch" + +#: ../../source/tutorial-quickstart-xgboost.rst:1322 +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1329 +#, fuzzy +msgid "Then, on each client terminal, we start the clients:" +msgstr "Ouvre un autre terminal et démarre le deuxième client :" + +#: ../../source/tutorial-quickstart-xgboost.rst:1335 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1341 +#, fuzzy +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive``." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +#, fuzzy +msgid "Build a strategy from scratch" +msgstr "Élaborer une stratégie à partir de zéro" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" +"Bienvenue dans la troisième partie du tutoriel sur l'apprentissage fédéré" +" Flower. 
Dans les parties précédentes de ce tutoriel, nous avons présenté" +" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__) " +"et nous avons appris comment les stratégies peuvent être utilisées pour " +"personnaliser l'exécution à la fois sur le serveur et sur les clients " +"(`partie 2 `__)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." +msgstr "" +"Dans ce carnet, nous allons continuer à personnaliser le système " +"d'apprentissage fédéré que nous avons construit précédemment en créant " +"une version personnalisée de FedAvg (encore une fois, en utilisant " +"`Flower `__ et `PyTorch `__)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ et " +"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " +"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " +"``#introductions`` ! 
Et si quelque chose n'est pas clair, rendez-vous sur" +" le canal ``#questions``." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +#, fuzzy +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "Construisons une nouvelle ``Stratégie`` à partir de zéro !" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "Préparation" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." +msgstr "" +"Avant de commencer le code proprement dit, assurons-nous que nous " +"disposons de tout ce dont nous avons besoin." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "Installation des dépendances" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "Tout d'abord, nous installons les paquets nécessaires :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies 
installed, we can import everything we " +"need for this tutorial:" +msgstr "" +"Maintenant que toutes les dépendances sont installées, nous pouvons " +"importer tout ce dont nous avons besoin pour ce tutoriel :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"Il est possible de passer à un runtime dont l'accélération GPU est " +"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware " +"acclerator : GPU > Save``). Note cependant que Google Colab n'est pas " +"toujours en mesure de proposer l'accélération GPU. Si tu vois une erreur " +"liée à la disponibilité du GPU dans l'une des sections suivantes, " +"envisage de repasser à une exécution basée sur le CPU en définissant " +"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" +" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " +"il dira ``Training on cpu``." 
+ +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "Chargement des données" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." +msgstr "" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation) et enveloppons le tout dans " +"leur propre ``DataLoader``." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "Formation/évaluation du modèle" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" +"Continuons avec la définition habituelle du modèle (y compris " +"``set_parameters`` et ``get_parameters``), les fonctions d'entraînement " +"et de test :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "Client de Flower" + +#: 
../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#, fuzzy +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." +msgstr "" +"Pour mettre en œuvre le client Flower, nous créons (à nouveau) une sous-" +"classe de ``flwr.client.NumPyClient`` et mettons en œuvre les trois " +"méthodes ``get_parameters``, ``fit`` et ``evaluate``. Ici, nous " +"transmettons également le ``cid`` au client et l'utilisons pour consigner" +" des détails supplémentaires :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "Testons ce que nous avons jusqu'à présent avant de continuer :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "Élaborer une stratégie à partir de zéro" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" +"Remplaçons la méthode ``configure_fit`` de façon à ce qu'elle transmette " +"un taux d'apprentissage plus élevé (potentiellement aussi d'autres " +"hyperparamètres) à l'optimiseur d'une fraction des clients. 
Nous " +"garderons l'échantillonnage des clients tel qu'il est dans ``FedAvg`` et " +"changerons ensuite le dictionnaire de configuration (l'un des attributs " +"``FitIns``)." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "" +"Il ne reste plus qu'à utiliser la stratégie personnalisée nouvellement " +"créée ``FedCustom`` lors du démarrage de l'expérience :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "Récapitulation" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." +msgstr "" +"Dans ce carnet, nous avons vu comment mettre en place une stratégie " +"personnalisée. Une stratégie personnalisée permet un contrôle granulaire " +"sur la configuration des nœuds clients, l'agrégation des résultats, et " +"bien plus encore. Pour définir une stratégie personnalisée, il te suffit " +"d'écraser les méthodes abstraites de la classe de base (abstraite) " +"``Strategy``. 
Pour rendre les stratégies personnalisées encore plus " +"puissantes, tu peux passer des fonctions personnalisées au constructeur " +"de ta nouvelle classe (``__init__``) et appeler ensuite ces fonctions à " +"chaque fois que c'est nécessaire." + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +#, fuzzy +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." +msgstr "" +"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " +"Slack : `Join Slack `__" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "" +"Il existe un canal dédié aux ``questions`` si vous avez besoin d'aide, " +"mais nous aimerions aussi savoir qui vous êtes dans ``#introductions`` !" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." +msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 4 " +"`__ présente ``Client``, l'API flexible qui sous-tend " +"``NumPyClient``." 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +#, fuzzy +msgid "Customize the client" +msgstr "Création du client IMDBC" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." +msgstr "" +"Bienvenue dans la quatrième partie du tutoriel sur l'apprentissage fédéré" +" Flower. Dans les parties précédentes de ce tutoriel, nous avons présenté" +" l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__), " +"nous avons appris comment les stratégies peuvent être utilisées pour " +"personnaliser l'exécution à la fois sur le serveur et les clients " +"(`partie 2 `__), et nous avons construit notre propre stratégie " +"personnalisée à partir de zéro (`partie 3 - WIP " +"`__)." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." +msgstr "" +"Dans ce carnet, nous revisitons `NumPyClient`` et introduisons une " +"nouvelle classe de base pour construire des clients, simplement appelée " +"`Client``. 
Dans les parties précédentes de ce tutoriel, nous avons basé " +"notre client sur ``NumPyClient``, une classe de commodité qui facilite le" +" travail avec les bibliothèques d'apprentissage automatique qui ont une " +"bonne interopérabilité NumPy. Avec ``Client``, nous gagnons beaucoup de " +"flexibilité que nous n'avions pas auparavant, mais nous devrons également" +" faire quelques choses que nous n'avions pas à faire auparavant." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 +#, fuzzy +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" +msgstr "" +"Allons plus loin et voyons ce qu'il faut faire pour passer de " +"``NumPyClient`` à ``Client`` !" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "Étape 0 : Préparation" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation) et enveloppons le tout dans " +"leur propre ``DataLoader``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" +msgstr "Étape 1 : Revoir NumPyClient" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 +#, fuzzy +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." 
+msgstr ""
+"Jusqu'à présent, nous avons implémenté notre client en sous-classant "
+"``flwr.client.NumPyClient``. Les trois méthodes que nous avons "
+"implémentées sont ``get_parameters``, ``fit`` et ``evaluate``."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299
+msgid ""
+"Then, we define the function ``numpyclient_fn`` that is used by Flower to"
+" create the ``FlowerNumpyClient`` instances on demand. Finally, we create"
+" the ``ClientApp`` and pass the ``numpyclient_fn`` to it."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328
+#, fuzzy
+msgid ""
+"We've seen this before, there's nothing new so far. The only *tiny* "
+"difference compared to the previous notebook is naming, we've changed "
+"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to "
+"``numpyclient_fn``. Next, we configure the number of federated learning "
+"rounds using ``ServerConfig`` and create the ``ServerApp`` with this "
+"config:"
+msgstr ""
+"Nous avons déjà vu cela auparavant, il n'y a rien de nouveau jusqu'à "
+"présent. La seule *petite* différence par rapport au carnet précédent est"
+" le nommage, nous avons changé ``FlowerClient`` en ``FlowerNumPyClient`` "
+"et ``client_fn`` en ``numpyclient_fn``. Exécutons-le pour voir la sortie "
+"que nous obtenons :"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355
+msgid ""
+"Finally, we specify the resources for each client and run the simulation "
+"to see the output we get:"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389
+#, fuzzy
+msgid ""
+"This works as expected, ten clients are training for three rounds of "
+"federated learning."
+msgstr ""
+"Cela fonctionne comme prévu, dix clients s'entraînent pour trois tours "
+"d'apprentissage fédéré."
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 +#, fuzzy +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." +msgstr "" +"Plongeons un peu plus profondément et discutons de la façon dont Flower " +"exécute cette simulation. Chaque fois qu'un client est sélectionné pour " +"effectuer un travail, ``start_simulation`` appelle la fonction " +"``numpyclient_fn`` pour créer une instance de notre ``FlowerNumPyClient``" +" (en même temps qu'il charge le modèle et les données)." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." +msgstr "" +"Mais voici la partie la plus surprenante : Flower n'utilise pas " +"directement l'objet `FlowerNumPyClient`. Au lieu de cela, il enveloppe " +"l'objet pour le faire ressembler à une sous-classe de " +"`flwr.client.Client`, et non de `flwr.client.NumPyClient`. En fait, le " +"noyau de Flower ne sait pas comment gérer les `NumPyClient`, il sait " +"seulement comment gérer les `Client`. `NumPyClient` est juste une " +"abstraction de commodité construite au dessus de `Client`." 
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395
+msgid ""
+"Instead of building on top of ``NumPyClient``, we can directly build on "
+"top of ``Client``."
+msgstr ""
+"Au lieu de construire par-dessus ``NumPyClient``, nous pouvons construire "
+"directement par-dessus ``Client``."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407
+msgid "Step 2: Moving from ``NumPyClient`` to ``Client``"
+msgstr "Étape 2 : Passer de ``NumPyClient`` à ``Client``"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409
+msgid ""
+"Let's try to do the same thing using ``Client`` instead of "
+"``NumPyClient``."
+msgstr ""
+"Essayons de faire la même chose en utilisant ``Client`` au lieu de "
+"``NumPyClient``."
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519
+msgid ""
+"Before we discuss the code in more detail, let's try to run it! Gotta "
+"make sure our new ``Client``-based client works, right?"
+msgstr ""
+"Avant de discuter du code plus en détail, essayons de l'exécuter ! Nous "
+"devons nous assurer que notre nouveau client basé sur le ``Client`` "
+"fonctionne, n'est-ce pas ?"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545
+msgid ""
+"That's it, we're now using ``Client``. It probably looks similar to what "
+"we've done with ``NumPyClient``. So what's the difference?"
+msgstr ""
+"Voilà, nous utilisons maintenant ``Client``. Cela ressemble probablement "
+"à ce que nous avons fait avec ``NumPyClient``. Alors quelle est la "
+"différence ?"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547
+msgid ""
+"First of all, it's more code. But why? The difference comes from the fact"
+" that ``Client`` expects us to take care of parameter serialization and "
+"deserialization. For Flower to be able to send parameters over the "
+"network, it eventually needs to turn these parameters into ``bytes``. 
" +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." +msgstr "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 +msgid "" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." 
+msgstr "" +"La seule *vraie* différence entre Client et NumPyClient est que " +"NumPyClient s'occupe de la sérialisation et de la désérialisation pour " +"toi. Il peut le faire parce qu'il s'attend à ce que tu renvoies des " +"paramètres sous forme de NumPy ndarray, et il sait comment les gérer. " +"Cela permet de travailler avec des bibliothèques d'apprentissage " +"automatique qui ont une bonne prise en charge de NumPy (la plupart " +"d'entre elles) en un clin d'œil." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 +msgid "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." +msgstr "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" +msgstr "Étape 3 : Sérialisation personnalisée" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 +msgid "" +"Here we will explore how to implement custom serialization with a simple " +"example." +msgstr "" +"Nous allons ici explorer comment mettre en œuvre une sérialisation " +"personnalisée à l'aide d'un exemple simple." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 +msgid "" +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." +msgstr "" +"Mais d'abord, qu'est-ce que la sérialisation ? La sérialisation est " +"simplement le processus de conversion d'un objet en octets bruts, et tout" +" aussi important, la désérialisation est le processus de reconversion des" +" octets bruts en objet. Ceci est très utile pour la communication réseau." +" En effet, sans la sérialisation, tu ne pourrais pas faire passer un " +"objet Python par Internet." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." +msgstr "" +"L'apprentissage fédéré s'appuie fortement sur la communication Internet " +"pour la formation en envoyant des objets Python dans les deux sens entre " +"les clients et le serveur, ce qui signifie que la sérialisation est un " +"élément essentiel de l'apprentissage fédéré." 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." +msgstr "" +"Dans la section suivante, nous allons écrire un exemple de base où, au " +"lieu d'envoyer une version sérialisée de nos ``ndarray`` contenant nos " +"paramètres, nous allons d'abord convertir les ``ndarray`` en matrices " +"éparses, avant de les envoyer. Cette technique peut être utilisée pour " +"économiser de la bande passante, car dans certains cas où les poids d'un " +"modèle sont épars (contenant de nombreuses entrées 0), les convertir en " +"une matrice éparse peut grandement améliorer leur taille en octets." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" +msgstr "Nos fonctions de sérialisation/désérialisation personnalisées" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 +msgid "" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." +msgstr "" +"C'est là que la véritable sérialisation/désérialisation se produira, en " +"particulier dans ``ndarray_to_sparse_bytes`` pour la sérialisation et " +"``sparse_bytes_to_ndarray`` pour la désérialisation." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 +msgid "" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." 
+msgstr "" +"Notez que nous avons importé la bibliothèque ``scipy.sparse`` afin de " +"convertir nos tableaux." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" +msgstr "Côté client" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 +msgid "" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." +msgstr "" +"Pour pouvoir sérialiser nos ``ndarray`` en paramètres sparse, il nous " +"suffira d'appeler nos fonctions personnalisées dans notre " +"``flwr.client.Client``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 +msgid "" +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." +msgstr "" +"En effet, dans ``get_parameters`` nous devons sérialiser les paramètres " +"que nous avons obtenus de notre réseau en utilisant nos " +"``ndarrays_to_sparse_parameters`` personnalisés définis ci-dessus." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." +msgstr "" +"Dans ``fit``, nous devons d'abord désérialiser les paramètres provenant " +"du serveur en utilisant notre ``sparse_parameters_to_ndarrays`` " +"personnalisé, puis nous devons sérialiser nos résultats locaux avec " +"``ndarrays_to_sparse_parameters``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 +msgid "" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." 
+msgstr "" +"Dans ``evaluate``, nous n'aurons besoin que de désérialiser les " +"paramètres globaux avec notre fonction personnalisée." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" +msgstr "Côté serveur" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 +msgid "" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." +msgstr "" +"Pour cet exemple, nous utiliserons simplement ``FedAvg`` comme stratégie." +" Pour modifier la sérialisation et la désérialisation ici, il suffit de " +"réimplémenter les fonctions ``evaluate`` et ``aggregate_fit`` de " +"``FedAvg``. Les autres fonctions de la stratégie seront héritées de la " +"super-classe ``FedAvg``." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "Comme tu peux le voir, seule une ligne a été modifiée dans ``evaluate`` :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 +msgid "" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "" +"Et pour ``aggregate_fit``, nous allons d'abord désérialiser chaque " +"résultat que nous avons reçu :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" +msgstr "Puis sérialise le résultat agrégé :" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" +msgstr "" +"Nous pouvons maintenant exécuter notre exemple de sérialisation " +"personnalisée !" 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" +"Dans cette partie du tutoriel, nous avons vu comment construire des " +"clients en sous-classant soit ``NumPyClient``, soit ``Client``. " +"``NumPyClient`` est une abstraction de commodité qui facilite le travail " +"avec les bibliothèques d'apprentissage automatique qui ont une bonne " +"interopérabilité NumPy. ``Client`` est une abstraction plus flexible qui " +"nous permet de faire des choses qui ne sont pas possibles dans " +"``NumPyClient``. Pour ce faire, elle nous oblige à gérer nous-mêmes la " +"sérialisation et la désérialisation des paramètres." + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "" +"C'est la dernière partie du tutoriel Flower (pour l'instant !), " +"félicitations ! Tu es maintenant bien équipé pour comprendre le reste de " +"la documentation. 
Il y a de nombreux sujets que nous n'avons pas abordés "
+"dans le tutoriel, nous te recommandons les ressources suivantes :"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020
+msgid "`Read Flower Docs `__"
+msgstr "`Lire la documentation de Flower `__"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021
+#, fuzzy
+msgid "`Check out Flower Code Examples `__"
+msgstr ""
+"`Check out Flower Code Examples "
+"`__"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022
+#, fuzzy
+msgid ""
+"`Use Flower Baselines for your research "
+"`__"
+msgstr ""
+"`Utilise les Flower Baselines pour tes recherches "
+"`__"
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023
+#, fuzzy
+msgid ""
+"`Watch Flower AI Summit 2024 videos `__"
+msgstr ""
+"`Regardez les vidéos du Flower AI Summit 2024 `__"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9
+msgid "Get started with Flower"
+msgstr "Premiers pas avec Flower"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11
+msgid "Welcome to the Flower federated learning tutorial!"
+msgstr "Bienvenue au tutoriel sur l'apprentissage fédéré avec Flower !"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13
+#, fuzzy
+msgid ""
+"In this notebook, we'll build a federated learning system using the "
+"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch "
+"for the model training pipeline and data loading. In part 2, we federate "
+"the PyTorch project using Flower."
+msgstr ""
+"Dans ce carnet, nous allons construire un système d'apprentissage fédéré "
+"en utilisant le framework Flower, Flower Datasets et PyTorch. Dans la "
+"première partie, nous utilisons PyTorch pour le pipeline d'entraînement "
+"des modèles et le chargement des données. Dans la deuxième partie, nous "
+"fédérons le projet PyTorch en utilisant Flower."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18
+#, fuzzy
+msgid "Let's get started! 🌼"
+msgstr "Commençons ! 🌼"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32
+msgid ""
+"Before we begin with any actual code, let's make sure that we have "
+"everything we need."
+msgstr ""
+"Avant de commencer à coder, assurons-nous que nous disposons de tout ce "
+"dont nous avons besoin."
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44
+#, fuzzy
+msgid "Install dependencies"
+msgstr "Installation des dépendances"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46
+#, fuzzy
+msgid ""
+"Next, we install the necessary packages for PyTorch (``torch`` and "
+"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower "
+"(``flwr``):"
+msgstr ""
+"Ensuite, nous installons les paquets nécessaires pour PyTorch (``torch`` "
+"et ``torchvision``), Flower Datasets (``flwr-datasets``) et Flower "
+"(``flwr``) :"
+
+#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109
+#, fuzzy
+msgid ""
+"It is possible to switch to a runtime that has GPU acceleration enabled "
+"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:"
+" GPU > Save``). Note, however, that Google Colab is not always able to "
+"offer GPU acceleration. If you see an error related to GPU availability "
+"in one of the following sections, consider switching back to CPU-based "
+"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime "
+"has GPU acceleration enabled, you should see the output ``Training on "
+"cuda``, otherwise it'll say ``Training on cpu``."
+msgstr ""
+"Il est possible de passer à un runtime dont l'accélération GPU est "
+"activée (sur Google Colab : ``Runtime > Change runtime type > Hardware "
+"accelerator : GPU > Save``). Note cependant que Google Colab n'est pas "
+"toujours en mesure de proposer l'accélération GPU. 
Si tu vois une erreur " +"liée à la disponibilité du GPU dans l'une des sections suivantes, " +"envisage de repasser à une exécution basée sur le CPU en définissant " +"``DEVICE = torch.device(\"cpu\")``. Si le runtime a activé l'accélération" +" GPU, tu devrais voir apparaître le résultat ``Training on cuda``, sinon " +"il dira ``Training on cpu``." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +#, fuzzy +msgid "Load the data" +msgstr "Chargement des données" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 +#, fuzzy +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." +msgstr "" +"L'apprentissage fédéré peut être appliqué à de nombreux types de tâches " +"dans différents domaines. Dans ce tutoriel, nous présentons " +"l'apprentissage fédéré en formant un simple réseau neuronal " +"convolutionnel (CNN) sur l'ensemble de données populaire CIFAR-10. " +"CIFAR-10 peut être utilisé pour former des classificateurs d'images qui " +"font la distinction entre les images de dix classes différentes :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +#, fuzzy +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. 
We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." +msgstr "" +"Nous simulons le fait d'avoir plusieurs ensembles de données provenant de" +" plusieurs organisations (également appelé le paramètre \"cross-silo\" " +"dans l'apprentissage fédéré) en divisant l'ensemble de données CIFAR-10 " +"original en plusieurs partitions. Chaque partition représentera les " +"données d'une seule organisation. Nous faisons cela purement à des fins " +"d'expérimentation, dans le monde réel, il n'y a pas besoin de diviser les" +" données parce que chaque organisation a déjà ses propres données (les " +"données sont donc naturellement partitionnées)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 +#, fuzzy +msgid "" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." +msgstr "" +"Chaque organisation agira comme un client dans le système d'apprentissage" +" fédéré. Ainsi, le fait que dix organisations participent à une " +"fédération signifie que dix clients sont connectés au serveur " +"d'apprentissage fédéré :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 +msgid "" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. 
We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +#, fuzzy +msgid "" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." +msgstr "" +"Nous avons maintenant une liste de dix ensembles de formation et dix " +"ensembles de validation (``trainloaders`` et ``valloaders``) représentant" +" les données de dix organisations différentes. Chaque paire " +"``trainloader`/``valloader`` contient 4500 exemples de formation et 500 " +"exemples de validation. Il y a également un seul ``testloader`` (nous " +"n'avons pas divisé l'ensemble de test). Encore une fois, cela n'est " +"nécessaire que pour construire des systèmes de recherche ou d'éducation, " +"les systèmes d'apprentissage fédérés actuels ont leurs données " +"naturellement distribuées à travers plusieurs partitions." 
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +#, fuzzy +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" +msgstr "" +"Jetons un coup d'œil au premier lot d'images et d'étiquettes du premier " +"ensemble d'entraînement (c'est-à-dire ``trainloaders[0]``) avant de " +"poursuivre :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +#, fuzzy +msgid "" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." +msgstr "" +"La sortie ci-dessus montre un lot aléatoire d'images provenant du premier" +" ``chargeur de formation`` de notre liste de dix ``chargeurs de " +"formation``. Elle imprime également les étiquettes associées à chaque " +"image (c'est-à-dire l'une des dix étiquettes possibles que nous avons " +"vues ci-dessus). Si tu exécutes à nouveau la cellule, tu devrais voir un " +"autre lot d'images." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "Étape 1 : Formation centralisée avec PyTorch" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" +"Ensuite, nous allons utiliser PyTorch pour définir un simple réseau " +"neuronal convolutif. 
Cette introduction suppose une familiarité de base " +"avec PyTorch, elle ne couvre donc pas en détail les aspects liés à " +"PyTorch. Si tu veux plonger plus profondément dans PyTorch, nous te " +"recommandons `DEEP LEARNING WITH PYTORCH : A 60 MINUTE BLITZ " +"`__." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +#, fuzzy +msgid "Define the model" +msgstr "Définir le modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" +"Nous utilisons le CNN simple décrit dans le tutoriel `PyTorch " +"`__ :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "Poursuivons avec les fonctions habituelles de formation et de test :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#, fuzzy +msgid "Train the model" +msgstr "Entraîne le modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +#, fuzzy +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "" +"Nous avons maintenant tous les éléments de base dont nous avons besoin : " +"un ensemble de données, un modèle, une fonction d'entraînement et une " +"fonction de test. Assemblons-les pour entraîner le modèle sur l'ensemble " +"de données de l'une de nos organisations (``trainloaders[0]``). 
Cela " +"simule la réalité de la plupart des projets d'apprentissage automatique " +"aujourd'hui : chaque organisation possède ses propres données et entraîne" +" les modèles uniquement sur ces données internes :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#, fuzzy +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" +msgstr "" +"L'entraînement du CNN simple sur notre fractionnement CIFAR-10 pendant 5 " +"époques devrait se traduire par une précision de l'ensemble de test " +"d'environ 41 %, ce qui n'est pas bon, mais en même temps, cela n'a pas " +"vraiment d'importance pour les besoins de ce tutoriel. L'intention était " +"juste de montrer un pipeline d'entraînement centralisé simpliste qui " +"prépare le terrain pour ce qui vient ensuite - l'apprentissage fédéré !" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "Étape 2 : Apprentissage fédéré avec Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." +msgstr "" +"L'étape 1 a montré un simple pipeline de formation centralisé. Toutes les" +" données étaient au même endroit (c'est-à-dire un seul ``trainloader`` et" +" un seul ``valloader``). 
Ensuite, nous allons simuler une situation où " +"nous avons plusieurs ensembles de données dans plusieurs organisations et" +" où nous formons un modèle sur ces organisations à l'aide de " +"l'apprentissage fédéré." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "Mise à jour des paramètres du modèle" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#, fuzzy +msgid "" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "" +"Dans l'apprentissage fédéré, le serveur envoie les paramètres du modèle " +"global au client, et le client met à jour le modèle local avec les " +"paramètres reçus du serveur. Il entraîne ensuite le modèle sur les " +"données locales (ce qui modifie les paramètres du modèle localement) et " +"renvoie les paramètres du modèle mis à jour/changés au serveur (ou, " +"alternativement, il renvoie seulement les gradients au serveur, et non " +"pas les paramètres complets du modèle)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." 
+msgstr "" +"Nous avons besoin de deux fonctions d'aide pour mettre à jour le modèle " +"local avec les paramètres reçus du serveur et pour obtenir les paramètres" +" mis à jour du modèle local : ``set_parameters`` et ``get_parameters``. " +"Les deux fonctions suivantes font exactement cela pour le modèle PyTorch " +"ci-dessus." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#, fuzzy +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" +msgstr "" +"Les détails de ce fonctionnement ne sont pas vraiment importants ici " +"(n'hésite pas à consulter la documentation PyTorch si tu veux en savoir " +"plus). En substance, nous utilisons ``state_dict`` pour accéder aux " +"tenseurs de paramètres du modèle PyTorch. Les tenseurs de paramètres sont" +" ensuite convertis en/depuis une liste de ndarray NumPy (que Flower sait " +"sérialiser/désérialiser) :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Client de Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#, fuzzy +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." +msgstr "" +"Ceci étant dit, passons à la partie intéressante. Les systèmes " +"d'apprentissage fédérés se composent d'un serveur et de plusieurs " +"clients. 
Dans Flower, nous créons des clients en mettant en œuvre des " +"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." +" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " +"facile à mettre en œuvre et qu'il nous oblige à écrire moins de code " +"répétitif." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#, fuzzy +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" +"Ceci étant dit, passons à la partie intéressante. Les systèmes " +"d'apprentissage fédérés se composent d'un serveur et de plusieurs " +"clients. Dans Flower, nous créons des clients en mettant en œuvre des " +"sous-classes de ``flwr.client.Client`` ou de ``flwr.client.NumPyClient``." +" Nous utilisons ``NumPyClient`` dans ce tutoriel parce qu'il est plus " +"facile à mettre en œuvre et qu'il nous oblige à écrire moins de code " +"répétitif." 
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#, fuzzy +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "" +"``fit`` : reçoit les paramètres du modèle du serveur, entraîne les " +"paramètres du modèle sur les données locales et renvoie les paramètres du" +" modèle (mis à jour) au serveur" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#, fuzzy +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" +msgstr "" +"``evaluate`` : reçoit les paramètres du modèle du serveur, évalue les " +"paramètres du modèle sur les données locales et renvoie le résultat de " +"l'évaluation au serveur" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" +"Nous avons mentionné que nos clients utiliseront les composants PyTorch " +"définis précédemment pour la formation et l'évaluation des modèles. " +"Voyons une simple mise en œuvre du client Flower qui réunit tout cela :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#, fuzzy +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. 
Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient``. Flower calls " +"``FlowerClient.fit`` on the respective instance when the server selects a" +" particular client for training (and ``FlowerClient.evaluate`` for " +"evaluation)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +#, fuzzy +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. 
Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." +msgstr "" +"Dans ce carnet, nous voulons simuler un système d'apprentissage fédéré " +"avec 10 clients sur une seule machine. Cela signifie que le serveur et " +"les 10 clients vivront sur une seule machine et partageront des " +"ressources telles que le CPU, le GPU et la mémoire. Avoir 10 clients " +"signifierait avoir 10 instances de ``FlowerClient`` en mémoire. Faire " +"cela sur une seule machine peut rapidement épuiser les ressources mémoire" +" disponibles, même si seulement un sous-ensemble de ces clients participe" +" à un seul tour d'apprentissage fédéré." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +#, fuzzy +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." 
+msgstr "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function called ``client_fn`` that creates a ``FlowerClient`` instance on" +" demand. Flower calls ``client_fn`` whenever it needs an instance of one " +"particular client to call ``fit`` or ``evaluate`` (those instances are " +"usually discarded after use, so they should not keep any local state). " +"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " +"be used, for example, to load different local data partitions for " +"different clients, as can be seen below:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +#, fuzzy +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" +"Nous avons maintenant la classe ``FlowerClient`` qui définit " +"l'entraînement/évaluation côté client et ``client_fn`` qui permet à " +"Flower de créer des instances de ``FlowerClient`` chaque fois qu'il a " +"besoin d'appeler ``fit`` ou ``evaluate`` sur un client particulier. La " +"dernière étape consiste à démarrer la simulation réelle en utilisant " +"``flwr.simulation.start_simulation``." 
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Serveur de Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +#, fuzzy +msgid "" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" +msgstr "" +"Flower dispose d'un certain nombre de stratégies intégrées, mais nous " +"pouvons également utiliser nos propres implémentations de stratégies pour" +" personnaliser presque tous les aspects de l'approche de l'apprentissage " +"fédéré. Pour cet exemple, nous utilisons l'implémentation intégrée " +"``FedAvg`` et nous la personnalisons en utilisant quelques paramètres de " +"base. La dernière étape est l'appel à ``start_simulation`` qui - tu l'as " +"deviné - démarre la simulation :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +msgid "" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +#, fuzzy +msgid "Run the training" +msgstr "Commencer la formation" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +msgid "" +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "Dans les coulisses" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "" +"Alors, comment cela fonctionne-t-il ? Comment Flower exécute-t-il cette " +"simulation ?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, fuzzy, python-format +msgid "" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. 
``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." +msgstr "" +"Lorsque nous appelons ``start_simulation``, nous disons à Flower qu'il y " +"a 10 clients (``num_clients=10``). Flower demande alors à la stratégie " +"``FedAvg`` de sélectionner des clients. ``FedAvg`` sait qu'il doit " +"sélectionner 100% des clients disponibles (``fraction_fit=1.0``), alors " +"il choisit 10 clients au hasard (c'est à dire 100% de 10)." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 +#, fuzzy +msgid "" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" +"Flower demande ensuite aux 10 clients sélectionnés d'entraîner le modèle." +" Lorsque le serveur reçoit les mises à jour des paramètres du modèle de " +"la part des clients, il les transmet à la stratégie (*FedAvg*) pour " +"qu'elle les agrège. La stratégie agrège ces mises à jour et renvoie le " +"nouveau modèle global, qui est ensuite utilisé dans le prochain cycle " +"d'apprentissage fédéré." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "Où est la précision ?" 
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" +"Tu as peut-être remarqué que toutes les mesures, à l'exception de " +"``losses_distributed``, sont vides. Où est passée la ``{\"accuracy\": " +"float(accuracy)}`` ?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." +msgstr "" +"Flower peut automatiquement agréger les pertes renvoyées par les clients " +"individuels, mais il ne peut pas faire la même chose pour les mesures " +"dans le dictionnaire de mesures générique (celui avec la clé " +"``accuracy``). Les dictionnaires de mesures peuvent contenir des types de" +" mesures très différents et même des paires clé/valeur qui ne sont pas " +"des mesures du tout, donc le cadre ne sait pas (et ne peut pas savoir) " +"comment les gérer automatiquement." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." 
+msgstr "" +"En tant qu'utilisateurs, nous devons indiquer au framework comment " +"gérer/agréger ces métriques personnalisées, et nous le faisons en passant" +" des fonctions d'agrégation de métriques à la stratégie. La stratégie " +"appellera alors ces fonctions chaque fois qu'elle recevra des métriques " +"d'ajustement ou d'évaluation de la part des clients. Les deux fonctions " +"possibles sont ``fit_metrics_aggregation_fn`` et " +"``evaluate_metrics_aggregation_fn``." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "" +"Créons une simple fonction de calcul de la moyenne pondérée pour agréger " +"la mesure ``accuracy`` que nous renvoie ``evaluate`` :" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" +"Nous avons maintenant un système complet qui effectue la formation " +"fédérée et l'évaluation fédérée. Il utilise la fonction " +"``weighted_average`` pour agréger les mesures d'évaluation personnalisées et " +"calcule une seule mesure ``accuracy`` pour tous les clients du côté " +"du serveur." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." 
+msgstr "" +"Les deux autres catégories de mesures (``losses_centralized`` et " +"``metrics_centralized``) sont toujours vides car elles ne s'appliquent" +" que lorsque l'évaluation centralisée est utilisée. La deuxième partie du" +" tutoriel Flower couvrira l'évaluation centralisée." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "Remarques finales" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" +"Félicitations, tu viens d'entraîner un réseau neuronal convolutif, fédéré" +" sur 10 clients ! Avec ça, tu comprends les bases de l'apprentissage " +"fédéré avec Flower. La même approche que tu as vue peut être utilisée " +"avec d'autres cadres d'apprentissage automatique (pas seulement PyTorch) " +"et d'autres tâches (pas seulement la classification des images CIFAR-10)," +" par exemple le NLP avec Hugging Face Transformers ou la parole avec " +"SpeechBrain." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "" +"Dans le prochain cahier, nous allons aborder des concepts plus avancés. " +"Tu veux personnaliser ta stratégie ? 
Initialiser des paramètres côté " +"serveur ? Ou évaluer le modèle agrégé côté serveur ? Nous aborderons tout" +" cela et bien plus encore dans le prochain tutoriel." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 2 " +"`__ va plus en profondeur sur les stratégies et toutes les " +"choses avancées que tu peux construire avec elles." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +#, fuzzy +msgid "Use a federated learning strategy" +msgstr "Stratégie de moyenne fédérée." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." +msgstr "" +"Bienvenue dans la prochaine partie du tutoriel sur l'apprentissage " +"fédéré. Dans les parties précédentes de ce tutoriel, nous avons présenté " +"l'apprentissage fédéré avec PyTorch et Flower (`partie 1 " +"`__)." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." +msgstr "" +"Dans ce carnet, nous allons commencer à personnaliser le système " +"d'apprentissage fédéré que nous avons construit dans le carnet " +"d'introduction (toujours en utilisant `Flower `__ et " +"`PyTorch `__)." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +#, fuzzy +msgid "Let's move beyond FedAvg with Flower strategies! 
🌼" +msgstr "Dépassons FedAvg avec les stratégies florales !" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." +msgstr "" +"Chargeons maintenant les ensembles d'entraînement et de test CIFAR-10, " +"divisons-les en dix ensembles de données plus petits (chacun divisé en " +"ensemble d'entraînement et de validation), et enveloppons le tout dans " +"leur propre ``DataLoader``. Nous introduisons un nouveau paramètre " +"``num_clients`` qui nous permet d'appeler ``load_datasets`` avec " +"différents nombres de clients." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" +msgstr "Personnalisation de la stratégie" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." +msgstr "" +"Jusqu'à présent, tout devrait te sembler familier si tu as travaillé sur " +"le cahier d'introduction. Avec cela, nous sommes prêts à présenter un " +"certain nombre de nouvelles fonctionnalités." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "Paramètres côté serveur **initialisation**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +#, fuzzy +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. 
In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" +msgstr "" +"Flower, par défaut, initialise le modèle global en demandant à un client " +"aléatoire les paramètres initiaux. Dans de nombreux cas, nous voulons " +"cependant avoir plus de contrôle sur l'initialisation des paramètres. " +"Flower te permet donc de passer directement les paramètres initiaux à la " +"Stratégie :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +#, fuzzy +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." +msgstr "" +"Le fait de passer ``initial_parameters`` à la stratégie ``FedAvg`` " +"empêche Flower de demander les paramètres initiaux à l'un des clients. Si" +" nous regardons de près, nous pouvons voir que les journaux ne montrent " +"aucun appel à la méthode ``FlowerClient.get_parameters``." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +#, fuzzy +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" +"Le fait de passer ``initial_parameters`` à la stratégie ``FedAvg`` " +"empêche Flower de demander les paramètres initiaux à l'un des clients. Si" +" nous regardons de près, nous pouvons voir que les journaux ne montrent " +"aucun appel à la méthode ``FlowerClient.get_parameters``." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "Commencer par une stratégie personnalisée" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +#, fuzzy +msgid "" +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." +msgstr "" +"Elle accepte un certain nombre d'arguments, parmi lesquels le " +"``client_fn`` utilisé pour créer les instances de ``FlowerClient``, le " +"nombre de clients à simuler ``num_clients``, le nombre de rounds " +"``num_rounds``, et la stratégie." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "" +"La stratégie englobe l'approche/l'algorithme d'apprentissage fédéré, par " +"exemple, ``FedAvg`` ou ``FedAdagrad``. 
Essayons d'utiliser une stratégie " +"différente cette fois-ci :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "Paramètre côté serveur **évaluation**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" +"Flower peut évaluer le modèle agrégé côté serveur ou côté client. Les " +"évaluations côté client et côté serveur sont similaires à certains " +"égards, mais différentes à d'autres." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "" +"**L'évaluation centralisée** (ou *évaluation côté serveur*) est " +"conceptuellement simple : elle fonctionne de la même manière que " +"l'évaluation dans l'apprentissage automatique centralisé. S'il existe un " +"ensemble de données côté serveur qui peut être utilisé à des fins " +"d'évaluation, alors c'est parfait. Nous pouvons évaluer le modèle " +"nouvellement agrégé après chaque cycle de formation sans avoir à envoyer " +"le modèle aux clients. Nous avons également la chance que l'ensemble de " +"notre ensemble de données d'évaluation soit disponible à tout moment." 
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515
+msgid ""
+"**Federated Evaluation** (or *client-side evaluation*) is more complex, "
+"but also more powerful: it doesn't require a centralized dataset and "
+"allows us to evaluate models over a larger set of data, which often "
+"yields more realistic evaluation results. In fact, many scenarios require"
+" us to use **Federated Evaluation** if we want to get representative "
+"evaluation results at all. But this power comes at a cost: once we start "
+"to evaluate on the client side, we should be aware that our evaluation "
+"dataset can change over consecutive rounds of learning if those clients "
+"are not always available. Moreover, the dataset held by each client can "
+"also change over consecutive rounds. This can lead to evaluation results "
+"that are not stable, so even if we would not change the model, we'd see "
+"our evaluation results fluctuate over consecutive rounds."
+msgstr ""
+"**L'évaluation fédérée** (ou évaluation côté client) est plus complexe, "
+"mais aussi plus puissante : elle ne nécessite pas d'ensemble de données "
+"centralisé et nous permet d'évaluer les modèles sur un plus grand "
+"ensemble de données, ce qui donne souvent des résultats d'évaluation plus"
+" réalistes. En fait, de nombreux scénarios exigent que nous utilisions "
+"**l'évaluation fédérée** si nous voulons obtenir des résultats "
+"d'évaluation représentatifs. Mais cette puissance a un coût : une fois "
+"que nous commençons à évaluer côté client, nous devons savoir que notre "
+"ensemble de données d'évaluation peut changer au cours des cycles "
+"d'apprentissage consécutifs si ces clients ne sont pas toujours "
+"disponibles. De plus, l'ensemble de données détenu par chaque client "
+"peut également changer au cours des cycles consécutifs. 
Cela peut conduire à des résultats " +"d'évaluation qui ne sont pas stables, donc même si nous ne changions pas " +"le modèle, nous verrions nos résultats d'évaluation fluctuer au cours des" +" cycles consécutifs." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" +"Nous avons vu comment l'évaluation fédérée fonctionne du côté client " +"(c'est-à-dire en implémentant la méthode ``evaluate`` dans " +"``FlowerClient``). Voyons maintenant comment nous pouvons évaluer les " +"paramètres du modèle agrégé du côté serveur :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#, fuzzy +msgid "Finally, we run the simulation." +msgstr "Simulation de moniteur" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "Envoi/réception de valeurs arbitraires vers/depuis les clients" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#, fuzzy +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. 
Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" +"Comment pouvons-nous donc envoyer ce dictionnaire de configuration du " +"serveur aux clients ? Les stratégies de Flower intégrées fournissent un " +"moyen de le faire, et cela fonctionne de la même façon que l'évaluation " +"côté serveur. 
Nous fournissons une fonction à la stratégie, et la " +"stratégie appelle cette fonction pour chaque cycle d'apprentissage fédéré" +" :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +#, fuzzy +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "" +"Ensuite, nous allons simplement passer cette fonction à la stratégie " +"FedAvg avant de commencer la simulation :" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" +"Comme nous pouvons le voir, les journaux des clients incluent maintenant " +"le cycle actuel d'apprentissage fédéré (qu'ils lisent depuis le " +"dictionnaire ``config``). Nous pouvons également configurer " +"l'apprentissage local pour qu'il s'exécute pendant une époque au cours du" +" premier et du deuxième cycle d'apprentissage fédéré, puis pendant deux " +"époques au cours du troisième cycle." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." +msgstr "" +"Les clients peuvent également renvoyer des valeurs arbitraires au " +"serveur. Pour ce faire, ils renvoient un dictionnaire depuis ``fit`` " +"et/ou ``evaluate``. 
Nous avons vu et utilisé ce concept tout au long de " +"ce carnet sans le mentionner explicitement : notre ``FlowerClient`` " +"renvoie un dictionnaire contenant une paire clé/valeur personnalisée en " +"tant que troisième valeur de retour dans ``evaluate``." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" +msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "" +"Comme dernière étape de ce carnet, voyons comment nous pouvons utiliser " +"Flower pour expérimenter avec un grand nombre de clients." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +msgid "" +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, fuzzy, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" +msgstr "" +"Nous avons maintenant 1000 partitions, chacune contenant 45 exemples " +"d'entraînement et 5 exemples de validation. 
Étant donné que le nombre "
+"d'exemples d'entraînement sur chaque client est assez faible, nous "
+"devrions probablement entraîner le modèle un peu plus longtemps, nous "
+"configurons donc les clients pour qu'ils effectuent 3 époques "
+"d'entraînement local. Nous devrions également ajuster la fraction de "
+"clients sélectionnés pour l'entraînement à chaque tour (nous ne voulons "
+"pas que les 1000 clients participent à chaque tour), nous ajustons donc "
+"``fraction_fit`` à ``0.025``, ce qui signifie que seulement 2,5% des "
+"clients disponibles (donc 25 clients) seront sélectionnés pour "
+"l'entraînement à chaque tour :"
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843
+msgid ""
+"In this notebook, we've seen how we can gradually enhance our system by "
+"customizing the strategy, initializing parameters on the server side, "
+"choosing a different strategy, and evaluating models on the server-side. "
+"That's quite a bit of flexibility with so little code, right?"
+msgstr ""
+"Dans ce carnet, nous avons vu comment nous pouvons progressivement "
+"améliorer notre système en personnalisant la stratégie, en initialisant "
+"les paramètres côté serveur, en choisissant une stratégie différente et "
+"en évaluant les modèles côté serveur. C'est une sacrée flexibilité avec "
+"si peu de code, n'est-ce pas ?"
+
+#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845
+msgid ""
+"In the later sections, we've seen how we can communicate arbitrary values"
+" between server and clients to fully customize client-side execution. "
+"With that capability, we built a large-scale Federated Learning "
+"simulation using the Flower Virtual Client Engine and ran an experiment "
+"involving 1000 clients in the same workload - all in a Jupyter Notebook!"
+msgstr "" +"Dans les sections ultérieures, nous avons vu comment nous pouvons " +"communiquer des valeurs arbitraires entre le serveur et les clients pour " +"personnaliser entièrement l'exécution côté client. Grâce à cette " +"capacité, nous avons construit une simulation d'apprentissage fédéré à " +"grande échelle en utilisant le moteur de client virtuel Flower et nous " +"avons mené une expérience impliquant 1000 clients dans la même charge de " +"travail - le tout dans un carnet Jupyter !" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." +msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 3 [WIP] " +"`__ montre comment construire une ``Stratégie`` entièrement " +"personnalisée à partir de zéro." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +#, fuzzy +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" +"Dans ce tutoriel, tu apprendras ce qu'est l'apprentissage fédéré, tu " +"construiras ton premier système dans Flower, et tu l'étendras " +"progressivement. Si tu travailles sur toutes les parties du tutoriel, tu " +"seras capable de construire des systèmes d'apprentissage fédéré avancés " +"qui se rapprochent de l'état actuel de l'art dans le domaine." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. 
Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" +"🧑‍🏫 Ce tutoriel part de zéro et n'attend aucune familiarité avec " +"l'apprentissage fédéré. Seule une compréhension de base de la science des" +" données et de la programmation Python est supposée." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ et " +"rejoignez la communauté Flower sur Slack pour vous connecter, poser des " +"questions et obtenir de l'aide : `Join Slack `__ 🌼 Nous serions ravis d'avoir de vos nouvelles dans le canal " +"``#introductions`` ! Et si quelque chose n'est pas clair, rendez-vous sur" +" le canal ``#questions``." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +#, fuzzy +msgid "Let's get started!" +msgstr "Allons-y, déclarons-le !" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "Apprentissage automatique classique" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "" +"Avant de commencer à discuter de l'apprentissage fédéré, récapitulons " +"rapidement la façon dont la plupart des apprentissages automatiques " +"fonctionnent aujourd'hui." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." 
+msgstr "" +"Dans l'apprentissage automatique, nous avons un modèle et des données. Le" +" modèle peut être un réseau neuronal (comme illustré ici), ou quelque " +"chose d'autre, comme la régression linéaire classique." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|ac0a9766e26044d6aea222a829859b20|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "Modèle et données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" +"Nous entraînons le modèle en utilisant les données pour effectuer une " +"tâche utile. Une tâche peut consister à détecter des objets dans des " +"images, à transcrire un enregistrement audio ou à jouer à un jeu comme le" +" Go." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|36cd6e248b1443ce8a82b5a025bba368|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "Entraîne le modèle à l'aide des données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#, fuzzy +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "" +"Dans la pratique, les données d'entraînement avec lesquelles nous " +"travaillons ne proviennent pas de la machine sur laquelle nous entraînons" +" le modèle. Elles sont créées ailleurs." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#, fuzzy +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." 
+msgstr "" +"Elle prend naissance sur un smartphone par l'interaction de l'utilisateur" +" avec une application, une voiture qui collecte des données de capteurs, " +"un ordinateur portable qui reçoit des entrées via le clavier, ou un haut-" +"parleur intelligent qui écoute quelqu'un qui essaie de chanter une " +"chanson." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "Données sur un téléphone" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" +"Il est également important de mentionner que cet \"ailleurs\" n'est " +"généralement pas un seul endroit, mais plusieurs. Il peut s'agir de " +"plusieurs appareils fonctionnant tous avec la même application. Mais il " +"peut également s'agir de plusieurs organisations, qui génèrent toutes des" +" données pour la même tâche." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "Les données se trouvent sur de nombreux appareils" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." 
+msgstr "" +"Ainsi, pour utiliser l'apprentissage automatique, ou tout autre type " +"d'analyse de données, l'approche utilisée par le passé consistait à " +"collecter toutes les données sur un serveur central. Ce serveur peut se " +"trouver quelque part dans un centre de données, ou quelque part dans le " +"cloud." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|7605632e1b0f49599ffacf841491fcfb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "Collecte centralisée des données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +#, fuzzy +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." +msgstr "" +"Une fois que toutes les données sont rassemblées en un seul endroit, nous" +" pouvons enfin utiliser des algorithmes d'apprentissage automatique pour " +"entraîner notre modèle sur les données. C'est l'approche d'apprentissage " +"automatique sur laquelle nous nous sommes fondamentalement toujours " +"appuyés." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "Formation au modèle central" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "Les défis de l'apprentissage automatique classique" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. 
Cases, where all the data is naturally available on a " +"centralized server." +msgstr "" +"L'approche classique de l'apprentissage automatique que nous venons de " +"voir peut être utilisée dans certains cas. Parmi les grands exemples, on " +"peut citer la catégorisation des photos de vacances, ou l'analyse du " +"trafic web. Des cas, où toutes les données sont naturellement disponibles" +" sur un serveur centralisé." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|5405ed430e4746e28b083b146fb71731|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "Possibilité de centralisation" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "" +"Mais cette approche ne peut pas être utilisée dans de nombreux autres cas" +" : lorsque les données ne sont pas disponibles sur un serveur centralisé," +" ou lorsque les données disponibles sur un serveur ne sont pas " +"suffisantes pour former un bon modèle." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|a389e87dab394eb48a8949aa2397687b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "Impossible de centraliser" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#, fuzzy +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. 
Those reasons include:" +msgstr "" +"Il existe de nombreuses raisons pour lesquelles l'approche classique " +"centralisée de l'apprentissage automatique ne fonctionne pas pour un " +"grand nombre de cas d'utilisation très importants dans le monde réel, " +"notamment :" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#, fuzzy +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" +"**Réglementations** : GDPR (Europe), CCPA (Californie), PIPEDA (Canada), " +"LGPD (Brésil), PDPL (Argentine), KVKK (Turquie), POPI (Afrique du Sud), " +"FSS (Russie), CDPR (Chine), PDPB (Inde), PIPA (Corée), APPI (Japon), PDP " +"(Indonésie), PDPA (Singapour), APP (Australie), et d'autres " +"réglementations protègent les données sensibles contre le déplacement. En" +" fait, ces réglementations empêchent même parfois des organisations " +"individuelles de combiner les données de leurs propres utilisateurs pour " +"la formation à l'intelligence artificielle parce que ces utilisateurs " +"vivent dans différentes parties du monde, et que leurs données sont " +"régies par des réglementations différentes en matière de protection des " +"données." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. 
If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" +"**Préférence de l'utilisateur** : En plus de la réglementation, il existe" +" des cas d'utilisation où les utilisateurs s'attendent tout simplement à " +"ce qu'aucune donnée ne quitte leur appareil, jamais. Si tu tapes tes mots" +" de passe et tes informations de carte de crédit sur le clavier numérique" +" de ton téléphone, tu ne t'attends pas à ce que ces mots de passe " +"finissent sur le serveur de l'entreprise qui a développé ce clavier, n" +"'est-ce pas ? En fait, ce cas d'utilisation est la raison pour laquelle " +"l'apprentissage fédéré a été inventé en premier lieu." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +#, fuzzy +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." +msgstr "" +"**volume de données** : certains capteurs, comme les caméras, produisent " +"un volume de données si important qu'il n'est ni possible ni économique " +"de collecter toutes les données (en raison, par exemple, de la bande " +"passante ou de l'efficacité des communications). Pensez à un service " +"ferroviaire national comptant des centaines de gares à travers le pays. 
" +"Si chacune de ces gares est équipée d'un certain nombre de caméras de " +"sécurité, le volume de données brutes sur les appareils qu'elles " +"produisent nécessite une infrastructure incroyablement puissante et " +"excessivement coûteuse pour les traiter et les stocker. Et la plupart de " +"ces données ne sont même pas utiles." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" +"Voici quelques exemples où l'apprentissage automatique centralisé ne " +"fonctionne pas :" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#, fuzzy +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" +"Des dossiers médicaux sensibles provenant de plusieurs hôpitaux pour " +"former des modèles de détection du cancer" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" +"Informations financières provenant de différentes organisations pour " +"détecter les fraudes financières" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" +"Les données de localisation de ta voiture électrique pour mieux prédire " +"l'autonomie" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" +"Messages cryptés de bout en bout pour former de meilleurs modèles " +"d'autocomplétion" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +#, fuzzy +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. 
In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" +"La popularité des systèmes améliorant la confidentialité comme le " +"navigateur `Brave `__ ou le messager `Signal " +"`__ montre que les utilisateurs se soucient de la " +"confidentialité. En fait, ils choisissent la version améliorant la " +"confidentialité plutôt que d'autres alternatives, si une telle " +"alternative existe. Mais que pouvons-nous faire pour appliquer " +"l'apprentissage automatique et la science des données à ces cas afin " +"d'utiliser les données privées ? Après tout, ce sont tous des domaines " +"qui bénéficieraient de manière significative des récentes avancées en " +"matière d'IA." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "Apprentissage fédéré" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" +msgstr "" +"L'apprentissage fédéré inverse simplement cette approche. Il permet " +"l'apprentissage automatique sur des données distribuées en déplaçant la " +"formation vers les données, au lieu de déplacer les données vers la " +"formation. 
Voici l'explication en une seule phrase :" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "Apprentissage automatique central : déplace les données vers le calcul" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "Apprentissage (machine) fédéré : déplacer le calcul vers les données" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" +"Ce faisant, il nous permet d'utiliser l'apprentissage automatique (et " +"d'autres approches de science des données) dans des domaines où cela " +"n'était pas possible auparavant. Nous pouvons désormais former " +"d'excellents modèles d'IA médicale en permettant à différents hôpitaux de" +" travailler ensemble. Nous pouvons résoudre les fraudes financières en " +"formant des modèles d'IA sur les données de différentes institutions " +"financières. 
Nous pouvons créer de nouvelles applications d'amélioration " +"de la confidentialité (telles que la messagerie sécurisée) qui ont une " +"meilleure IA intégrée que leurs alternatives n'améliorant pas la " +"confidentialité. Et ce ne sont là que quelques exemples qui me viennent à" +" l'esprit. Au fur et à mesure que nous déployons l'apprentissage fédéré, " +"nous découvrons de plus en plus de domaines qui peuvent soudainement être" +" réinventés parce qu'ils ont maintenant accès à de vastes quantités de " +"données auparavant inaccessibles." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "" +"Comment fonctionne l'apprentissage fédéré ? Commençons par une " +"explication intuitive." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "L'apprentissage fédéré en cinq étapes" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "Étape 0 : Initialisation du modèle global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." +msgstr "" +"Nous commençons par initialiser le modèle sur le serveur. C'est " +"exactement la même chose dans l'apprentissage centralisé classique : nous" +" initialisons les paramètres du modèle, soit de façon aléatoire, soit à " +"partir d'un point de contrôle précédemment sauvegardé."
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|89c412136a5146ec8dc32c0973729f12|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "Initialise le modèle global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" +"Étape 1 : envoyer le modèle à un certain nombre d'organisations/appareils" +" connectés (nœuds clients)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#, fuzzy +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" +"Ensuite, nous envoyons les paramètres du modèle global aux nœuds clients " +"connectés (par exemple, les appareils périphériques comme les smartphones" +" ou les serveurs appartenant à des organisations). Cela permet de " +"s'assurer que chaque nœud participant commence sa formation locale en " +"utilisant les mêmes paramètres de modèle. Nous n'utilisons souvent que " +"quelques-uns des nœuds connectés au lieu de tous les nœuds. La raison en " +"est que la sélection d'un nombre croissant de nœuds clients a des " +"rendements décroissants." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "Envoyer le modèle global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" +"Étape 2 : Entraîne le modèle localement sur les données de chaque " +"organisation/appareil (nœud client)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." +msgstr "" +"Maintenant que tous les nœuds clients (sélectionnés) disposent de la " +"dernière version des paramètres du modèle global, ils commencent " +"l'entraînement local. Ils utilisent leur propre ensemble de données " +"locales pour entraîner leur propre modèle local. Ils n'entraînent pas le " +"modèle jusqu'à la convergence totale, mais ils ne s'entraînent que " +"pendant un petit moment. Il peut s'agir d'une seule époque sur les " +"données locales, ou même de quelques étapes (mini-batchs)." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "Entraînement sur les données locales" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "Étape 3 : Renvoyer les mises à jour du modèle au serveur" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "" +"Après l'entraînement local, chaque nœud client possède une version " +"légèrement différente des paramètres du modèle qu'il a reçus à l'origine." +" Les paramètres sont tous différents parce que chaque nœud client a des " +"exemples différents dans son ensemble de données local. Les nœuds clients" +" renvoient ensuite ces mises à jour du modèle au serveur. Les mises à " +"jour du modèle qu'ils envoient peuvent être soit les paramètres complets " +"du modèle, soit seulement les gradients qui ont été accumulés au cours de" +" l'entraînement local."
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a7579ad7734347508e959d9e14f2f53d|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "Envoyer les mises à jour du modèle" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "" +"Étape 4 : Agréger les mises à jour des modèles dans un nouveau modèle " +"global" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" +"Le serveur reçoit les mises à jour du modèle des nœuds clients " +"sélectionnés. S'il a sélectionné 100 nœuds clients, il dispose maintenant" +" de 100 versions légèrement différentes du modèle global original, " +"chacune ayant été formée sur les données locales d'un client. Mais ne " +"voulions-nous pas avoir un seul modèle qui contienne les apprentissages " +"des données de l'ensemble des 100 nœuds clients ?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. 
To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" +"Pour obtenir un seul modèle, nous devons combiner toutes les mises à " +"jour du modèle que nous avons reçues des nœuds clients. Ce processus est " +"appelé *agrégation*, et il existe de nombreuses façons de le faire. La " +"façon la plus élémentaire s'appelle *Federated Averaging* (`McMahan et " +"al., 2016 `__), souvent abrégé en *FedAvg*. *FedAvg* prend les " +"100 mises à jour du modèle et, comme son nom l'indique, en fait la " +"moyenne. Plus précisément, il prend la *moyenne pondérée* des mises à " +"jour du modèle, pondérée par le nombre d'exemples que chaque client a " +"utilisés pour l'entraînement. La pondération est importante pour " +"s'assurer que chaque exemple de données a la même \"influence\" sur le " +"modèle global résultant. Si un client a 10 exemples et qu'un autre " +"client a 100 exemples, alors - sans pondération - chacun des 10 exemples " +"influencerait le modèle global dix fois plus que chacun des 100 exemples."
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "Agrégation des mises à jour du modèle" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "Étape 5 : répète les étapes 1 à 4 jusqu'à ce que le modèle converge" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" +"Les étapes 1 à 4 constituent ce que nous appelons un cycle unique " +"d'apprentissage fédéré. Les paramètres du modèle global sont envoyés aux " +"nœuds clients participants (étape 1), les nœuds clients s'entraînent sur " +"leurs données locales (étape 2), ils envoient leurs modèles mis à jour au" +" serveur (étape 3), et le serveur agrège ensuite les mises à jour du " +"modèle pour obtenir une nouvelle version du modèle global (étape 4)." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +#, fuzzy +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes."
+msgstr "" +"Au cours d'un seul tour, chaque nœud client qui participe à cette " +"itération ne s'entraîne que pendant un petit moment. Cela signifie " +"qu'après l'étape d'agrégation (étape 4), nous avons un modèle qui a été " +"entraîné sur toutes les données de tous les nœuds clients participants, " +"mais seulement pendant un petit moment. Nous devons ensuite répéter ce " +"processus d'entraînement encore et encore pour finalement arriver à un " +"modèle entièrement entraîné qui fonctionne bien sur l'ensemble des " +"données de tous les nœuds clients." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" +"Félicitations, tu comprends maintenant les bases de l'apprentissage " +"fédéré. Il y a bien sûr beaucoup plus à discuter, mais c'était " +"l'apprentissage fédéré en quelques mots. Dans les parties suivantes de ce" +" tutoriel, nous irons plus en détail. Les questions intéressantes " +"comprennent : comment pouvons-nous sélectionner les meilleurs nœuds " +"clients qui devraient participer au prochain tour ? Quelle est la " +"meilleure façon d'agréger les mises à jour du modèle ? Comment pouvons-" +"nous gérer les nœuds clients qui échouent (stragglers) ?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +#, fuzzy +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. 
This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" +"Tout comme nous pouvons former un modèle sur les données décentralisées " +"de différents nœuds clients, nous pouvons également évaluer le modèle sur" +" ces données pour recevoir des mesures précieuses. C'est ce qu'on appelle" +" l'évaluation fédérée, parfois abrégée en FE. En fait, l'évaluation " +"fédérée fait partie intégrante de la plupart des systèmes d'apprentissage" +" fédéré." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "Analyses fédérées" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" +"Dans de nombreux cas, l'apprentissage automatique n'est pas nécessaire " +"pour tirer de la valeur des données. L'analyse des données peut donner " +"des indications précieuses, mais là encore, il n'y a souvent pas assez de" +" données pour obtenir une réponse claire. Quel est l'âge moyen auquel les" +" gens développent un certain type de problème de santé ? L'analyse " +"fédérée permet de telles requêtes sur plusieurs nœuds clients. 
Elle est " +"généralement utilisée en conjonction avec d'autres technologies de " +"renforcement de la confidentialité, comme l'agrégation sécurisée, pour " +"empêcher le serveur de voir les résultats soumis par les nœuds clients " +"individuels." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "Flower" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." +msgstr "" +"L'apprentissage fédéré, l'évaluation fédérée et l'analyse fédérée " +"nécessitent une infrastructure pour déplacer les modèles d'apprentissage " +"automatique dans les deux sens, les entraîner et les évaluer sur des " +"données locales, puis agréger les modèles mis à jour. Flower fournit " +"l'infrastructure pour faire exactement cela de manière simple, évolutive " +"et sécurisée.
En bref, Flower présente une approche unifiée de " +"l'apprentissage, de l'analyse et de l'évaluation fédérés. Il permet à " +"l'utilisateur de fédérer n'importe quelle charge de travail, n'importe " +"quel cadre de ML et n'importe quel langage de programmation." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|55472eef61274ba1b739408607e109df|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "" +"Serveur d'apprentissage fédéré de Flower et nœuds clients (voiture, " +"scooter, ordinateur personnel, roomba et téléphone)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "" +"Félicitations, tu viens d'apprendre les bases de l'apprentissage fédéré " +"et son rapport avec l'apprentissage automatique classique (centralisé) !" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "" +"Dans la prochaine partie de ce tutoriel, nous allons construire un " +"premier système d'apprentissage fédéré avec Flower." + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" +"Avant de continuer, n'oublie pas de rejoindre la communauté Flower sur " +"Slack : `Join Slack `__" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +#, fuzzy +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." 
+msgstr "" +"Le `Tutoriel d'apprentissage fédéré Flower - Partie 1 " +"`__ " +"montre comment construire un système d'apprentissage fédéré simple avec " +"PyTorch et Flower." + +#~ msgid "Flower CLI commands" +#~ msgstr "Commandes CLI Flower" + +#~ msgid "Contributor guide" +#~ msgstr "Guide pour les contributeurs" + +#~ msgid "API Reference - Flower CLI commands" +#~ msgstr "Référence API - Commandes CLI pour Flower" + +#~ msgid "API Reference - flwr (Python package)" +#~ msgstr "Référence API - flwr (paquetage Python)" + +#~ msgid "Flower client." +#~ msgstr "Client de Flower" + +#~ msgid "Abstract base class for Flower clients." +#~ msgstr "" + +#~ msgid "Evaluate the provided parameters using the locally held dataset." +#~ msgstr "évaluer le modèle mis à jour sur l'ensemble de test local" + +#~ msgid "Parameters" +#~ msgstr "Paramètres du modèle." + +#~ msgid "" +#~ "The evaluation instructions containing " +#~ "(global) model parameters received from " +#~ "the server and a dictionary of " +#~ "configuration values used to customize " +#~ "the local evaluation process." +#~ msgstr "" + +#~ msgid "Returns" +#~ msgstr "Ressources" + +#~ msgid "" +#~ "The evaluation result containing the " +#~ "loss on the local dataset and " +#~ "other details such as the number " +#~ "of local data examples used for " +#~ "evaluation." +#~ msgstr "" + +#~ msgid "Return type" +#~ msgstr "" + +#~ msgid "Refine the provided parameters using the locally held dataset." +#~ msgstr "" + +#~ msgid "" +#~ "The training instructions containing (global)" +#~ " model parameters received from the " +#~ "server and a dictionary of configuration" +#~ " values used to customize the local" +#~ " training process." +#~ msgstr "" + +#~ msgid "" +#~ "The training result containing updated " +#~ "parameters and other details such as " +#~ "the number of local training examples" +#~ " used for training." +#~ msgstr "" + +#~ msgid "Return the current local model parameters." 
+#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "" +#~ "The get parameters instructions received " +#~ "from the server containing a dictionary" +#~ " of configuration values." +#~ msgstr "" + +#~ msgid "The current local model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "Return set of client's properties." +#~ msgstr "" + +#~ msgid "" +#~ "The get properties instructions received " +#~ "from the server containing a dictionary" +#~ " of configuration values." +#~ msgstr "" + +#~ msgid "The current client properties." +#~ msgstr "" + +#~ msgid "Start a Flower client node which connects to a Flower server." +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " server. If the Flower server runs" +#~ " on the same machine on port " +#~ "8080, then `server_address` would be " +#~ "`\"[::]:8080\"`." +#~ msgstr "" + +#~ msgid "An implementation of the abstract base class `flwr.client.Client`." +#~ msgstr "" + +#~ msgid "" +#~ "The maximum length of gRPC messages " +#~ "that can be exchanged with the " +#~ "Flower server. The default should be " +#~ "sufficient for most models. Users who" +#~ " train very large models might need" +#~ " to increase this value. Note that" +#~ " the Flower server needs to be " +#~ "started with the same value (see " +#~ "`flwr.server.start_server`), otherwise it will " +#~ "not know about the increased limit " +#~ "and block larger messages." +#~ msgstr "" + +#~ msgid "" +#~ "The PEM-encoded root certificates as " +#~ "a byte string or a path string." +#~ " If provided, a secure connection " +#~ "using the certificates will be " +#~ "established to an SSL-enabled Flower " +#~ "server." +#~ msgstr "" + +#~ msgid "" +#~ "DEPRECATED - USE 'transport' INSTEAD. " +#~ "Defines whether or not the client " +#~ "is interacting with the server using " +#~ "the experimental REST API. 
This feature" +#~ " is experimental, it might change " +#~ "considerably in future versions of " +#~ "Flower." +#~ msgstr "" +#~ "DÉPRÉCIÉ - UTILISER 'transport' À LA " +#~ "PLACE Définit si le client interagit " +#~ "ou non avec le serveur à l'aide" +#~ " de l'API REST expérimentale. Cette " +#~ "fonctionnalité est expérimentale, elle " +#~ "pourrait changer considérablement dans les " +#~ "futures versions de Flower." + +#~ msgid "" +#~ "Configure the transport layer. Allowed " +#~ "values: - 'grpc-bidi': gRPC, " +#~ "bidirectional streaming - 'grpc-rere': " +#~ "gRPC, request-response (experimental) - " +#~ "'rest': HTTP (experimental)" +#~ msgstr "" +#~ "Valeurs autorisées : - 'grpc-bidi' " +#~ ": gRPC, flux bidirectionnel - 'grpc-" +#~ "rere' : gRPC, requête-réponse " +#~ "(expérimental) - 'rest' : HTTP " +#~ "(expérimental)" + +#~ msgid "Starting a gRPC client with an insecure server connection:" +#~ msgstr "" + +#~ msgid "Starting an SSL-enabled gRPC client:" +#~ msgstr "" + +#~ msgid "Abstract base class for Flower clients using NumPy." +#~ msgstr "" + +#~ msgid "The current (global) model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "" +#~ "Configuration parameters which allow the " +#~ "server to influence evaluation on the" +#~ " client. It can be used to " +#~ "communicate arbitrary values from the " +#~ "server to the client, for example, " +#~ "to influence the number of examples " +#~ "used for evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "* **loss** (*float*) -- The evaluation" +#~ " loss of the model on the local" +#~ " dataset. * **num_examples** (*int*) -- " +#~ "The number of examples used for " +#~ "evaluation. * **metrics** (*Dict[str, " +#~ "Scalar]*) -- A dictionary mapping " +#~ "arbitrary string keys to values of " +#~ "type bool, bytes, float, int, or " +#~ "str. It can be used to " +#~ "communicate arbitrary values back to the" +#~ " server." 
+#~ msgstr "" + +#~ msgid "" +#~ "**loss** (*float*) -- The evaluation " +#~ "loss of the model on the local " +#~ "dataset." +#~ msgstr "" + +#~ msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "**metrics** (*Dict[str, Scalar]*) -- A " +#~ "dictionary mapping arbitrary string keys " +#~ "to values of type bool, bytes, " +#~ "float, int, or str. It can be " +#~ "used to communicate arbitrary values " +#~ "back to the server." +#~ msgstr "" + +#~ msgid "" +#~ "The previous return type format (int," +#~ " float, float) and the extended " +#~ "format (int, float, float, Dict[str, " +#~ "Scalar]) have been deprecated and " +#~ "removed since Flower 0.19." +#~ msgstr "" + +#~ msgid "Train the provided parameters using the locally held dataset." +#~ msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" + +#~ msgid "" +#~ "Configuration parameters which allow the " +#~ "server to influence training on the " +#~ "client. It can be used to " +#~ "communicate arbitrary values from the " +#~ "server to the client, for example, " +#~ "to set the number of (local) " +#~ "training epochs." +#~ msgstr "" + +#~ msgid "" +#~ "* **parameters** (*NDArrays*) -- The " +#~ "locally updated model parameters. * " +#~ "**num_examples** (*int*) -- The number " +#~ "of examples used for training. * " +#~ "**metrics** (*Dict[str, Scalar]*) -- A " +#~ "dictionary mapping arbitrary string keys " +#~ "to values of type bool, bytes, " +#~ "float, int, or str. It can be " +#~ "used to communicate arbitrary values " +#~ "back to the server." +#~ msgstr "" + +#~ msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "**num_examples** (*int*) -- The number of examples used for training." +#~ msgstr "" + +#~ msgid "" +#~ "Configuration parameters requested by the " +#~ "server. 
This can be used to tell" +#~ " the client which parameters are " +#~ "needed along with some Scalar " +#~ "attributes." +#~ msgstr "" + +#~ msgid "" +#~ "**parameters** -- The local model " +#~ "parameters as a list of NumPy " +#~ "ndarrays." +#~ msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" + +#~ msgid "Return a client's set of properties." +#~ msgstr "Renvoie l'ensemble des propriétés d'un client." + +#~ msgid "" +#~ "Configuration parameters requested by the " +#~ "server. This can be used to tell" +#~ " the client which properties are " +#~ "needed along with some Scalar " +#~ "attributes." +#~ msgstr "" + +#~ msgid "" +#~ "**properties** -- A dictionary mapping " +#~ "arbitrary string keys to values of " +#~ "type bool, bytes, float, int, or " +#~ "str. It can be used to communicate" +#~ " arbitrary property values back to " +#~ "the server." +#~ msgstr "" + +#~ msgid "Start a Flower NumPyClient which connects to a gRPC server." +#~ msgstr "" + +#~ msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +#~ msgstr "" + +#~ msgid "Starting a client with an insecure server connection:" +#~ msgstr "" + +#~ msgid "Starting a SSL-enabled client:" +#~ msgstr "" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "Simulation de moniteur" + +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type ClientLike. Note that the " +#~ "created client instances are ephemeral " +#~ "and will often be destroyed after " +#~ "a single method invocation. Since client" +#~ " instances are not long-lived, they" +#~ " should not attempt to carry state" +#~ " over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) 
should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." +#~ msgstr "" +#~ "Une fonction créant des instances de " +#~ "client. La fonction doit prendre un " +#~ "seul argument `str` appelé `cid`. Elle" +#~ " doit retourner une seule instance de" +#~ " client de type ClientLike. Notez que" +#~ " les instances de client créées sont" +#~ " éphémères et seront souvent détruites " +#~ "après une seule invocation de méthode." +#~ " Puisque les instances de client ne" +#~ " sont pas de longue durée, elles " +#~ "ne doivent pas essayer de transporter" +#~ " l'état sur les invocations de " +#~ "méthode. Tout état requis par l'instance" +#~ " (modèle, jeu de données, hyperparamètres," +#~ " ...) doit être (re)créé dans l'appel" +#~ " à `client_fn` ou dans l'appel à " +#~ "n'importe quelle méthode de client (par" +#~ " exemple, charger les données d'évaluation" +#~ " dans la méthode `evaluate` elle-" +#~ "même)." + +#~ msgid "" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "" + +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." +#~ msgstr "" + +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. Example: `{\"num_cpus\": 4," +#~ " \"num_gpus\": 1}`. To understand the " +#~ "GPU utilization caused by `num_gpus`, " +#~ "consult the Ray documentation on GPU " +#~ "support." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." 
+#~ msgstr "" + +#~ msgid "" +#~ "Currently supported values are `num_rounds`" +#~ " (int, default: 1) and `round_timeout` " +#~ "in seconds (float, default: None)." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" + +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" + +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "" + +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "" + +#~ msgid "Flower server." +#~ msgstr "Serveur de Flower" + +#~ msgid "Start a Flower server using the gRPC transport layer." +#~ msgstr "" + +#~ msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." 
+#~ msgstr "" + +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_server` will create one." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.strategy.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_server` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "" +#~ "The maximum length of gRPC messages " +#~ "that can be exchanged with the " +#~ "Flower clients. The default should be" +#~ " sufficient for most models. Users " +#~ "who train very large models might " +#~ "need to increase this value. Note " +#~ "that the Flower clients need to be" +#~ " started with the same value (see " +#~ "`flwr.client.start_client`), otherwise clients will" +#~ " not know about the increased limit" +#~ " and block larger messages." +#~ msgstr "" + +#~ msgid "" +#~ "Tuple containing root certificate, server " +#~ "certificate, and private key to start" +#~ " a secure SSL-enabled server. The " +#~ "tuple is expected to have three " +#~ "bytes elements in the following order:" +#~ " * CA certificate. * server " +#~ "certificate. * server private key." +#~ msgstr "" + +#~ msgid "" +#~ "Tuple containing root certificate, server " +#~ "certificate, and private key to start" +#~ " a secure SSL-enabled server. The " +#~ "tuple is expected to have three " +#~ "bytes elements in the following order:" +#~ msgstr "" + +#~ msgid "CA certificate." +#~ msgstr "Certificats" + +#~ msgid "server certificate." +#~ msgstr "Certificats" + +#~ msgid "server private key." 
+#~ msgstr "clé privée du serveur." + +#~ msgid "**hist** -- Object containing training and evaluation metrics." +#~ msgstr "" + +#~ msgid "Starting an insecure server:" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Starting an SSL-enabled server:" +#~ msgstr "Démarrer le serveur" + +#~ msgid "Contains the strategy abstraction and different implementations." +#~ msgstr "" + +#~ msgid "Abstract base class for server strategy implementations." +#~ msgstr "" + +#~ msgid "The current round of federated learning." +#~ msgstr "Qu'est-ce que l'apprentissage fédéré ?" + +#~ msgid "" +#~ "Successful updates from the previously " +#~ "selected and configured clients. Each " +#~ "pair of `(ClientProxy, FitRes` constitutes " +#~ "a successful update from one of " +#~ "the previously selected clients. Not " +#~ "that not all previously selected clients" +#~ " are necessarily included in this " +#~ "list: a client might drop out and" +#~ " not submit a result. For each " +#~ "client that did not submit an " +#~ "update, there should be an `Exception`" +#~ " in `failures`." +#~ msgstr "" + +#~ msgid "" +#~ "Exceptions that occurred while the " +#~ "server was waiting for client updates." +#~ msgstr "" + +#~ msgid "" +#~ "**aggregation_result** -- The aggregated " +#~ "evaluation result. Aggregation typically uses" +#~ " some variant of a weighted average." +#~ msgstr "" + +#~ msgid "Aggregate training results." +#~ msgstr "Résultats globaux de l'évaluation." + +#~ msgid "" +#~ "Successful updates from the previously " +#~ "selected and configured clients. Each " +#~ "pair of `(ClientProxy, FitRes)` constitutes" +#~ " a successful update from one of " +#~ "the previously selected clients. Not " +#~ "that not all previously selected clients" +#~ " are necessarily included in this " +#~ "list: a client might drop out and" +#~ " not submit a result. For each " +#~ "client that did not submit an " +#~ "update, there should be an `Exception`" +#~ " in `failures`."
+#~ msgstr "" + +#~ msgid "" +#~ "**parameters** -- If parameters are " +#~ "returned, then the server will treat " +#~ "these as the new global model " +#~ "parameters (i.e., it will replace the" +#~ " previous parameters with the ones " +#~ "returned from this method). If `None`" +#~ " is returned (e.g., because there " +#~ "were only failures and no viable " +#~ "results) then the server will no " +#~ "update the previous model parameters, " +#~ "the updates received in this round " +#~ "are discarded, and the global model " +#~ "parameters remain the same." +#~ msgstr "" + +#~ msgid "Configure the next round of evaluation." +#~ msgstr "Configuration de l'évaluation côté serveur" + +#~ msgid "The client manager which holds all currently connected clients." +#~ msgstr "" + +#~ msgid "" +#~ "**evaluate_configuration** -- A list of " +#~ "tuples. Each tuple in the list " +#~ "identifies a `ClientProxy` and the " +#~ "`EvaluateIns` for this particular " +#~ "`ClientProxy`. If a particular `ClientProxy`" +#~ " is not included in this list, " +#~ "it means that this `ClientProxy` will" +#~ " not participate in the next round" +#~ " of federated evaluation." +#~ msgstr "" + +#~ msgid "Configure the next round of training." +#~ msgstr "" + +#~ msgid "" +#~ "**fit_configuration** -- A list of " +#~ "tuples. Each tuple in the list " +#~ "identifies a `ClientProxy` and the " +#~ "`FitIns` for this particular `ClientProxy`." +#~ " If a particular `ClientProxy` is not" +#~ " included in this list, it means " +#~ "that this `ClientProxy` will not " +#~ "participate in the next round of " +#~ "federated learning." +#~ msgstr "" + +#~ msgid "Evaluate the current model parameters." +#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" + +#~ msgid "" +#~ "This function can be used to " +#~ "perform centralized (i.e., server-side) " +#~ "evaluation of model parameters." 
+#~ msgstr "" + +#~ msgid "" +#~ "**evaluation_result** -- The evaluation " +#~ "result, usually a Tuple containing loss" +#~ " and a dictionary containing task-" +#~ "specific metrics (e.g., accuracy)." +#~ msgstr "" + +#~ msgid "Initialize the (global) model parameters." +#~ msgstr "Initialise le modèle global" + +#~ msgid "" +#~ "**parameters** -- If parameters are " +#~ "returned, then the server will treat " +#~ "these as the initial global model " +#~ "parameters." +#~ msgstr "" + +#~ msgid "Configurable FedAvg strategy implementation." +#~ msgstr "Configuration de l'évaluation fédérée" + +#~ msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#~ msgstr "" + +#~ msgid "" +#~ "Fraction of clients used during " +#~ "training. In case `min_fit_clients` is " +#~ "larger than `fraction_fit * " +#~ "available_clients`, `min_fit_clients` will still " +#~ "be sampled. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "" +#~ "Fraction of clients used during " +#~ "validation. In case `min_evaluate_clients` is" +#~ " larger than `fraction_evaluate * " +#~ "available_clients`, `min_evaluate_clients` will " +#~ "still be sampled. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Minimum number of clients used during training. Defaults to 2." +#~ msgstr "" + +#~ msgid "Minimum number of clients used during validation. Defaults to 2." +#~ msgstr "" + +#~ msgid "Minimum number of total clients in the system. Defaults to 2." +#~ msgstr "" + +#~ msgid "Optional function used for validation. Defaults to None." +#~ msgstr "" + +#~ msgid "Function used to configure training. Defaults to None." +#~ msgstr "" + +#~ msgid "Function used to configure validation. Defaults to None." +#~ msgstr "" + +#~ msgid "Whether or not accept rounds containing failures. Defaults to True." +#~ msgstr "" + +#~ msgid "Initial global model parameters." +#~ msgstr "Initialise le modèle global" + +#~ msgid "Metrics aggregation function, optional." 
+#~ msgstr "" + +#~ msgid "Aggregate evaluation losses using weighted average." +#~ msgstr "Résultats globaux de l'évaluation." + +#~ msgid "Aggregate fit results using weighted average." +#~ msgstr "" + +#~ msgid "Evaluate model parameters using an evaluation function." +#~ msgstr "" + +#~ msgid "Initialize global model parameters." +#~ msgstr "Initialise le modèle global" + +#~ msgid "Use a fraction of available clients for evaluation." +#~ msgstr "" + +#~ msgid "Return the sample size and the required number of available clients." +#~ msgstr "" + +#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgstr "" + +#~ msgid "Federated Averaging with Momentum strategy." +#~ msgstr "Stratégie de moyenne fédérée." + +#~ msgid "Implementation based on https://arxiv.org/pdf/1909.06335.pdf" +#~ msgstr "" + +#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgstr "" + +#~ msgid "Fraction of clients used during validation. Defaults to 0.1." +#~ msgstr "" + +#~ msgid "" +#~ "Server-side learning rate used in " +#~ "server-side optimization. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#~ msgstr "" + +#~ msgid "Configurable QFedAvg strategy implementation." +#~ msgstr "" + +#~ msgid "Configurable fault-tolerant FedAvg strategy implementation." +#~ msgstr "" + +#~ msgid "Configurable FedAdagrad strategy implementation." +#~ msgstr "" + +#~ msgid "Federated Optim strategy interface." +#~ msgstr "" + +#~ msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#~ msgstr "" +#~ "FedYogi - Stratégie d'apprentissage fédéré " +#~ "utilisant Yogi côté serveur. Mise en " +#~ "oeuvre basée sur https://arxiv.org/abs/2003.00295" + +#~ msgid "Fraction of clients used during training. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Fraction of clients used during validation. Defaults to 1.0." +#~ msgstr "" + +#~ msgid "Server-side learning rate. Defaults to 1e-1." 
+#~ msgstr "" + +#~ msgid "Client-side learning rate. Defaults to 1e-1." +#~ msgstr "" + +#~ msgid "Momentum parameter. Defaults to 0.0." +#~ msgstr "" + +#~ msgid "Second moment parameter. Defaults to 0.0." +#~ msgstr "" + +#~ msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#~ msgstr "" + +#~ msgid "Configurable FedProx strategy implementation." +#~ msgstr "" + +#~ msgid "Federated Optimization strategy." +#~ msgstr "Stratégie de moyenne fédérée." + +#~ msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#~ msgstr "" + +#~ msgid "" +#~ "The strategy in itself will not be" +#~ " different than FedAvg, the client " +#~ "needs to be adjusted. A proximal " +#~ "term needs to be added to the " +#~ "loss function during the training:" +#~ msgstr "" + +#~ msgid "" +#~ "\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +#~ "\n" +#~ msgstr "\\\\frac{\\\\mu}{2} || w - w^t ||^2" + +#~ msgid "" +#~ "Where $w^t$ are the global parameters" +#~ " and $w$ are the local weights " +#~ "the function will be optimized with." +#~ msgstr "" + +#~ msgid "In PyTorch, for example, the loss would go from:" +#~ msgstr "" + +#~ msgid "To:" +#~ msgstr "" + +#~ msgid "" +#~ "With `global_params` being a copy of " +#~ "the parameters before the training takes" +#~ " place." +#~ msgstr "" + +#~ msgid "" +#~ "The weight of the proximal term " +#~ "used in the optimization. 0.0 makes " +#~ "this strategy equivalent to FedAvg, and" +#~ " the higher the coefficient, the more" +#~ " regularization will be used (that " +#~ "is, the client parameters will need " +#~ "to be closer to the server " +#~ "parameters during training)." +#~ msgstr "" + +#~ msgid "Sends the proximal factor mu to the clients" +#~ msgstr "" + +#~ msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#~ msgstr "" +#~ "Stratégie FedAdagrad - Optimisation fédérée" +#~ " adaptative à l'aide d'Adagrad."
+ +#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgstr "" + +#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant fastai et Flower !" + +#~ msgid "FedAdam - Adaptive Federated Optimization using Adam." +#~ msgstr "FedAdam - Optimisation fédérée adaptative utilisant Adam." + +#~ msgid "Momentum parameter. Defaults to 0.9." +#~ msgstr "" + +#~ msgid "Second moment parameter. Defaults to 0.99." +#~ msgstr "" + +#~ msgid "FedYogi [Reddi et al., 2020] strategy." +#~ msgstr "Stratégie FedYogi [Reddi et al., 2020]." + +#~ msgid "Adaptive Federated Optimization using Yogi." +#~ msgstr "Optimisation fédérée adaptative à l'aide de Yogi." + +#~ msgid "Federated learning strategy using Yogi on server-side." +#~ msgstr "L'apprentissage fédéré en cinq étapes" + +#~ msgid "Differential Privacy Wrappers in Flower" +#~ msgstr "Les enveloppes différentielles de confidentialité dans les fleurs" + +#~ msgid "Evaluation" +#~ msgstr "Solution" + +#~ msgid "Code examples" +#~ msgstr "Exemple de code complet" + +#~ msgid "" +#~ "Flower Quickstart (PyTorch): coming soon " +#~ "(the TensorFlow/Keras example can easily " +#~ "be changed to make it work with" +#~ " PyTorch)" +#~ msgstr "" + +#~ msgid "First time contributors" +#~ msgstr "Bonnes premières contributions" + +#~ msgid "First MXNet 1.6 example (MNIST)" +#~ msgstr "" + +#~ msgid "ImageNet (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "LSTM (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "Transformer (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "BERT (PyTorch/TensorFlow)" +#~ msgstr "" + +#~ msgid "Logging" +#~ msgstr "Enregistrement" + +#~ msgid "|cce04c6f539b421a91f5dba40287193f|" +#~ msgstr "|cce04c6f539b421a91f5dba40287193f|" + +#~ msgid "|e392aef42ba248e19e35446f95a6d1ca|" +#~ msgstr "|e392aef42ba248e19e35446f95a6d1ca|" + +#~ msgid "|7e028f44defe4f31a02debc729f2010d|" +#~ msgstr 
"|7e028f44defe4f31a02debc729f2010d|" + +#~ msgid "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" +#~ msgstr "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" + +#~ msgid "|9c0445ce962744e1a1c0a4abc697a334|" +#~ msgstr "|9c0445ce962744e1a1c0a4abc697a334|" + +#~ msgid "|a3246766a6db412888131b3bcdad0971|" +#~ msgstr "|a3246766a6db412888131b3bcdad0971|" + +#~ msgid "|db6f2bee32f143b8a5085b6a8ce1acd1|" +#~ msgstr "|db6f2bee32f143b8a5085b6a8ce1acd1|" + +#~ msgid "|405653bc8f874e9595fd59cc82b3d48c|" +#~ msgstr "|405653bc8f874e9595fd59cc82b3d48c|" + +#~ msgid "|073a728154ed406e8fe54e1d9f18dcb9|" +#~ msgstr "|073a728154ed406e8fe54e1d9f18dcb9|" + +#~ msgid "|50e80ea4f22945848b65ed7eed35e0e1|" +#~ msgstr "|50e80ea4f22945848b65ed7eed35e0e1|" + +#~ msgid "|f3cf9148d85e4b68b66b6c255b25e327|" +#~ msgstr "|f3cf9148d85e4b68b66b6c255b25e327|" + +#~ msgid "|1fedb4f8714947e1b13f03696180c741|" +#~ msgstr "|1fedb4f8714947e1b13f03696180c741|" + +#~ msgid "|a32d4ad1ccb34461942d75c7b2b51d65|" +#~ msgstr "|a32d4ad1ccb34461942d75c7b2b51d65|" + +#~ msgid "|3531696c52904cd3b9944034ab959d48|" +#~ msgstr "|3531696c52904cd3b9944034ab959d48|" + +#~ msgid "An Introduction to Federated Learning" +#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#~ msgid "Strategies in Federated Learning" +#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#~ msgid "Building a Strategy" +#~ msgstr "Stratégies intégrées" + +#~ msgid "Client and NumPyClient" +#~ msgstr "NumPyClient" + +#~ msgid "Strategies" +#~ msgstr "Stratégies personnalisées" + +#~ msgid "SSL-enabled Server and Client" +#~ msgstr "" + +#~ msgid "About these documents" +#~ msgstr "À propos de ces documents" + +#~ msgid "Index" +#~ msgstr "Index" + +#~ msgid "Search" +#~ msgstr "Recherche" + +#~ msgid "Copyright" +#~ msgstr "Droits d'auteur" + +#~ msgid "Save Progress" +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server does not prescribe " +#~ "a way to persist model updates or" +#~ " evaluation results. 
Flower does not " +#~ "(yet) automatically save model updates " +#~ "on the server-side. It's on the" +#~ " roadmap to provide a built-in " +#~ "way of doing this." +#~ msgstr "" + +#~ msgid "Release Process" +#~ msgstr "Publier Flower" + +#~ msgid "Virtual Env Installation" +#~ msgstr "Virtualenv avec Anaconda" + +#~ msgid "Install development versions" +#~ msgstr "Installer les versions de développement de Flower" + +#~ msgid "Set up a virtual env" +#~ msgstr "Mettre en place un environment virtuel" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" +#~ "Notez que, pour construire la " +#~ "documentation localement (avec ``poetry run" +#~ " make html``, comme décrit ci-" +#~ "dessous), ``Pandoc _`" +#~ " doit être installé sur le système." + +#~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" +#~ msgstr "Un fine-tuning de LLaMA 2 avec Hugging Face et PyTorch" + +#~ msgid "XGBoost" +#~ msgstr "XGBoost" + +#~ msgid "Android ONNX on-device training" +#~ msgstr "" +#~ "Utiliser Android ONNX pour faire du " +#~ "training directement sur le téléphone" + +#~ msgid "Contribute on GitHub" +#~ msgstr "Contribuer sur GitHub" + +#~ msgid "How to write a good PR title" +#~ msgstr "Comment écrire un bon titre de PR" + +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "" +#~ "Un titre de PR bien choisi permet" +#~ " aux autres développeurs de rapidement " +#~ "comprendre l'intérêt et le scope des " +#~ "changements proposés. Voici un guide " +#~ "pour vous aider à écrire des bons" +#~ " titres de PR :" + +#~ msgid "" +#~ "1. 
Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." +#~ msgstr "" +#~ "1. Soyez clair et concis : Donnez" +#~ " un résumé clair des changements de" +#~ " manière concise. 1. Utilisez des " +#~ "verbes actionnables : Commencez par des" +#~ " verbes comme \"Add\", \"Update\", ou " +#~ "\"Fix\" pour indiquer le but. 1. " +#~ "Inclure des renseignements pertinents : " +#~ "Mentionner la caractéristique ou le " +#~ "module concerné pour le contexte. 1. " +#~ "Gardez le court : Évitez les longs" +#~ " titres pour une lisibilité facile. " +#~ "1. Utiliser une bonne capitalisation et" +#~ " une ponctuation : Suivre les règles" +#~ " de grammaire pour la clarté." 
+ +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "" +#~ "Commençons par quelques exemples de " +#~ "titres qui devraient être évités parce" +#~ " qu'ils ne fournissent pas d'information" +#~ " significative :" + +#~ msgid "Implement Algorithm" +#~ msgstr "Implement Algorithm" + +#~ msgid "Database" +#~ msgstr "Base de données" + +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "Add my_new_file.py to codebase" + +#~ msgid "Improve code in module" +#~ msgstr "Improve code in module" + +#~ msgid "Change SomeModule" +#~ msgstr "Change SomeModule" + +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "" +#~ "Voici quelques bons exemples qui " +#~ "fournissent de l'information utile sans " +#~ "répéter comment ils le font, comme " +#~ "cela est déjà visible dans la " +#~ "section \"Files changed\" de la PR " +#~ ":" + +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "Update docs banner to mention Flower Summit 2023" + +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "Remove unnecessary XGBoost dependency" + +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "Remove redundant attributes in strategies subclassing FedAvg" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgstr "" +#~ "Ajoute une tâche CI pour déployer " +#~ "le système de mise en scène " +#~ "lorsque la branche `main` change" + +#~ msgid "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" +#~ msgstr "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" + +#~ msgid "Differential privacy" +#~ msgstr 
"Confidentialité différentielle" + +#~ msgid "" +#~ "The Flower server does not prescribe " +#~ "a way to aggregate evaluation results," +#~ " but it enables the user to " +#~ "fully customize result aggregation." +#~ msgstr "" + +#~ msgid "Configure logging" +#~ msgstr "Configurer les clients" + +#~ msgid "" +#~ "The Flower logger keeps track of " +#~ "all core events that take place in" +#~ " federated learning workloads. It presents" +#~ " information by default following a " +#~ "standard message format:" +#~ msgstr "" +#~ "L'enregistreur de Flower garde la trace" +#~ " de tous les événements principaux " +#~ "qui ont lieu dans les charges de" +#~ " travail de l'apprentissage fédéré. Il " +#~ "présente les informations par défaut en" +#~ " suivant un format de message " +#~ "standard :" + +#~ msgid "" +#~ "containing relevant information including: log" +#~ " message level (e.g. :code:`INFO`, " +#~ ":code:`DEBUG`), a timestamp, the line " +#~ "where the logging took place from, " +#~ "as well as the log message itself." +#~ " In this way, the logger would " +#~ "typically display information on your " +#~ "terminal as follows:" +#~ msgstr "" + +#~ msgid "Saving log to file" +#~ msgstr "Enregistrement du journal dans un fichier" + +#~ msgid "" +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ ":code:`fl.server.start_server`) and when using " +#~ "the :code:`VirtualClientEngine` (i.e. when you" +#~ " do :code:`fl.simulation.start_simulation`). In " +#~ "some situations you might want to " +#~ "save this log to disk. You can " +#~ "do so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. For example:" +#~ msgstr "" + +#~ msgid "" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to :code:`log.txt`. 
This file will " +#~ "be created in the same directory " +#~ "as were you are running the code" +#~ " from. If we inspect we see the" +#~ " log above is also recorded but " +#~ "prefixing with :code:`identifier` each line:" +#~ msgstr "" +#~ "Avec ce qui précède, Flower enregistrera" +#~ " le journal que tu vois sur ton" +#~ " terminal dans :code:`log.txt`. Ce fichier" +#~ " sera créé dans le même répertoire" +#~ " que celui à partir duquel tu " +#~ "exécutes le code. Si nous inspectons," +#~ " nous voyons que le journal ci-" +#~ "dessus est également enregistré, mais en" +#~ " préfixant chaque ligne avec " +#~ ":code:`identifier` :" + +#~ msgid "Log your own messages" +#~ msgstr "Enregistrer tes propres messages" + +#~ msgid "" +#~ "You might expand the information shown" +#~ " by default with the Flower logger" +#~ " by adding more messages relevant to" +#~ " your application. You can achieve " +#~ "this easily as follows." +#~ msgstr "" +#~ "Tu peux élargir les informations " +#~ "affichées par défaut avec le logger " +#~ "Flower en ajoutant d'autres messages " +#~ "pertinents pour ton application. Tu peux" +#~ " y parvenir facilement en procédant " +#~ "comme suit." + +#~ msgid "" +#~ "In this way your logger will show," +#~ " in addition to the default messages," +#~ " the ones introduced by the clients" +#~ " as specified above." +#~ msgstr "" +#~ "De cette façon, ton logger affichera," +#~ " en plus des messages par défaut, " +#~ "ceux introduits par les clients comme" +#~ " spécifié ci-dessus." + +#~ msgid "Log to a remote service" +#~ msgstr "Se connecter à un service distant" + +#~ msgid "" +#~ "The :code:`fl.common.logger.configure` function, " +#~ "also allows specifying a host to " +#~ "which logs can be pushed (via " +#~ ":code:`POST`) through a native Python " +#~ ":code:`logging.handler.HTTPHandler`. 
This is a " +#~ "particularly useful feature in " +#~ ":code:`gRPC`-based Federated Learning workloads " +#~ "where otherwise gathering logs from all" +#~ " entities (i.e. the server and the" +#~ " clients) might be cumbersome. Note " +#~ "that in Flower simulation, the server" +#~ " automatically displays all logs. You " +#~ "can still specify a :code:`HTTPHandler` " +#~ "should you wish to backup or " +#~ "analyze the logs somewhere else." +#~ msgstr "" + +#~ msgid "Enable SSL connections" +#~ msgstr "Collecte centralisée des données" + +#~ msgid "Python version" +#~ msgstr "Version Python" + +#~ msgid "" +#~ "Flower requires at least `Python 3.7 " +#~ "`_, but `Python 3.8" +#~ " `_ or above is " +#~ "recommended." +#~ msgstr "" +#~ "Flower nécessite `Python 3.7 " +#~ "`_ ou plus, nous " +#~ "recommandons `Python 3.8 " +#~ "`_." + +#~ msgid "Run simulations" +#~ msgstr "Simulation de moniteur" + +#~ msgid "" +#~ "Simulating Federated Learning workloads is " +#~ "useful for a multitude of use-" +#~ "cases: you might want to run your" +#~ " workload on a large cohort of " +#~ "clients but without having to source," +#~ " configure and mange a large number" +#~ " of physical devices; you might want" +#~ " to run your FL workloads as " +#~ "fast as possible on the compute " +#~ "systems you have access to without " +#~ "having to go through a complex " +#~ "setup process; you might want to " +#~ "validate your algorithm on different " +#~ "scenarios at varying levels of data " +#~ "and system heterogeneity, client availability," +#~ " privacy budgets, etc. These are " +#~ "among some of the use-cases where" +#~ " simulating FL workloads makes sense. " +#~ "Flower can accommodate these scenarios " +#~ "by means of its `VirtualClientEngine " +#~ "`_ or VCE." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. 
the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" +#~ msgstr "" + +#~ msgid "" +#~ "resource-aware: this means that each " +#~ "client gets assigned a portion of " +#~ "the compute and memory on your " +#~ "system. You as a user can control" +#~ " this at the beginning of the " +#~ "simulation and allows you to control " +#~ "the degree of parallelism of your " +#~ "Flower FL simulation. The fewer the " +#~ "resources per client, the more clients" +#~ " can run concurrently on the same " +#~ "hardware." +#~ msgstr "" + +#~ msgid "" +#~ "self-managed: this means that you " +#~ "as a user do not need to " +#~ "launch clients manually, instead this " +#~ "gets delegated to :code:`VirtualClientEngine`'s " +#~ "internals." +#~ msgstr "" + +#~ msgid "" +#~ "ephemeral: this means that a client " +#~ "is only materialized when it is " +#~ "required in the FL process (e.g. " +#~ "to do `fit() `_). The object is" +#~ " destroyed afterwards, releasing the " +#~ "resources it was assigned and allowing" +#~ " in this way other clients to " +#~ "participate." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` implements `virtual`" +#~ " clients using `Ray `_, " +#~ "an open-source framework for scalable" +#~ " Python workloads. In particular, Flower's" +#~ " :code:`VirtualClientEngine` makes use of " +#~ "`Actors `_ to spawn `virtual` clients" +#~ " and run their workload." +#~ msgstr "" + +#~ msgid "Launch your Flower simulation" +#~ msgstr "" + +#~ msgid "" +#~ "Running Flower simulations still require " +#~ "you to define your client class, a" +#~ " strategy, and utility functions to " +#~ "download and load (and potentially " +#~ "partition) your dataset. 
With that out" +#~ " of the way, launching your " +#~ "simulation is done with `start_simulation " +#~ "`_ " +#~ "and a minimal example looks as " +#~ "follows:" +#~ msgstr "" + +#~ msgid "VirtualClientEngine resources" +#~ msgstr "Moteur de client virtuel" + +#~ msgid "" +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the :code:`ray_init_args` input argument" +#~ " to :code:`start_simulation` which the VCE" +#~ " internally passes to Ray's " +#~ ":code:`ray.init` command. For a complete " +#~ "list of settings you can configure " +#~ "check the `ray.init `_ " +#~ "documentation. Do not set " +#~ ":code:`ray_init_args` if you want the " +#~ "VCE to use all your system's CPUs" +#~ " and GPUs." +#~ msgstr "" + +#~ msgid "Assigning client resources" +#~ msgstr "" + +#~ msgid "" +#~ "By default the :code:`VirtualClientEngine` " +#~ "assigns a single CPU core (and " +#~ "nothing else) to each virtual client." +#~ " This means that if your system " +#~ "has 10 cores, that many virtual " +#~ "clients can be concurrently running." +#~ msgstr "" + +#~ msgid "" +#~ "More often than not, you would " +#~ "probably like to adjust the resources" +#~ " your clients get assigned based on" +#~ " the complexity (i.e. compute and " +#~ "memory footprint) of your FL workload." +#~ " You can do so when starting " +#~ "your simulation by setting the argument" +#~ " `client_resources` to `start_simulation `_. Two " +#~ "keys are internally used by Ray to" +#~ " schedule and spawn workloads (in our" +#~ " case Flower clients):" +#~ msgstr "" + +#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." 
+#~ msgstr "" + +#~ msgid "" +#~ ":code:`num_gpus` indicates the **ratio** of" +#~ " GPU memory a client gets assigned." +#~ msgstr "" + +#~ msgid "Let's see a few examples:" +#~ msgstr "" + +#~ msgid "" +#~ "While the :code:`client_resources` can be " +#~ "used to control the degree of " +#~ "concurrency in your FL simulation, this" +#~ " does not stop you from running " +#~ "dozens, hundreds or even thousands of" +#~ " clients in the same round and " +#~ "having orders of magnitude more " +#~ "`dormant` (i.e. not participating in a" +#~ " round) clients. Let's say you want" +#~ " to have 100 clients per round " +#~ "but your system can only accommodate " +#~ "8 clients concurrently. The " +#~ ":code:`VirtualClientEngine` will schedule 100 " +#~ "jobs to run (each simulating a " +#~ "client sampled by the strategy) and " +#~ "then will execute them in a " +#~ "resource-aware manner in batches of " +#~ "8." +#~ msgstr "" + +#~ msgid "" +#~ "To understand all the intricate details" +#~ " on how resources are used to " +#~ "schedule FL clients and how to " +#~ "define custom resources, please take a" +#~ " look at the `Ray documentation " +#~ "`_." +#~ msgstr "" + +#~ msgid "Simulation examples" +#~ msgstr "Exemples de PyTorch" + +#~ msgid "" +#~ "A few ready-to-run complete " +#~ "examples for Flower simulation in " +#~ "Tensorflow/Keras and PyTorch are provided " +#~ "in the `Flower repository " +#~ "`_. You can run " +#~ "them on Google Colab too:" +#~ msgstr "" + +#~ msgid "" +#~ "`Tensorflow/Keras Simulation " +#~ "`_: 100 clients collaboratively " +#~ "train a MLP model on MNIST." +#~ msgstr "" +#~ "`Quickstart TensorFlow (Code) " +#~ "`_" + +#~ msgid "" +#~ "`PyTorch Simulation " +#~ "`_: 100 clients collaboratively train" +#~ " a CNN model on MNIST." +#~ msgstr "" +#~ "`Quickstart PyTorch (Code) " +#~ "`_" + +#~ msgid "" +#~ "Flower's :code:`VirtualClientEngine` allows you " +#~ "to run FL simulations across multiple" +#~ " compute nodes. 
Before starting your " +#~ "multi-node simulation ensure that you:" +#~ msgstr "" + +#~ msgid "Have the same Python environment in all nodes." +#~ msgstr "" + +#~ msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#~ msgstr "" + +#~ msgid "" +#~ "Have a copy of your dataset in " +#~ "all nodes (more about this in " +#~ ":ref:`simulation considerations `)" +#~ msgstr "" + +#~ msgid "" +#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " +#~ "`start_simulation `_ so the " +#~ ":code:`VirtualClientEngine` attaches to a " +#~ "running Ray instance." +#~ msgstr "" + +#~ msgid "" +#~ "Start Ray on you head node: on " +#~ "the terminal type :code:`ray start " +#~ "--head`. This command will print a " +#~ "few lines, one of which indicates " +#~ "how to attach other nodes to the" +#~ " head node." +#~ msgstr "" + +#~ msgid "" +#~ "Attach other nodes to the head " +#~ "node: copy the command shown after " +#~ "starting the head and execute it " +#~ "on terminal of a new node: for " +#~ "example :code:`ray start " +#~ "--address='192.168.1.132:6379'`" +#~ msgstr "" + +#~ msgid "" +#~ "With all the above done, you can" +#~ " run your code from the head " +#~ "node as you would if the " +#~ "simulation was running on a single " +#~ "node." +#~ msgstr "" + +#~ msgid "" +#~ "Once your simulation is finished, if " +#~ "you'd like to dismantle your cluster " +#~ "you simply need to run the command" +#~ " :code:`ray stop` in each node's " +#~ "terminal (including the head node)." +#~ msgstr "" + +#~ msgid "Multi-node simulation good-to-know" +#~ msgstr "" + +#~ msgid "" +#~ "Here we list a few interesting " +#~ "functionality when running multi-node FL" +#~ " simulations:" +#~ msgstr "" + +#~ msgid "" +#~ "User :code:`ray status` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the :code:`VirtualClientEngine`." 
+#~ msgstr "" + +#~ msgid "" +#~ "When attaching a new node to the" +#~ " head, all its resources (i.e. all" +#~ " CPUs, all GPUs) will be visible " +#~ "by the head node. This means that" +#~ " the :code:`VirtualClientEngine` can schedule " +#~ "as many `virtual` clients as that " +#~ "node can possible run. In some " +#~ "settings you might want to exclude " +#~ "certain resources from the simulation. " +#~ "You can do this by appending " +#~ "`--num-cpus=` and/or `--num-" +#~ "gpus=` in any :code:`ray " +#~ "start` command (including when starting " +#~ "the head)" +#~ msgstr "" + +#~ msgid "Considerations for simulations" +#~ msgstr "Simulation de moniteur" + +#~ msgid "" +#~ "We are actively working on these " +#~ "fronts so to make it trivial to" +#~ " run any FL workload with Flower " +#~ "simulation." +#~ msgstr "" + +#~ msgid "" +#~ "The current VCE allows you to run" +#~ " Federated Learning workloads in simulation" +#~ " mode whether you are prototyping " +#~ "simple scenarios on your personal laptop" +#~ " or you want to train a complex" +#~ " FL pipeline across multiple high-" +#~ "performance GPU nodes. While we add " +#~ "more capabilities to the VCE, the " +#~ "points below highlight some of the " +#~ "considerations to keep in mind when " +#~ "designing your FL pipeline with Flower." +#~ " We also highlight a couple of " +#~ "current limitations in our implementation." +#~ msgstr "" + +#~ msgid "GPU resources" +#~ msgstr "Ressources" + +#~ msgid "" +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key :code:`num_gpus` in " +#~ ":code:`client_resources`. This being said, Ray" +#~ " (used internally by the VCE) is " +#~ "by default:" +#~ msgstr "" + +#~ msgid "" +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set :code:`num_gpus=0.5` and" +#~ " you have two GPUs in your " +#~ "system with different (e.g. 
32GB and " +#~ "8GB) VRAM amounts, they both would " +#~ "run 2 clients concurrently." +#~ msgstr "" + +#~ msgid "" +#~ "not aware of other unrelated (i.e. " +#~ "not created by the VCE) workloads " +#~ "are running on the GPU. Two " +#~ "takeaways from this are:" +#~ msgstr "" + +#~ msgid "" +#~ "Your Flower server might need a " +#~ "GPU to evaluate the `global model` " +#~ "after aggregation (by instance when " +#~ "making use of the `evaluate method " +#~ "`_)" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" +#~ " your experiment." +#~ msgstr "" + +#~ msgid "" +#~ "In addition, the GPU resource limits " +#~ "passed to :code:`client_resources` are not " +#~ "`enforced` (i.e. they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." +#~ msgstr "" + +#~ msgid "TensorFlow with GPUs" +#~ msgstr "Exemples de TensorFlow" + +#~ msgid "" +#~ "When `using a GPU with TensorFlow " +#~ "`_ nearly your " +#~ "entire GPU memory of all your GPUs" +#~ " visible to the process will be " +#~ "mapped. This is done by TensorFlow " +#~ "for optimization purposes. However, in " +#~ "settings such as FL simulations where" +#~ " we want to split the GPU into" +#~ " multiple `virtual` clients, this is " +#~ "not a desirable mechanism. Luckily we" +#~ " can disable this default behavior by" +#~ " `enabling memory growth " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of :code:`actor_kwargs` we can " +#~ "pass the reserved key `\"on_actor_init_fn\"`" +#~ " in order to specify a function " +#~ "to be executed upon actor " +#~ "initialization. 
In this case, to enable" +#~ " GPU growth for TF workloads. It " +#~ "would look as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "This is precisely the mechanism used " +#~ "in `Tensorflow/Keras Simulation " +#~ "`_ example." +#~ msgstr "" +#~ "`Quickstart TensorFlow (Code) " +#~ "`_" + +#~ msgid "Multi-node setups" +#~ msgstr "" + +#~ msgid "" +#~ "The VCE does not currently offer a" +#~ " way to control on which node a" +#~ " particular `virtual` client is executed." +#~ " In other words, if more than a" +#~ " single node have the resources " +#~ "needed by a client to run, then" +#~ " any of those nodes could get " +#~ "the client workload scheduled onto. " +#~ "Later in the FL process (i.e. in" +#~ " a different round) the same client" +#~ " could be executed by a different " +#~ "node. Depending on how your clients " +#~ "access their datasets, this might " +#~ "require either having a copy of " +#~ "all dataset partitions on all nodes " +#~ "or a dataset serving mechanism (e.g. " +#~ "using nfs, a database) to circumvent " +#~ "data duplication." +#~ msgstr "" + +#~ msgid "" +#~ "By definition virtual clients are " +#~ "`stateless` due to their ephemeral " +#~ "nature. A client state can be " +#~ "implemented as part of the Flower " +#~ "client class but users need to " +#~ "ensure this saved to persistent storage" +#~ " (e.g. a database, disk) and that " +#~ "can be retrieve later by the same" +#~ " client regardless on which node it" +#~ " is running from. This is related " +#~ "to the point above also since, in" +#~ " some way, the client's dataset could" +#~ " be seen as a type of `state`." +#~ msgstr "" + +#~ msgid "Save and load model checkpoints" +#~ msgstr "Sauvegarde et chargement des points de contrôle PyTorch" + +#~ msgid "" +#~ "Flower does not automatically save model" +#~ " updates on the server-side. This " +#~ "how-to guide describes the steps " +#~ "to save (and load) model checkpoints " +#~ "in Flower." 
+#~ msgstr "" + +#~ msgid "Legacy example guides" +#~ msgstr "" + +#~ msgid "Contributor tutorials" +#~ msgstr "Configuration du contributeur" + +#~ msgid "Contributor explanations" +#~ msgstr "Explications" + +#~ msgid "Flower Framework Documentation" +#~ msgstr "Documentation de Flower" + +#~ msgid "PyTorch" +#~ msgstr "Exemples de PyTorch" + +#~ msgid "TensorFlow" +#~ msgstr "TensorFlow" + +#~ msgid "Flower CLI reference" +#~ msgstr "Client de Flower" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "Référence pour l'API" + +#~ msgid "Unreleased" +#~ msgstr "Inédit" + +#~ msgid "**Deprecate Python 3.7**" +#~ msgstr "**Deprecate Python 3.7**" + +#~ msgid "" +#~ "Since Python 3.7 reached its end " +#~ "of life (EOL) on 2023-06-27, support " +#~ "for Python 3.7 is now deprecated " +#~ "and will be removed in an upcoming" +#~ " release." +#~ msgstr "" +#~ "Étant donné que Python 3.7 a " +#~ "atteint sa fin de vie (EOL) le " +#~ "2023-06-27, la prise en charge de " +#~ "Python 3.7 est désormais dépréciée et" +#~ " sera supprimée dans une prochaine " +#~ "version." 
+ +#~ msgid "" +#~ "**Add new** `FedTrimmedAvg` **strategy** " +#~ "([#1769](https://github.com/adap/flower/pull/1769), " +#~ "[#1853](https://github.com/adap/flower/pull/1853))" +#~ msgstr "" +#~ "**Ajouter un nouveau** `FedTrimmedAvg` " +#~ "**stratégie** " +#~ "([#1769](https://github.com/adap/flower/pull/1769), " +#~ "[#1853](https://github.com/adap/flower/pull/1853))" + +#~ msgid "" +#~ "The new `FedTrimmedAvg` strategy implements" +#~ " Trimmed Mean by [Dong Yin, " +#~ "2018](https://arxiv.org/abs/1803.01498)" +#~ msgstr "" +#~ "La nouvelle stratégie `FedTrimmedAvg` met " +#~ "en œuvre la moyenne trimmée par " +#~ "[Dong Yin, 2018](https://arxiv.org/abs/1803.01498)" + +#~ msgid "" +#~ "**Add parameter aggregation to** `mt-" +#~ "pytorch` **code example** " +#~ "([#1785](https://github.com/adap/flower/pull/1785))" +#~ msgstr "" +#~ "**Ajouter l'agrégation des paramètres à** " +#~ "`mt-pytorch` **exemple de code** " +#~ "([#1785](https://github.com/adap/flower/pull/1785))" + +#~ msgid "" +#~ "The `mt-pytorch` example shows how " +#~ "to aggregate parameters when writing a" +#~ " driver script. The included `driver.py`" +#~ " and `server.py` have been aligned to" +#~ " demonstrate both the low-level way" +#~ " and the high-level way of " +#~ "building server-side logic." +#~ msgstr "" +#~ "L'exemple `mt-pytorch` montre comment " +#~ "agréger des paramètres lors de " +#~ "l'écriture d'un script de pilote. Les" +#~ " fichiers `driver.py` et `server.py` inclus" +#~ " ont été alignés pour démontrer à " +#~ "la fois la manière de bas niveau" +#~ " et la manière de haut niveau " +#~ "de construire la logique côté serveur." 
+ +#~ msgid "" +#~ "**Introduce (experimental) gRPC request-" +#~ "response API** " +#~ "([#1867](https://github.com/adap/flower/pull/1867), " +#~ "[#1901](https://github.com/adap/flower/pull/1901))" +#~ msgstr "" +#~ "**Introduire l'API demande-réponse gRPC " +#~ "(expérimentale)** " +#~ "([#1867](https://github.com/adap/flower/pull/1867), " +#~ "[#1901](https://github.com/adap/flower/pull/1901))" + +#~ msgid "" +#~ "In addition to the existing gRPC " +#~ "API (based on bidirectional streaming) " +#~ "and the experimental REST API, there " +#~ "is now a new gRPC API that " +#~ "uses a request-response model to " +#~ "communicate with client nodes." +#~ msgstr "" +#~ "En plus de l'API gRPC existante " +#~ "(basée sur un flux bidirectionnel) et" +#~ " de l'API REST expérimentale, il " +#~ "existe désormais une nouvelle API gRPC" +#~ " qui utilise un modèle demande-" +#~ "réponse pour communiquer avec les nœuds" +#~ " clients." + +#~ msgid "" +#~ "Please note: The gRPC request-response" +#~ " API is still experimental and will" +#~ " likely change significantly over time." +#~ msgstr "" +#~ "Remarque : l'API requête-réponse gRPC" +#~ " est encore expérimentale et est " +#~ "susceptible de changer de manière " +#~ "significative au fil du temps." + +#~ msgid "" +#~ "**Replace the eperimental** " +#~ "`start_client(rest=True)` **with the new** " +#~ "`start_client(transport=\"rest\")` " +#~ "([#1880](https://github.com/adap/flower/pull/1880))" +#~ msgstr "" +#~ "**Remplacez le fichier expérimental** " +#~ "`start_client(rest=True) **par le nouveau** " +#~ "`start_client(transport=\"rest\")` " +#~ "([#1880](https://github.com/adap/flower/pull/1880))" + +#~ msgid "" +#~ "The (experimental) `start_client` argument " +#~ "`rest` was deprecated in favor of " +#~ "a new argument `transport`. " +#~ "`start_client(transport=\"rest\")` will yield the" +#~ " same behaviour as `start_client(rest=True)` " +#~ "did before. 
All code should migrate " +#~ "to the new argument `transport`. The " +#~ "deprecated argument `rest` will be " +#~ "removed in a future release." +#~ msgstr "" + +#~ msgid "" +#~ "**Migrate experimental REST API to " +#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" +#~ msgstr "" +#~ "**Migrer l'API REST expérimentale vers " +#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" + +#~ msgid "" +#~ "The (experimental) REST API used to " +#~ "be implemented in " +#~ "[FastAPI](https://fastapi.tiangolo.com/), but it has" +#~ " now been migrated to use " +#~ "[Starlette](https://www.starlette.io/) directly." +#~ msgstr "" +#~ "L'API REST (expérimentale) était auparavant" +#~ " implémentée dans " +#~ "[FastAPI](https://fastapi.tiangolo.com/), mais elle " +#~ "a maintenant été migrée pour utiliser" +#~ " directement [Starlette](https://www.starlette.io/)." + +#~ msgid "" +#~ "**Add a new gRPC option** " +#~ "([#2197](https://github.com/adap/flower/pull/2197))" +#~ msgstr "" +#~ "**Ajouter une nouvelle option gRPC** " +#~ "([#2197](https://github.com/adap/flower/pull/2197))" + +#~ msgid "" +#~ "We now start a gRPC server with" +#~ " the `grpc.keepalive_permit_without_calls` option " +#~ "set to 0 by default. This prevents" +#~ " the clients from sending keepalive " +#~ "pings when there is no outstanding " +#~ "stream." +#~ msgstr "" +#~ "Nous démarrons maintenant un serveur " +#~ "gRPC avec l'option " +#~ "`grpc.keepalive_permit_without_calls` réglée sur 0" +#~ " par défaut, ce qui empêche les " +#~ "clients d'envoyer des pings de maintien" +#~ " lorsqu'il n'y a pas de flux en" +#~ " attente." 
+ +#~ msgid "" +#~ "**General improvements** " +#~ "([#1872](https://github.com/adap/flower/pull/1872), " +#~ "[#1866](https://github.com/adap/flower/pull/1866), " +#~ "[#1884](https://github.com/adap/flower/pull/1884))" +#~ msgstr "" +#~ "**Mettre à jour les exemples de " +#~ "code** ([#1291](https://github.com/adap/flower/pull/1291), " +#~ "[#1286](https://github.com/adap/flower/pull/1286), " +#~ "[#1282](https://github.com/adap/flower/pull/1282))" + +#~ msgid "Example projects" +#~ msgstr "Exemples" + +#~ msgid "" +#~ "`Flower simulation PyTorch " +#~ "`_" +#~ msgstr "" +#~ "`Flower Quickstart (TensorFlow/Keras) " +#~ "`_" + +#~ msgid "" +#~ "`Android Kotlin example " +#~ "`_" +#~ msgstr "" + +#~ msgid "`Android Java example `_" +#~ msgstr "" + +#~ msgid "Build a strategy from scratch" +#~ msgstr "Élaborer une stratégie à partir de zéro" + +#~ msgid "Customize the client" +#~ msgstr "Création du client IMDBC" + +#~ msgid "Get started with Flower" +#~ msgstr "" + +#~ msgid "Quickstart Android" +#~ msgstr "Démarrage rapide d'Android" + +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using TFLite and Flower on Android!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant TFLite et Flower sur" +#~ " Android !" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example " +#~ "`_ to" +#~ " learn more." +#~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet " +#~ "`_ " +#~ "pour en savoir plus." + +#~ msgid "Quickstart iOS" +#~ msgstr "Démarrage rapide iOS" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Neural Network on " +#~ "MNIST using Flower and CoreML on " +#~ "iOS devices." +#~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à former un réseau neuronal sur " +#~ "MNIST en utilisant Flower et CoreML " +#~ "sur les appareils iOS." 
+ +#~ msgid "" +#~ "First of all, for running the " +#~ "Flower Python server, it is recommended" +#~ " to create a virtual environment and" +#~ " run everything within a `virtualenv " +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." +#~ msgstr "" +#~ "Tout d'abord, pour l'exécution du " +#~ "serveur Flower Python, il est recommandé" +#~ " de créer un environnement virtuel et" +#~ " de tout exécuter au sein d'un " +#~ "`virtualenv `_. Pour l'implémentation du client" +#~ " Flower dans iOS, il est recommandé" +#~ " d'utiliser Xcode comme notre IDE." + +#~ msgid "" +#~ "Our example consists of one Python " +#~ "*server* and two iPhone *clients* that" +#~ " all have the same model." +#~ msgstr "" +#~ "Notre exemple se compose d'un *serveur*" +#~ " Python et de deux *clients* iPhone" +#~ " qui ont tous le même modèle." -#~ msgid "Starting a SSL-enabled client:" +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." #~ msgstr "" +#~ "*Les clients* sont chargés de générer" +#~ " des mises à jour de poids " +#~ "individuelles pour le modèle en fonction" +#~ " de leurs ensembles de données " +#~ "locaux. Ces mises à jour sont " +#~ "ensuite envoyées au *serveur* qui les" +#~ " agrège pour produire un meilleur " +#~ "modèle. Enfin, le *serveur* renvoie " +#~ "cette version améliorée du modèle à " +#~ "chaque *client*. Un cycle complet de " +#~ "mises à jour de poids s'appelle un" +#~ " *round*." -#~ msgid "Start a Ray-based Flower simulation server." 
-#~ msgstr "Simulation de moniteur" +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started to setup our Flower server " +#~ "environment. We first need to install" +#~ " Flower. You can do this by " +#~ "using pip:" +#~ msgstr "" +#~ "Maintenant que nous avons une idée " +#~ "approximative de ce qui se passe, " +#~ "commençons à configurer notre environnement" +#~ " de serveur Flower. Nous devons " +#~ "d'abord installer Flower, ce que tu " +#~ "peux faire à l'aide de pip :" + +#~ msgid "Or Poetry:" +#~ msgstr "Ou de la poésie :" #~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "`str` argument called `cid`. It should" -#~ " return a single client instance of" -#~ " type ClientLike. Note that the " -#~ "created client instances are ephemeral " -#~ "and will often be destroyed after " -#~ "a single method invocation. Since client" -#~ " instances are not long-lived, they" -#~ " should not attempt to carry state" -#~ " over method invocations. Any state " -#~ "required by the instance (model, " -#~ "dataset, hyperparameters, ...) should be " -#~ "(re-)created in either the call to " -#~ "`client_fn` or the call to any of" -#~ " the client methods (e.g., load " -#~ "evaluation data in the `evaluate` method" -#~ " itself)." +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training using CoreML " +#~ "as our local training pipeline and " +#~ "MNIST as our dataset. For simplicity " +#~ "reasons we will use the complete " +#~ "Flower client with CoreML, that has " +#~ "been implemented and stored inside the" +#~ " Swift SDK. The client implementation " +#~ "can be seen below:" +#~ msgstr "" +#~ "Maintenant que toutes nos dépendances " +#~ "sont installées, exécutons une simple " +#~ "formation distribuée en utilisant CoreML " +#~ "comme pipeline de formation local et " +#~ "MNIST comme ensemble de données. 
Pour" +#~ " des raisons de simplicité, nous " +#~ "utiliserons le client Flower complet " +#~ "avec CoreML, qui a été mis en " +#~ "œuvre et stocké à l'intérieur du " +#~ "SDK Swift. La mise en œuvre du " +#~ "client peut être vue ci-dessous :" + +#~ msgid "" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." +#~ msgstr "" + +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgstr "" + +#~ msgid "" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." +#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this informations beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" + +#~ msgid "" +#~ "After we have all of the necessary" +#~ " informations, let's create our Flower " +#~ "client." 
+#~ msgstr "" + +#~ msgid "" +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." +#~ msgstr "" + +#~ msgid "" +#~ "Once the server is running we can" +#~ " start the clients in different " +#~ "terminals. Build and run the client " +#~ "through your Xcode, one through Xcode" +#~ " Simulator and the other by deploying" +#~ " it to your iPhone. To see more" +#~ " about how to deploy your app " +#~ "to iPhone or Simulator visit `here " +#~ "`_." +#~ msgstr "" +#~ "Une fois que le serveur fonctionne, " +#~ "nous pouvons démarrer les clients dans" +#~ " différents terminaux. Construis et exécute" +#~ " le client grâce à ton Xcode, " +#~ "l'un via le simulateur Xcode et " +#~ "l'autre en le déployant sur ton " +#~ "iPhone. Pour en savoir plus sur la" +#~ " façon de déployer ton application " +#~ "sur l'iPhone ou le simulateur, visite" +#~ " `ici `_." + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." +#~ msgstr "" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré " +#~ "dans ton appareil ios. Le `code " +#~ "source complet " +#~ "`_ de " +#~ "cet exemple se trouve dans " +#~ ":code:`examples/ios`." 
+ +#~ msgid "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the open-source Flower community on " +#~ "Slack to connect, ask questions, and " +#~ "get help: `Join Slack `__ 🌼 We'd love to hear" +#~ " from you in the ``#introductions`` " +#~ "channel! And if anything is unclear, " +#~ "head over to the ``#questions`` channel." +#~ msgstr "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ et rejoignez" +#~ " la communauté open-source Flower sur" +#~ " Slack pour vous connecter, poser des" +#~ " questions et obtenir de l'aide : " +#~ "`Join Slack `__ " +#~ "🌼 Nous serions ravis d'avoir de " +#~ "vos nouvelles dans le canal " +#~ "``#introductions`` ! Et si quelque chose" +#~ " n'est pas clair, dirigez-vous vers" +#~ " le canal ``#questions``." + +#~ msgid "|bd48315a61c14495babefe3c7918b493|" +#~ msgstr "" + +#~ msgid "|c00d9e5b0d324d96b86da8a78b05b14b|" #~ msgstr "" -#~ "Une fonction créant des instances de " -#~ "client. La fonction doit prendre un " -#~ "seul argument `str` appelé `cid`. Elle" -#~ " doit retourner une seule instance de" -#~ " client de type ClientLike. Notez que" -#~ " les instances de client créées sont" -#~ " éphémères et seront souvent détruites " -#~ "après une seule invocation de méthode." -#~ " Puisque les instances de client ne" -#~ " sont pas de longue durée, elles " -#~ "ne doivent pas essayer de transporter" -#~ " l'état sur les invocations de " -#~ "méthode. Tout état requis par l'instance" -#~ " (modèle, jeu de données, hyperparamètres," -#~ " ...) doit être (re)créé dans l'appel" -#~ " à `client_fn` ou dans l'appel à " -#~ "n'importe quelle méthode de client (par" -#~ " exemple, charger les données d'évaluation" -#~ " dans la méthode `evaluate` elle-" -#~ "même)." -#~ msgid "" -#~ "The total number of clients in " -#~ "this simulation. This must be set " -#~ "if `clients_ids` is not set and " -#~ "vice-versa." +#~ msgid "|faae2ee10f4149c9907563c4f48ec6ea|" #~ msgstr "" -#~ msgid "" -#~ "List `client_id`s for each client. 
This" -#~ " is only required if `num_clients` is" -#~ " not set. Setting both `num_clients` " -#~ "and `clients_ids` with `len(clients_ids)` not" -#~ " equal to `num_clients` generates an " -#~ "error." +#~ msgid "|13a655510351455292f145a61d6c15d6|" #~ msgstr "" -#~ msgid "" -#~ "CPU and GPU resources for a single" -#~ " client. Supported keys are `num_cpus` " -#~ "and `num_gpus`. Example: `{\"num_cpus\": 4," -#~ " \"num_gpus\": 1}`. To understand the " -#~ "GPU utilization caused by `num_gpus`, " -#~ "consult the Ray documentation on GPU " -#~ "support." +#~ msgid "|13949884182846e3a91433190a936ba9|" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Server`. If no instance" -#~ " is provided, then `start_server` will " -#~ "create one." +#~ msgid "|9bf26cc650b146e88b4745df040ece37|" #~ msgstr "" -#~ msgid "" -#~ "Currently supported values are `num_rounds`" -#~ " (int, default: 1) and `round_timeout` " -#~ "in seconds (float, default: None)." +#~ msgid "|1590915480fc41708bd43e48af9582f9|" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.Strategy`. If no " -#~ "strategy is provided, then `start_server` " -#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgid "|e5ee96d702b64256b97b8ca99db10787|" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.ClientManager`. If no " -#~ "implementation is provided, then " -#~ "`start_simulation` will use " -#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgid "|84840b244edd47c481278ce534c126cd|" #~ msgstr "" -#~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. 
If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args: { " -#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " -#~ "False } An empty dictionary can " -#~ "be used (ray_init_args={}) to prevent " -#~ "any arguments from being passed to " -#~ "ray.init." +#~ msgid "|f33f5ebb3a844a2ba54bb6be3571b172|" #~ msgstr "" -#~ msgid "" -#~ "Optional dictionary containing arguments for" -#~ " the call to `ray.init`. If " -#~ "ray_init_args is None (the default), Ray" -#~ " will be initialized with the " -#~ "following default args:" +#~ msgid "|5645db4ba9c945518d51ff234f35c797|" #~ msgstr "" -#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgid "|317af8d28fcc479ab981047d058c4751|" #~ msgstr "" -#~ msgid "" -#~ "An empty dictionary can be used " -#~ "(ray_init_args={}) to prevent any arguments" -#~ " from being passed to ray.init." +#~ msgid "|8bfd0e697a494d5385662debafade6bf|" #~ msgstr "" #~ msgid "" -#~ "Set to True to prevent `ray.shutdown()`" -#~ " in case `ray.is_initialized()=True`." +#~ "Differential privacy (DP) is often " +#~ "mentioned in the context of Federated" +#~ " Learning. It is a privacy-preserving" +#~ " method used when analyzing and " +#~ "sharing statistical data, ensuring the " +#~ "privacy of individual participants. DP " +#~ "achieves this by adding statistical " +#~ "noise to the model updates, ensuring " +#~ "any individual participants’ information " +#~ "cannot be distinguished or re-" +#~ "identified. This technique can be " +#~ "considered an optimization that provides " +#~ "a quantifiable privacy protection measure." +#~ msgstr "" +#~ "La confidentialité différentielle (DP) est " +#~ "souvent mentionnée dans le contexte de" +#~ " l'apprentissage fédéré. 
Il s'agit d'une" +#~ " méthode de préservation de la vie" +#~ " privée utilisée lors de l'analyse et" +#~ " du partage de données statistiques, " +#~ "garantissant la confidentialité des " +#~ "participants individuels. La DP y " +#~ "parvient en ajoutant un bruit " +#~ "statistique aux mises à jour du " +#~ "modèle, garantissant que toute information " +#~ "sur les participants individuels ne peut" +#~ " être distinguée ou réidentifiée. Cette " +#~ "technique peut être considérée comme une" +#~ " optimisation qui fournit une mesure " +#~ "quantifiable de protection de la vie " +#~ "privée." + +#~ msgid "|e5dc001d27ad460caeab669e957b3c36|" #~ msgstr "" -#~ msgid "**hist** -- Object containing metrics from training." +#~ msgid "API Reference - Flower binaries" #~ msgstr "" -#~ msgid "Flower server." -#~ msgstr "Serveur de Flower" +#~ msgid "API Reference - flwr" +#~ msgstr "Référence pour l'API" -#~ msgid "Start a Flower server using the gRPC transport layer." +#~ msgid "" +#~ "Defines whether or not the client " +#~ "is interacting with the server using " +#~ "the experimental REST API. This feature" +#~ " is experimental, it might change " +#~ "considerably in future versions of " +#~ "Flower." #~ msgstr "" -#~ msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#~ msgid "Returns a client's set of properties." #~ msgstr "" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_server` will create one." +#~ "Defines whether or not the client " +#~ "is interacting with the server using " +#~ "the experimental REST API. This feature" +#~ " is experimental, it might be change" +#~ " considerably in future versions of " +#~ "Flower." #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.strategy.Strategy`. 
If no " -#~ "strategy is provided, then `start_server` " -#~ "will use `flwr.server.strategy.FedAvg`." +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "str argument called `cid`. It should " +#~ "return a single client instance of " +#~ "type ClientLike. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset,hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." #~ msgstr "" #~ msgid "" -#~ "An implementation of the abstract base" -#~ " class `flwr.server.ClientManager`. If no " -#~ "implementation is provided, then " -#~ "`start_server` will use " -#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "str argument called `cid`. It should " +#~ "return a single client instance of " +#~ "type ClientLike. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not" +#~ msgstr "" + +#~ msgid "attempt to carry state over method invocations. Any state required by" #~ msgstr "" #~ msgid "" -#~ "The maximum length of gRPC messages " -#~ "that can be exchanged with the " -#~ "Flower clients. The default should be" -#~ " sufficient for most models. Users " -#~ "who train very large models might " -#~ "need to increase this value. 
Note " -#~ "that the Flower clients need to be" -#~ " started with the same value (see " -#~ "`flwr.client.start_client`), otherwise clients will" -#~ " not know about the increased limit" -#~ " and block larger messages." +#~ "the instance (model, dataset,hyperparameters, " +#~ "...) should be (re-)created in either" +#~ " the call to `client_fn` or the " +#~ "call to any of the client methods" +#~ " (e.g., load evaluation data in the" +#~ " `evaluate` method itself)." #~ msgstr "" #~ msgid "" -#~ "Tuple containing root certificate, server " -#~ "certificate, and private key to start" -#~ " a secure SSL-enabled server. The " -#~ "tuple is expected to have three " -#~ "bytes elements in the following order:" -#~ " * CA certificate. * server " -#~ "certificate. * server private key." +#~ "\\frac{\\mu}{2} || w - w^t ||^2\n" +#~ "\n" #~ msgstr "" #~ msgid "" -#~ "Tuple containing root certificate, server " -#~ "certificate, and private key to start" -#~ " a secure SSL-enabled server. The " -#~ "tuple is expected to have three " -#~ "bytes elements in the following order:" +#~ "Adaptive Federated Optimization using Adagrad" +#~ " (FedAdagrad) [Reddi et al., 2020] " +#~ "strategy." #~ msgstr "" -#~ msgid "CA certificate." -#~ msgstr "Certificats" +#~ msgid "" +#~ "Adaptive Federated Optimization using Adam " +#~ "(FedAdam) [Reddi et al., 2020] strategy." +#~ msgstr "" -#~ msgid "server certificate." -#~ msgstr "Certificats" +#~ msgid "" +#~ "Adaptive Federated Optimization using Yogi " +#~ "(FedYogi) [Reddi et al., 2020] strategy." +#~ msgstr "" -#~ msgid "server private key." -#~ msgstr "stratégie.du.serveur" +#~ msgid "Contributing Baselines" +#~ msgstr "Configuration du contributeur" -#~ msgid "**hist** -- Object containing training and evaluation metrics." +#~ msgid "" +#~ "Do you have a new federated " +#~ "learning paper and want to add a" +#~ " new baseline to Flower? Or do " +#~ "you want to add an experiment to" +#~ " an existing baseline paper? 
Great, " +#~ "we really appreciate your contribution." #~ msgstr "" -#~ msgid "Starting an insecure server:" -#~ msgstr "Démarrer le serveur" - -#~ msgid "Starting an SSL-enabled server:" -#~ msgstr "Démarrer le serveur" +#~ msgid "" +#~ "The goal of Flower Baselines is to" +#~ " reproduce experiments from popular papers" +#~ " to accelerate researchers by enabling " +#~ "faster comparisons to new strategies, " +#~ "datasets, models, and federated pipelines " +#~ "in general." +#~ msgstr "" -#~ msgid "Contains the strategy abstraction and different implementations." +#~ msgid "" +#~ "Before you start to work on a " +#~ "new baseline or experiment, please check" +#~ " the `Flower Issues " +#~ "`_ or `Flower " +#~ "Pull Requests `_ " +#~ "to see if someone else is already" +#~ " working on it. Please open a " +#~ "new issue if you are planning to" +#~ " work on a new baseline or " +#~ "experiment with a short description of" +#~ " the corresponding paper and the " +#~ "experiment you want to contribute." #~ msgstr "" -#~ msgid "Abstract base class for server strategy implementations." +#~ msgid "TL;DR: Adding a new Flower Baseline" #~ msgstr "" -#~ msgid "The current round of federated learning." -#~ msgstr "Qu'est-ce que l'apprentissage fédéré ?" - #~ msgid "" -#~ "Successful updates from the previously " -#~ "selected and configured clients. Each " -#~ "pair of `(ClientProxy, FitRes` constitutes " -#~ "a successful update from one of " -#~ "the previously selected clients. Not " -#~ "that not all previously selected clients" -#~ " are necessarily included in this " -#~ "list: a client might drop out and" -#~ " not submit a result. For each " -#~ "client that did not submit an " -#~ "update, there should be an `Exception`" -#~ " in `failures`." +#~ "Let's say you want to contribute " +#~ "the code of your most recent " +#~ "Federated Learning publication, *FedAweseome*. 
" +#~ "There are only three steps necessary " +#~ "to create a new *FedAweseome* Flower " +#~ "Baseline:" #~ msgstr "" -#~ msgid "" -#~ "Exceptions that occurred while the " -#~ "server was waiting for client updates." +#~ msgid "**Get the Flower source code on your machine**" #~ msgstr "" #~ msgid "" -#~ "**aggregation_result** -- The aggregated " -#~ "evaluation result. Aggregation typically uses" -#~ " some variant of a weighted average." +#~ "Fork the Flower codebase: got to " +#~ "the `Flower GitHub repo " +#~ "`_ and fork the " +#~ "code (click the *Fork* button in " +#~ "the top-right corner and follow " +#~ "the instructions)" #~ msgstr "" -#~ msgid "Aggregate training results." -#~ msgstr "Résultats globaux de l'évaluation." - #~ msgid "" -#~ "Successful updates from the previously " -#~ "selected and configured clients. Each " -#~ "pair of `(ClientProxy, FitRes)` constitutes" -#~ " a successful update from one of " -#~ "the previously selected clients. Not " -#~ "that not all previously selected clients" -#~ " are necessarily included in this " -#~ "list: a client might drop out and" -#~ " not submit a result. For each " -#~ "client that did not submit an " -#~ "update, there should be an `Exception`" -#~ " in `failures`." +#~ "Clone the (forked) Flower source code:" +#~ " :code:`git clone " +#~ "git@github.com:[your_github_username]/flower.git`" #~ msgstr "" #~ msgid "" -#~ "**parameters** -- If parameters are " -#~ "returned, then the server will treat " -#~ "these as the new global model " -#~ "parameters (i.e., it will replace the" -#~ " previous parameters with the ones " -#~ "returned from this method). If `None`" -#~ " is returned (e.g., because there " -#~ "were only failures and no viable " -#~ "results) then the server will no " -#~ "update the previous model parameters, " -#~ "the updates received in this round " -#~ "are discarded, and the global model " -#~ "parameters remain the same." 
+#~ "Open the code in your favorite " +#~ "editor (e.g., using VSCode: ``cd flower" +#~ " ; code .``)" #~ msgstr "" -#~ msgid "Configure the next round of evaluation." -#~ msgstr "Configuration de l'évaluation côté serveur" - -#~ msgid "The client manager which holds all currently connected clients." +#~ msgid "**Add the FedAwesome code**" #~ msgstr "" #~ msgid "" -#~ "**evaluate_configuration** -- A list of " -#~ "tuples. Each tuple in the list " -#~ "identifies a `ClientProxy` and the " -#~ "`EvaluateIns` for this particular " -#~ "`ClientProxy`. If a particular `ClientProxy`" -#~ " is not included in this list, " -#~ "it means that this `ClientProxy` will" -#~ " not participate in the next round" -#~ " of federated evaluation." +#~ "Add your :code:`FedAwesome` code under " +#~ ":code:`baselines/flwr_baselines/publications/[fedawesome]`" #~ msgstr "" -#~ msgid "Configure the next round of training." +#~ msgid "Add a `pyproject.toml` with all necessary dependencies" #~ msgstr "" -#~ msgid "" -#~ "**fit_configuration** -- A list of " -#~ "tuples. Each tuple in the list " -#~ "identifies a `ClientProxy` and the " -#~ "`FitIns` for this particular `ClientProxy`." -#~ " If a particular `ClientProxy` is not" -#~ " included in this list, it means " -#~ "that this `ClientProxy` will not " -#~ "participate in the next round of " -#~ "federated learning." +#~ msgid "Add a `README.md` describing how to use your baseline" #~ msgstr "" -#~ msgid "Evaluate the current model parameters." -#~ msgstr "``get_parameters`` : renvoie les paramètres du modèle local actuel" +#~ msgid "**Open a pull request**" +#~ msgstr "" -#~ msgid "" -#~ "This function can be used to " -#~ "perform centralized (i.e., server-side) " -#~ "evaluation of model parameters." 
+#~ msgid "Stage your changes: :code:`git add .`" #~ msgstr "" #~ msgid "" -#~ "**evaluation_result** -- The evaluation " -#~ "result, usually a Tuple containing loss" -#~ " and a dictionary containing task-" -#~ "specific metrics (e.g., accuracy)." +#~ "Commit & push: :code:`git commit -m " +#~ "\"Create new FedAweseome baseline\" ; " +#~ "git push`" #~ msgstr "" -#~ msgid "Initialize the (global) model parameters." -#~ msgstr "Initialise le modèle global" - #~ msgid "" -#~ "**parameters** -- If parameters are " -#~ "returned, then the server will treat " -#~ "these as the initial global model " -#~ "parameters." +#~ "Open a pull request: go to *your*" +#~ " fork of the Flower codebase and " +#~ "create a pull request that targets " +#~ "the Flower ``main``` branch" #~ msgstr "" -#~ msgid "Configurable FedAvg strategy implementation." -#~ msgstr "Configuration de l'évaluation fédérée" +#~ msgid "Further reading:" +#~ msgstr "Aide supplémentaire" -#~ msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#~ msgid "" +#~ "`GitHub docs: About forks " +#~ "`_" #~ msgstr "" #~ msgid "" -#~ "Fraction of clients used during " -#~ "training. In case `min_fit_clients` is " -#~ "larger than `fraction_fit * " -#~ "available_clients`, `min_fit_clients` will still " -#~ "be sampled. Defaults to 1.0." +#~ "`GitHub docs: Creating a pull request" +#~ " `_" #~ msgstr "" #~ msgid "" -#~ "Fraction of clients used during " -#~ "validation. In case `min_evaluate_clients` is" -#~ " larger than `fraction_evaluate * " -#~ "available_clients`, `min_evaluate_clients` will " -#~ "still be sampled. Defaults to 1.0." +#~ "`GitHub docs: Creating a pull request" +#~ " from a fork `_" #~ msgstr "" -#~ msgid "Minimum number of clients used during training. Defaults to 2." -#~ msgstr "" +#~ msgid "Requirements" +#~ msgstr "Changements nécessaires" -#~ msgid "Minimum number of clients used during validation. Defaults to 2." 
+#~ msgid "" +#~ "Contributing a new baseline is really" +#~ " easy. You only have to make " +#~ "sure that your federated learning " +#~ "experiments are running with Flower. As" +#~ " soon as you have created a " +#~ "Flower-based experiment, you can contribute" +#~ " it." #~ msgstr "" -#~ msgid "Minimum number of total clients in the system. Defaults to 2." +#~ msgid "" +#~ "It is recommended (but not required) " +#~ "to use `Hydra `_ to " +#~ "execute the experiment." #~ msgstr "" -#~ msgid "Optional function used for validation. Defaults to None." +#~ msgid "" +#~ "Please make sure to add your " +#~ "baseline or experiment to the " +#~ "corresponding directory as explained in " +#~ "`Executing Baseline `_. Give your baseline the " +#~ "unique identifier. For example, :code:`fedbn`" +#~ " refers to the paper \"FedBN: " +#~ "Federated Learning on non-IID Features" +#~ " via Local Batch Normalization\" and " +#~ "creates the corresponding directory " +#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn`. Then" +#~ " you create the experiment directory " +#~ "with the experiment name. For example," +#~ " the experiment that measures the " +#~ "convergence has the directory " +#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`." +#~ " This directory contains all your " +#~ "code and a :code:`README.md` with a " +#~ "link to the paper, the paper's " +#~ "abstract, and a detailed description of" +#~ " how to execute the experiments." #~ msgstr "" -#~ msgid "Function used to configure training. Defaults to None." +#~ msgid "" +#~ "Please also check if :code:`pyproject.toml`" +#~ " and :code:`requirements.txt` (all in the" +#~ " directory `baselines " +#~ "`_ contain" +#~ " all required Python packages (libraries," +#~ " frameworks, ...). If the required " +#~ "Python package is not yet listed, " +#~ "please add it to :code:`pyproject.toml`. 
" +#~ "If you need a different version of" +#~ " a package already listed, please try" +#~ " to ensure your experiment runs with" +#~ " the existing version listed in " +#~ ":code:`pyproject.toml` (or :code:`requirements.txt`). " +#~ "If that doesn't work, open a " +#~ "GitHub Issue and request the version " +#~ "change." #~ msgstr "" -#~ msgid "Function used to configure validation. Defaults to None." +#~ msgid "" +#~ "The experiment also needs to contain " +#~ "a file with a downloader for the" +#~ " dataset - if possible automatic. " +#~ "This can be included in one of " +#~ "the files or as an extra file." #~ msgstr "" -#~ msgid "Whether or not accept rounds containing failures. Defaults to True." +#~ msgid "" +#~ "Finally, please add plots for all " +#~ "experimental results your code is " +#~ "running to the :code:`experiment` directory" +#~ " and include them in :code:`README.md`. " +#~ "Doing this helps others and enables " +#~ "them to recognize your contributions " +#~ "quickly." #~ msgstr "" -#~ msgid "Initial global model parameters." -#~ msgstr "Initialise le modèle global" +#~ msgid "" +#~ "We are aware that a few libraries" +#~ " are available only via Conda. " +#~ "However, we want to encourage you " +#~ "to ensure that your code also runs" +#~ " well outside of Conda to make " +#~ "it more accessible to the broader " +#~ "research community." +#~ msgstr "" -#~ msgid "Metrics aggregation function, optional." +#~ msgid "Here is a checklist for adding a new baseline:" #~ msgstr "" -#~ msgid "Aggregate evaluation losses using weighted average." -#~ msgstr "Résultats globaux de l'évaluation." +#~ msgid "" +#~ "add required Python packages to " +#~ ":code:`pyproject.toml` or :code:`requirements.txt`" +#~ msgstr "" -#~ msgid "Aggregate fit results using weighted average." 
+#~ msgid "" +#~ "add all required code under " +#~ ":code:`baselines/flwr_baselines/publications/[new_publication]`" #~ msgstr "" -#~ msgid "Evaluate model parameters using an evaluation function." +#~ msgid "add a dataset downloader" #~ msgstr "" -#~ msgid "Initialize global model parameters." -#~ msgstr "Initialise le modèle global" +#~ msgid "add an experiment plot" +#~ msgstr "" -#~ msgid "Use a fraction of available clients for evaluation." +#~ msgid "add a :code:`README.md`" #~ msgstr "" -#~ msgid "Return the sample size and the required number of available clients." +#~ msgid "Usability" #~ msgstr "" -#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgid "" +#~ "Flower is known and loved for its" +#~ " usability. Therefore, make sure that " +#~ "your baseline or experiment can be " +#~ "executed with a single command such " +#~ "as :code:`./run.sh` or :code:`python3 " +#~ "main.py`. How you organize the " +#~ "experiments and the related code " +#~ "structure is up to you as an " +#~ "author, but please keep in mind to" +#~ " make sure that other users can " +#~ "easily understand and execute your " +#~ "baseline." #~ msgstr "" -#~ msgid "Federated Averaging with Momentum strategy." -#~ msgstr "Stratégie de moyenne fédérée." +#~ msgid "We look forward to your contribution!" +#~ msgstr "Exemple de première contribution" -#~ msgid "Implementation based on https://arxiv.org/pdf/1909.06335.pdf" -#~ msgstr "" +#~ msgid "flwr" +#~ msgstr "Fleur" -#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgid "binaries" #~ msgstr "" -#~ msgid "Fraction of clients used during validation. Defaults to 0.1." -#~ msgstr "" +#~ msgid "Flower Baselines" +#~ msgstr "Demande pour une nouvelle Flower Baseline" #~ msgid "" -#~ "Server-side learning rate used in " -#~ "server-side optimization. Defaults to 1.0." 
+#~ "Flower Baselines are a collection of " +#~ "organised scripts used to reproduce " +#~ "results from well-known publications or" +#~ " benchmarks. You can check which " +#~ "baselines already exist and/or contribute " +#~ "your own baseline." #~ msgstr "" -#~ msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#~ msgid "Flower requires `Python 3.7 `_ or above." +#~ msgstr "`Python 3.7 `_ ou plus" + +#~ msgid "|9e234df38403464899ad3aee36bf1b95|" #~ msgstr "" -#~ msgid "Configurable QFedAvg strategy implementation." +#~ msgid "|081158351506446f9f772cb45ee68523|" #~ msgstr "" -#~ msgid "Configurable fault-tolerant FedAvg strategy implementation." +#~ msgid "|e9325042b79c45ed96b5a8d2f6f3cdc9|" #~ msgstr "" -#~ msgid "Configurable FedAdagrad strategy implementation." +#~ msgid "|11b83bb107344db78a37266e080c4a7a|" #~ msgstr "" -#~ msgid "Federated Optim strategy interface." +#~ msgid "|cd764bcf6d174a9cb62880ace9a8a6bd|" #~ msgstr "" -#~ msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#~ msgid "|5c520984cced41e38f6bb4af416c3f84|" #~ msgstr "" -#~ "FedYogi - Stratégie d'apprentissage fédéré " -#~ "utilisant Yogi côté serveur. Mise en " -#~ "oeuvre basée sur https://arxiv.org/abs/2003.00295" -#~ msgid "Fraction of clients used during training. Defaults to 1.0." +#~ msgid "|66941b0608644cf1a2269a194d3bc0dd|" #~ msgstr "" -#~ msgid "Fraction of clients used during validation. Defaults to 1.0." +#~ msgid "|4b149f3a095b402bb8890275aabc9298|" #~ msgstr "" -#~ msgid "Server-side learning rate. Defaults to 1e-1." +#~ msgid "|675cf7d3d53a4817b5d47529c0758158|" #~ msgstr "" -#~ msgid "Client-side learning rate. Defaults to 1e-1." +#~ msgid "|7ca594e16ae7477790c2e3cf096ec7cd|" #~ msgstr "" -#~ msgid "Momentum parameter. Defaults to 0.0." +#~ msgid "|d669336577b545a081d5d74169a9bc4d|" #~ msgstr "" -#~ msgid "Second moment parameter. Defaults to 0.0." 
+#~ msgid "|00b3d6cde1ff410ba54eff58da4e033a|" #~ msgstr "" -#~ msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#~ msgid "|29a11f5353084c1995c538f7edef71a5|" #~ msgstr "" -#~ msgid "Configurable FedProx strategy implementation." +#~ msgid "|d62eda312fd44726bb5db2b761fe7e0d|" #~ msgstr "" -#~ msgid "Federated Optimization strategy." -#~ msgstr "Stratégie de moyenne fédérée." +#~ msgid "Using Baselines" +#~ msgstr "" -#~ msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#~ msgid "Structure" #~ msgstr "" #~ msgid "" -#~ "The strategy in itself will not be" -#~ " different than FedAvg, the client " -#~ "needs to be adjusted. A proximal " -#~ "term needs to be added to the " -#~ "loss function during the training:" +#~ "All baselines are available in the " +#~ "directory `baselines " +#~ "`_. This " +#~ "directory has two different files:" #~ msgstr "" #~ msgid "" -#~ "\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -#~ "\n" -#~ msgstr "\\\\frac{\\Nmu}{2} || w - w^t ||^2" +#~ "Both files contain all the information" +#~ " about required Python packages (libraries," +#~ " frameworks, ...) and their versions. " +#~ "You can install each library separately" +#~ " by using :code: `pip install` or " +#~ "you can use Poetry and run " +#~ "code:`poetry install` in the directory " +#~ "where you find the :code:`pyproject.toml` " +#~ "file. After installing all requirements, " +#~ "you can start to run your " +#~ "baseline." +#~ msgstr "" #~ msgid "" -#~ "Where $w^t$ are the global parameters" -#~ " and $w$ are the local weights " -#~ "the function will be optimized with." +#~ "Go to the baseline that you want" +#~ " to execute. The directories and " +#~ "files are structured so that you " +#~ "can first find the paper with " +#~ "their unique identifier such that, for" +#~ " example, :code:`FedProx` refers to the " +#~ "paper \"Federated Optimization in " +#~ "Heterogeneous Networks\". 
The :code:`fedprox` " +#~ "section contains all available experiments " +#~ "from that paper." #~ msgstr "" -#~ msgid "In PyTorch, for example, the loss would go from:" +#~ msgid "" +#~ "The experiment area contains a " +#~ ":code:`README.md` covering the corresponding " +#~ "paper, its abstract, and goal as " +#~ "well as a detailed description of " +#~ "how to run the baseline. Please " +#~ "use the :code:`README.md` to see how " +#~ "to execute each individual baseline." #~ msgstr "" -#~ msgid "To:" +#~ msgid "Available Baselines" #~ msgstr "" #~ msgid "" -#~ "With `global_params` being a copy of " -#~ "the parameters before the training takes" -#~ " place." +#~ "The following table lists all currently" +#~ " available baselines and the corresponding" +#~ " papers. If you want to add a" +#~ " new baseline or experiment, please " +#~ "check the `Contributing Baselines " +#~ "`_ section." #~ msgstr "" -#~ msgid "" -#~ "The weight of the proximal term " -#~ "used in the optimization. 0.0 makes " -#~ "this strategy equivalent to FedAvg, and" -#~ " the higher the coefficient, the more" -#~ " regularization will be used (that " -#~ "is, the client parameters will need " -#~ "to be closer to the server " -#~ "parameters during training)." +#~ msgid "Paper" #~ msgstr "" -#~ msgid "Sends the proximal factor mu to the clients" +#~ msgid "Experiment" #~ msgstr "" -#~ msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#~ msgid "Directory" #~ msgstr "" -#~ "Stratégie FedAdagrad - Optimisation fédérée" -#~ " adaptative à l'aide d'Adagrad." -#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgid "`FedAvg `_" #~ msgstr "" -#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgid "MNIST" #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " en utilisant fastai et Flower !" - -#~ msgid "FedAdam - Adaptive Federated Optimization using Adam." 
-#~ msgstr "FedAdam - Optimisation fédérée adaptative utilisant Adam." -#~ msgid "Momentum parameter. Defaults to 0.9." +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedavg_mnist/`" #~ msgstr "" -#~ msgid "Second moment parameter. Defaults to 0.99." +#~ msgid "`FedProx `_" #~ msgstr "" -#~ msgid "FedYogi [Reddi et al., 2020] strategy." -#~ msgstr "Stratégie FedYogi [Reddi et al., 2020]." - -#~ msgid "Adaptive Federated Optimization using Yogi." -#~ msgstr "Optimisation fédérée adaptative à l'aide de Yogi." - -#~ msgid "Federated learning strategy using Yogi on server-side." -#~ msgstr "L'apprentissage fédéré en cinq étapes" - -#~ msgid "Differential Privacy Wrappers in Flower" -#~ msgstr "Les enveloppes différentielles de confidentialité dans les fleurs" - -#~ msgid "Evaluation" -#~ msgstr "Solution" - -#~ msgid "Code examples" -#~ msgstr "Exemple de code complet" - -#~ msgid "" -#~ "Flower Quickstart (PyTorch): coming soon " -#~ "(the TensorFlow/Keras example can easily " -#~ "be changed to make it work with" -#~ " PyTorch)" +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedprox_mnist/`" #~ msgstr "" -#~ msgid "First time contributors" -#~ msgstr "Bonnes premières contributions" - -#~ msgid "First MXNet 1.6 example (MNIST)" +#~ msgid "`FedOpt `_" #~ msgstr "" -#~ msgid "ImageNet (PyTorch/TensorFlow)" +#~ msgid "sparse gradient task" #~ msgstr "" -#~ msgid "LSTM (PyTorch/TensorFlow)" +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/adaptive_federated_optimization`" #~ msgstr "" -#~ msgid "Transformer (PyTorch/TensorFlow)" +#~ msgid "`FedBN `_" #~ msgstr "" -#~ msgid "BERT (PyTorch/TensorFlow)" +#~ msgid "convergence rate" #~ msgstr "" -#~ msgid "Logging" -#~ msgstr "Enregistrement" - -#~ msgid "|cce04c6f539b421a91f5dba40287193f|" -#~ msgstr "|cce04c6f539b421a91f5dba40287193f|" - -#~ msgid "|e392aef42ba248e19e35446f95a6d1ca|" -#~ msgstr "|e392aef42ba248e19e35446f95a6d1ca|" - -#~ msgid 
"|7e028f44defe4f31a02debc729f2010d|" -#~ msgstr "|7e028f44defe4f31a02debc729f2010d|" - -#~ msgid "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" -#~ msgstr "|b89f7b7ae05e4ecd92baa69b7a9fe1be|" - -#~ msgid "|9c0445ce962744e1a1c0a4abc697a334|" -#~ msgstr "|9c0445ce962744e1a1c0a4abc697a334|" - -#~ msgid "|a3246766a6db412888131b3bcdad0971|" -#~ msgstr "|a3246766a6db412888131b3bcdad0971|" - -#~ msgid "|db6f2bee32f143b8a5085b6a8ce1acd1|" -#~ msgstr "|db6f2bee32f143b8a5085b6a8ce1acd1|" +#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`" +#~ msgstr "" -#~ msgid "|405653bc8f874e9595fd59cc82b3d48c|" -#~ msgstr "|405653bc8f874e9595fd59cc82b3d48c|" +#~ msgid "" +#~ "Flower requires `Python 3.7 " +#~ "`_ or above, we " +#~ "recommend `Python 3.8 " +#~ "`_." +#~ msgstr "" +#~ "Flower nécessite `Python 3.7 " +#~ "`_ ou plus, nous " +#~ "recommandons `Python 3.8 " +#~ "`_." -#~ msgid "|073a728154ed406e8fe54e1d9f18dcb9|" -#~ msgstr "|073a728154ed406e8fe54e1d9f18dcb9|" +#~ msgid "|6baade94cd14454e82ead34fcc29a182|" +#~ msgstr "" -#~ msgid "|50e80ea4f22945848b65ed7eed35e0e1|" -#~ msgstr "|50e80ea4f22945848b65ed7eed35e0e1|" +#~ msgid "|1209ecd819104c458d396cf665c7ed4f|" +#~ msgstr "" -#~ msgid "|f3cf9148d85e4b68b66b6c255b25e327|" -#~ msgstr "|f3cf9148d85e4b68b66b6c255b25e327|" +#~ msgid "|c088b02349304344a53f3ce1464225fb|" +#~ msgstr "" -#~ msgid "|1fedb4f8714947e1b13f03696180c741|" -#~ msgstr "|1fedb4f8714947e1b13f03696180c741|" +#~ msgid "|b54d50afc82a4a57a55997a9eaeb735b|" +#~ msgstr "" -#~ msgid "|a32d4ad1ccb34461942d75c7b2b51d65|" -#~ msgstr "|a32d4ad1ccb34461942d75c7b2b51d65|" +#~ msgid "|d17b57e97b714a25b43790d4b832fd87|" +#~ msgstr "" -#~ msgid "|3531696c52904cd3b9944034ab959d48|" -#~ msgstr "|3531696c52904cd3b9944034ab959d48|" +#~ msgid "|38966d05301a4854aa73c8c5033bfaab|" +#~ msgstr "" -#~ msgid "An Introduction to Federated Learning" -#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" +#~ msgid "|231d55f7926d4a5db02dcd724ec62529|" +#~ msgstr "" 
-#~ msgid "Strategies in Federated Learning" -#~ msgstr "Mise à l'échelle de l'apprentissage fédéré" +#~ msgid "|fb44f2e13a1b4b69b7a72234eedd13f4|" +#~ msgstr "" -#~ msgid "Building a Strategy" -#~ msgstr "Stratégies intégrées" +#~ msgid "|1cfc77af5d164030942e84d14268c256|" +#~ msgstr "" -#~ msgid "Client and NumPyClient" -#~ msgstr "NumPyClient" +#~ msgid "|0d50828231a64bc08223544a2d2fa216|" +#~ msgstr "" -#~ msgid "Strategies" -#~ msgstr "Stratégies personnalisées" +#~ msgid "|904387757ceb42fbaa1875f3e8061113|" +#~ msgstr "" -#~ msgid "SSL-enabled Server and Client" +#~ msgid "|68608e1b7c4842458c528b431c715f5a|" #~ msgstr "" -#~ msgid "About these documents" -#~ msgstr "À propos de ces documents" +#~ msgid "|2adb106bda97480bb4b33eac472e321e|" +#~ msgstr "" -#~ msgid "Index" -#~ msgstr "Index" +#~ msgid "|025f0a6f7a6145cba4bf8fa0e2495851|" +#~ msgstr "" -#~ msgid "Search" -#~ msgstr "Recherche" +#~ msgid "Before the release" +#~ msgstr "Avant la sortie" -#~ msgid "Copyright" -#~ msgstr "Droits d'auteur" +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" +#~ "Mettez à jour le journal des " +#~ "modifications (``changelog.md``) avec tous les" +#~ " changements pertinents qui se sont " +#~ "produits après la dernière version. Si" +#~ " la dernière version a été étiquetée" +#~ " ``v1.2.0``, vous pouvez utiliser l'URL " +#~ "suivante pour voir tous les commits " +#~ "qui ont été fusionnés dans ``main`` " +#~ "depuis lors :" -#~ msgid "Save Progress" +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" #~ msgstr "" +#~ "`GitHub : Compare v1.2.0...main " +#~ "`_" #~ msgid "" -#~ "The Flower server does not prescribe " -#~ "a way to persist model updates or" -#~ " evaluation results. 
Flower does not " -#~ "(yet) automatically save model updates " -#~ "on the server-side. It's on the" -#~ " roadmap to provide a built-in " -#~ "way of doing this." +#~ "Thank the authors who contributed since" +#~ " the last release. This command helps" +#~ " extract them: ``git log --format='%aN' " +#~ "v1.1.0..HEAD | sort -u``. The command" +#~ " has the same order as ``git " +#~ "shortlog``." #~ msgstr "" +#~ "Remerciez les auteurs qui ont contribué" +#~ " depuis la dernière version. Cette " +#~ "commande permet de les extraire : " +#~ "``git log --format='%aN' v1.1.0..HEAD | " +#~ "sort -u``. La commande a le même" +#~ " ordre que ``git shortlog``." -#~ msgid "Release Process" -#~ msgstr "Publier Flower" +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" +#~ "Mettez à jour l'en-tête de section" +#~ " ``changelog.md`` ``Unreleased`` pour qu'il " +#~ "contienne le numéro de version et " +#~ "la date de la version que vous " +#~ "construisez. Créez une demande de " +#~ "traction avec le changement." 
-#~ msgid "Virtual Env Installation" -#~ msgstr "Virtualenv avec Anaconda" +#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``" +#~ msgstr "" +#~ "Marquez le commit de la version " +#~ "avec le numéro de version dès que" +#~ " le PR est fusionné : ``git tag" +#~ " v0.12.3``, puis ``git push --tags``" -#~ msgid "Install development versions" -#~ msgstr "Installer les versions de développement de Flower" +#~ msgid "" +#~ "Build the release with ``./dev/build.sh``, " +#~ "then publish it with ``./dev/publish.sh``" +#~ msgstr "" +#~ "Construisez la version avec " +#~ "``./dev/build.sh``, puis publiez-la avec " +#~ "``./dev/publish.sh``" -#~ msgid "Set up a virtual env" -#~ msgstr "Mettre en place un environment virtuel" +#~ msgid "" +#~ "Create an entry in GitHub releases " +#~ "with the release notes for the " +#~ "previously tagged commit and attach the" +#~ " build artifacts (:code:`.whl` and " +#~ ":code:`.tar.gz`)." +#~ msgstr "" +#~ "Crée une entrée dans GitHub releases " +#~ "avec les notes de version pour le" +#~ " commit précédemment étiqueté et attache" +#~ " les artefacts de construction " +#~ "(:code:`.whl` et :code:`.tar.gz`)." #~ msgid "" -#~ "Note that, in order to build the" -#~ " documentation locally (with ``poetry run" -#~ " make html``, like described below), " -#~ "`Pandoc _` needs " -#~ "to be installed on the system." +#~ "Second, create a virtual environment " +#~ "(and activate it). 
If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" #~ msgstr "" -#~ "Notez que, pour construire la " -#~ "documentation localement (avec ``poetry run" -#~ " make html``, comme décrit ci-" -#~ "dessous), ``Pandoc _`" -#~ " doit être installé sur le système." +#~ "Deuxièmement, créer un environnement virtuel" +#~ " (et l'activer). Si vous choisissez " +#~ "d'utiliser :code:`pyenv` (avec le plugin " +#~ ":code:`pyenv-virtualenv`) et que vous " +#~ "l'avez déjà installé, vous pouvez " +#~ "utiliser le script suivant (par défaut" +#~ " il utilisera :code:`Python 3.8.17`, mais" +#~ " vous pouvez le changer en " +#~ "fournissant une :code:`` spécifique)::" -#~ msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" -#~ msgstr "Un fine-tuning de LLaMA 2 avec Hugging Face et PyTorch" +#~ msgid "server.strategy.FedAvg" +#~ msgstr "serveur.stratégie.FedAvg" -#~ msgid "XGBoost" -#~ msgstr "XGBoost" +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "stratégie.serveur.FedAvgM" -#~ msgid "Android ONNX on-device training" +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "server.strategy.QFedAvg" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "serveur.stratégie.FedOpt" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "serveur.stratégie.FedProx" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "serveur.stratégie.FedAdagrad" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "serveur.stratégie.FedAdam" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "serveur.stratégie.FedYogi" + +#~ msgid "" +#~ "`achiverram28`, `Adam Narozniak`, `Anass " +#~ "Anhari`, `Charles Beauville`, `Dana-Farber`," +#~ " `Daniel J. 
Beutel`, `Daniel Nata " +#~ "Nugraha`, `Edoardo Gabrielli`, `eunchung`, " +#~ "`Gustavo Bertoli`, `Heng Pan`, `Javier`, " +#~ "`Mahdi`, `Ruth Galindo`, `Steven Hé " +#~ "(Sīchàng)`, `Taner Topal`" #~ msgstr "" -#~ "Utiliser Android ONNX pour faire du " -#~ "training directement sur le téléphone" -#~ msgid "Contribute on GitHub" -#~ msgstr "Contribuer sur GitHub" +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" +#~ "Chargeons maintenant l'ensemble de formation" +#~ " et de test CIFAR-10, partitionnons-" +#~ "les en dix ensembles de données " +#~ "plus petits (chacun divisé en ensemble" +#~ " de formation et de validation), et" +#~ " enveloppons les partitions résultantes en" +#~ " créant un PyTorch ``DataLoader`` pour " +#~ "chacun d'entre eux :" -#~ msgid "How to write a good PR title" -#~ msgstr "Comment écrire un bon titre de PR" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " horizontal en utilisant XGBoost et " +#~ "Flower !" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. Here's a guide to help " -#~ "you write a good GitHub PR title:" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en " +#~ "savoir plus." 
+ +#~ msgid "|3ff4c820a01d4a5abb022617de537c54|" +#~ msgstr "" + +#~ msgid "|7f1889391ad448e2a65920165f0d798c|" #~ msgstr "" -#~ "Un titre de PR bien choisi permet" -#~ " aux autres développeurs de rapidement " -#~ "comprendre l'intérêt et le scope des " -#~ "changements proposés. Voici un guide " -#~ "pour vous aider à écrire des bons" -#~ " titres de PR :" -#~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ msgid "|a171dc4a0d044e70b5d585cc10ace0e0|" #~ msgstr "" -#~ "1. Soyez clair et concis : Donnez" -#~ " un résumé clair des changements de" -#~ " manière concise. 1. Utilisez des " -#~ "verbes actionnables : Commencez par des" -#~ " verbes comme \"Add\", \"Update\", ou " -#~ "\"Fix\" pour indiquer le but. 1. " -#~ "Inclure des renseignements pertinents : " -#~ "Mentionner la caractéristique ou le " -#~ "module concerné pour le contexte. 1. " -#~ "Gardez le court : Évitez les longs" -#~ " titres pour une lisibilité facile. " -#~ "1. Utiliser une bonne capitalisation et" -#~ " une ponctuation : Suivre les règles" -#~ " de grammaire pour la clarté." 
-#~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" +#~ msgid "|fe518aa0d86341f7b2fc87bd6e3bbf0c|" #~ msgstr "" -#~ "Commençons par quelques exemples de " -#~ "titres qui devraient être évités parce" -#~ " qu'ils ne fournissent pas d'information" -#~ " significative :" -#~ msgid "Implement Algorithm" -#~ msgstr "Implement Algorithm" +#~ msgid "|6abfdf0dade44469ae9f08c8dc7d148c|" +#~ msgstr "" -#~ msgid "Database" -#~ msgstr "Base de données" +#~ msgid "|b4f147db24bb4da9a786e1d6676a1c2d|" +#~ msgstr "" -#~ msgid "Add my_new_file.py to codebase" -#~ msgstr "Add my_new_file.py to codebase" +#~ msgid "|5c62032f589a457bb37b5fee5b2adbde|" +#~ msgstr "" -#~ msgid "Improve code in module" -#~ msgstr "Improve code in module" +#~ msgid "|f154df1846dd44f79a94f1dc3ae8b088|" +#~ msgstr "" -#~ msgid "Change SomeModule" -#~ msgstr "Change SomeModule" +#~ msgid "|9d20be8160f7451fb0f33b194506503f|" +#~ msgstr "" -#~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" +#~ msgid "|3d949f76988443c59990d2e64f05c386|" #~ msgstr "" -#~ "Voici quelques bons exemples qui " -#~ "fournissent de l'information utile sans " -#~ "répéter comment ils le font, comme " -#~ "cela est déjà visible dans la " -#~ "section \"Files changed\" de la PR " -#~ ":" -#~ msgid "Update docs banner to mention Flower Summit 2023" -#~ msgstr "Update docs banner to mention Flower Summit 2023" +#~ msgid "|526c6d9140f6404f8a226d9056327b3b|" +#~ msgstr "" -#~ msgid "Remove unnecessary XGBoost dependency" -#~ msgstr "Remove unnecessary XGBoost dependency" +#~ msgid "|a5f6af14cd7c4550929b17f83b4f63c7|" +#~ msgstr "" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" -#~ msgstr "Remove redundant attributes in strategies subclassing 
FedAvg" +#~ msgid "|bcd571c4f4ee4803a54f71b5c20448cb|" +#~ msgstr "" -#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid "|c76452ae1ed84965be7ef23c72b95845|" #~ msgstr "" -#~ "Ajoute une tâche CI pour déployer " -#~ "le système de mise en scène " -#~ "lorsque la branche `main` change" #~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." #~ msgstr "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" -#~ msgid "Differential privacy" -#~ msgstr "Confidentialité différentielle" +#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." +#~ msgstr "" #~ msgid "" -#~ "The Flower server does not prescribe " -#~ "a way to aggregate evaluation results," -#~ " but it enables the user to " -#~ "fully customize result aggregation." 
+#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." #~ msgstr "" -#~ msgid "Configure logging" -#~ msgstr "Configurer les clients" +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "Exemple : PyTorch et MNIST" #~ msgid "" -#~ "The Flower logger keeps track of " -#~ "all core events that take place in" -#~ " federated learning workloads. It presents" -#~ " information by default following a " -#~ "standard message format:" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." #~ msgstr "" -#~ "L'enregistreur de Flower garde la trace" -#~ " de tous les événements principaux " -#~ "qui ont lieu dans les charges de" -#~ " travail de l'apprentissage fédéré. Il " -#~ "présente les informations par défaut en" -#~ " suivant un format de message " -#~ "standard :" +#~ "Dans ce tutoriel, nous allons apprendre," +#~ " comment former un réseau neuronal " +#~ "convolutif sur MNIST en utilisant Flower" +#~ " et PyTorch." #~ msgid "" -#~ "containing relevant information including: log" -#~ " message level (e.g. :code:`INFO`, " -#~ ":code:`DEBUG`), a timestamp, the line " -#~ "where the logging took place from, " -#~ "as well as the log message itself." 
-#~ " In this way, the logger would " -#~ "typically display information on your " -#~ "terminal as follows:" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" #~ msgstr "" -#~ "contenant des informations pertinentes, " -#~ "notamment : le niveau du message " -#~ "de journal (par exemple :code:`INFO`, " -#~ ":code:`DEBUG`), un horodatage, la ligne " -#~ "à partir de laquelle l'enregistrement a" -#~ " eu lieu, ainsi que le message " -#~ "de journal lui-même. De cette " -#~ "façon, le logger afficherait typiquement " -#~ "des informations sur ton terminal comme" -#~ " suit :" +#~ "Puisque nous voulons utiliser PyTorch " +#~ "pour résoudre une tâche de vision " +#~ "par ordinateur, installons PyTorch et la" +#~ " bibliothèque **torchvision** :" -#~ msgid "Saving log to file" -#~ msgstr "Enregistrement du journal dans un fichier" +#~ msgid "Ready... Set... Train!" +#~ msgstr "Prêts... prêts... entraînez-vous !" #~ msgid "" -#~ "By default, the Flower log is " -#~ "outputted to the terminal where you " -#~ "launch your Federated Learning workload " -#~ "from. This applies for both gRPC-" -#~ "based federation (i.e. when you do " -#~ ":code:`fl.server.start_server`) and when using " -#~ "the :code:`VirtualClientEngine` (i.e. when you" -#~ " do :code:`fl.simulation.start_simulation`). In " -#~ "some situations you might want to " -#~ "save this log to disk. You can " -#~ "do so by calling the " -#~ "`fl.common.logger.configure() " -#~ "`_" -#~ " function. For example:" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. 
We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." +#~ msgstr "" +#~ "Maintenant que nous avons installé " +#~ "toutes nos dépendances, lançons un " +#~ "simple entraînement distribué avec deux " +#~ "clients et un serveur. Notre procédure" +#~ " d'entraînement et l'architecture de notre" +#~ " réseau sont basées sur l'exemple " +#~ "MNIST de base de PyTorch " +#~ "`_. Cela" +#~ " te permettra de voir à quel " +#~ "point il est facile d'envelopper ton " +#~ "code avec Flower et de commencer " +#~ "l'entraînement de manière fédérée. Nous " +#~ "te fournissons deux scripts d'aide, à" +#~ " savoir *run-server.sh*, et *run-" +#~ "clients.sh*. N'aie pas peur de regarder" +#~ " à l'intérieur, ils sont assez " +#~ "simples =)." + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." #~ msgstr "" -#~ "Par défaut, le journal de Flower " -#~ "est affiché dans le terminal à " -#~ "partir duquel tu as lancé ta " -#~ "charge de travail d'apprentissage fédéré. " -#~ "Cela s'applique à la fois à la " -#~ "fédération basée sur gRPC (c'est-à-dire " -#~ "lorsque tu fais :code:`fl.server.start_server`) " -#~ "et à l'utilisation du " -#~ ":code:`VirtualClientEngine` (c'est-à-dire lorsque tu" -#~ " fais :code:`fl.simulation.start_simulation`). Dans " -#~ "certaines situations, tu peux vouloir " -#~ "sauvegarder ce journal sur le disque." 
-#~ " Tu peux le faire en appelant " -#~ "la fonction `fl.common.logger.configure() " -#~ "`_." -#~ " Par exemple :" +#~ "Et voilà ! Tu devrais voir la " +#~ "procédure d'entraînement et, après quelques" +#~ " itérations, la précision du test " +#~ "pour chaque client." + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." #~ msgid "" -#~ "With the above, Flower will record " -#~ "the log you see on your terminal" -#~ " to :code:`log.txt`. This file will " -#~ "be created in the same directory " -#~ "as were you are running the code" -#~ " from. If we inspect we see the" -#~ " log above is also recorded but " -#~ "prefixing with :code:`identifier` each line:" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" #~ msgstr "" -#~ "Avec ce qui précède, Flower enregistrera" -#~ " le journal que tu vois sur ton" -#~ " terminal dans :code:`log.txt`. Ce fichier" -#~ " sera créé dans le même répertoire" -#~ " que celui à partir duquel tu " -#~ "exécutes le code. Si nous inspectons," -#~ " nous voyons que le journal ci-" -#~ "dessus est également enregistré, mais en" -#~ " préfixant chaque ligne avec " -#~ ":code:`identifier` :" - -#~ msgid "Log your own messages" -#~ msgstr "Enregistrer tes propres messages" +#~ "Dans le script d'aide au serveur " +#~ "*run-server.sh*, tu trouveras le code " +#~ "suivant qui exécute le fichier " +#~ ":code:`server.py`" #~ msgid "" -#~ "You might expand the information shown" -#~ " by default with the Flower logger" -#~ " by adding more messages relevant to" -#~ " your application. You can achieve " -#~ "this easily as follows." +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. 
Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." #~ msgstr "" -#~ "Tu peux élargir les informations " -#~ "affichées par défaut avec le logger " -#~ "Flower en ajoutant d'autres messages " -#~ "pertinents pour ton application. Tu peux" -#~ " y parvenir facilement en procédant " -#~ "comme suit." +#~ "Nous pouvons aller un peu plus " +#~ "loin et voir que :code:`server.py` lance" +#~ " simplement un serveur qui coordonnera " +#~ "trois tours de formation. Flower Les " +#~ "serveurs sont très personnalisables, mais " +#~ "pour les charges de travail simples, " +#~ "nous pouvons démarrer un serveur à " +#~ "l'aide de la fonction :ref:`start_server " +#~ "` et " +#~ "laisser toutes les possibilités de " +#~ "configuration à leurs valeurs par " +#~ "défaut, comme on peut le voir " +#~ "ci-dessous." #~ msgid "" -#~ "In this way your logger will show," -#~ " in addition to the default messages," -#~ " the ones introduced by the clients" -#~ " as specified above." +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." #~ msgstr "" -#~ "De cette façon, ton logger affichera," -#~ " en plus des messages par défaut, " -#~ "ceux introduits par les clients comme" -#~ " spécifié ci-dessus." - -#~ msgid "Log to a remote service" -#~ msgstr "Se connecter à un service distant" +#~ "Ensuite, jetons un coup d'œil au " +#~ "fichier *run-clients.sh*. Tu verras " +#~ "qu'il contient la boucle principale qui" +#~ " démarre un ensemble de *clients*." #~ msgid "" -#~ "The :code:`fl.common.logger.configure` function, " -#~ "also allows specifying a host to " -#~ "which logs can be pushed (via " -#~ ":code:`POST`) through a native Python " -#~ ":code:`logging.handler.HTTPHandler`. 
This is a " -#~ "particularly useful feature in " -#~ ":code:`gRPC`-based Federated Learning workloads " -#~ "where otherwise gathering logs from all" -#~ " entities (i.e. the server and the" -#~ " clients) might be cumbersome. Note " -#~ "that in Flower simulation, the server" -#~ " automatically displays all logs. You " -#~ "can still specify a :code:`HTTPHandler` " -#~ "should you wish to backup or " -#~ "analyze the logs somewhere else." +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." #~ msgstr "" -#~ "La fonction :code:`fl.common.logger.configure` " -#~ "permet également de spécifier un hôte" -#~ " vers lequel les journaux peuvent " -#~ "être envoyés (via :code:`POST`) par " -#~ "l'intermédiaire d'un :code:`logging.handler.HTTPHandler`" -#~ " natif de Python. Il s'agit d'une " -#~ "fonction particulièrement utile dans les " -#~ "charges de travail d'apprentissage fédéré " -#~ "basées sur :code:`gRPC` où la collecte" -#~ " des journaux de toutes les entités" -#~ " (c'est-à-dire le serveur et les " -#~ "clients) pourrait s'avérer fastidieuse. Notez" -#~ " que dans la simulation Flower, le" -#~ " serveur affiche automatiquement tous les" -#~ " journaux. Vous pouvez toujours spécifier" -#~ " un :code:`HTTPHandler` si vous souhaitez" -#~ " sauvegarder ou analyser les journaux " -#~ "à un autre endroit." - -#~ msgid "Enable SSL connections" -#~ msgstr "Collecte centralisée des données" +#~ "**cid** : c'est l'identifiant du client." +#~ " C'est un nombre entier qui identifie" +#~ " de façon unique l'identifiant du " +#~ "client." -#~ msgid "Python version" -#~ msgstr "Version Python" +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." #~ msgid "" -#~ "Flower requires at least `Python 3.7 " -#~ "`_, but `Python 3.8" -#~ " `_ or above is " -#~ "recommended." 
+#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." #~ msgstr "" -#~ "Flower nécessite `Python 3.7 " -#~ "`_ ou plus, nous " -#~ "recommandons `Python 3.8 " -#~ "`_." +#~ "**Cette information n'est pas requise " +#~ "par le client, mais elle nous aide" +#~ " à partitionner l'ensemble de données " +#~ "MNIST original pour nous assurer que " +#~ "chaque client travaille sur des sous-" +#~ "ensembles uniques des ensembles *formation*" +#~ " et *test*." -#~ msgid "Run simulations" -#~ msgstr "Simulation de moniteur" +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" +#~ "Encore une fois, nous pouvons aller " +#~ "plus loin et regarder dans " +#~ ":code:`flwr_example/quickstart-pytorch/client.py`. Après" +#~ " avoir parcouru le code d'analyse des" +#~ " arguments au début de notre fonction" +#~ " :code:`main`, tu trouveras un appel " +#~ "à :code:`mnist.load_data`. 
Cette fonction est" +#~ " responsable du partitionnement des " +#~ "ensembles de données MNIST originaux " +#~ "(*training* et *test*) et renvoie un " +#~ ":code:`torch.utils.data.DataLoader` s pour chacun" +#~ " d'entre eux. Nous instancions ensuite " +#~ "un objet :code:`PytorchMNISTClient` avec notre" +#~ " ID client, nos DataLoaders, le " +#~ "nombre d'époques dans chaque tour et " +#~ "le périphérique que nous voulons " +#~ "utiliser pour l'entraînement (CPU ou " +#~ "GPU)." #~ msgid "" -#~ "Simulating Federated Learning workloads is " -#~ "useful for a multitude of use-" -#~ "cases: you might want to run your" -#~ " workload on a large cohort of " -#~ "clients but without having to source," -#~ " configure and mange a large number" -#~ " of physical devices; you might want" -#~ " to run your FL workloads as " -#~ "fast as possible on the compute " -#~ "systems you have access to without " -#~ "having to go through a complex " -#~ "setup process; you might want to " -#~ "validate your algorithm on different " -#~ "scenarios at varying levels of data " -#~ "and system heterogeneity, client availability," -#~ " privacy budgets, etc. These are " -#~ "among some of the use-cases where" -#~ " simulating FL workloads makes sense. " -#~ "Flower can accommodate these scenarios " -#~ "by means of its `VirtualClientEngine " -#~ "`_ or VCE." +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." #~ msgstr "" +#~ "L'objet :code:`PytorchMNISTClient` est finalement" +#~ " transmis à :code:`fl.client.start_client` avec" +#~ " l'adresse du serveur lorsque le " +#~ "processus de formation commence." + +#~ msgid "A Closer Look" +#~ msgstr "Regarder de plus près" #~ msgid "" -#~ "The :code:`VirtualClientEngine` schedules, launches" -#~ " and manages `virtual` clients. These " -#~ "clients are identical to `non-virtual`" -#~ " clients (i.e. 
the ones you launch" -#~ " via the command `flwr.client.start_client " -#~ "`_) in the" -#~ " sense that they can be configure " -#~ "by creating a class inheriting, for " -#~ "example, from `flwr.client.NumPyClient `_ and therefore" -#~ " behave in an identical way. In " -#~ "addition to that, clients managed by " -#~ "the :code:`VirtualClientEngine` are:" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" #~ msgstr "" +#~ "Maintenant, examinons de près le " +#~ ":code:`PytorchMNISTClient` à l'intérieur du " +#~ ":code:`flwr_example.quickstart-pytorch.mnist` et " +#~ "voyons ce qu'il fait :" #~ msgid "" -#~ "resource-aware: this means that each " -#~ "client gets assigned a portion of " -#~ "the compute and memory on your " -#~ "system. You as a user can control" -#~ " this at the beginning of the " -#~ "simulation and allows you to control " -#~ "the degree of parallelism of your " -#~ "Flower FL simulation. The fewer the " -#~ "resources per client, the more clients" -#~ " can run concurrently on the same " -#~ "hardware." +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" #~ msgstr "" +#~ "La première chose à remarquer est " +#~ "que :code:`PytorchMNISTClient` instancie un " +#~ "modèle CNN dans son constructeur" #~ msgid "" -#~ "self-managed: this means that you " -#~ "as a user do not need to " -#~ "launch clients manually, instead this " -#~ "gets delegated to :code:`VirtualClientEngine`'s " -#~ "internals." +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." #~ msgstr "" +#~ "Le code du CNN est disponible sous" +#~ " :code:`quickstart-pytorch.mnist` et il est" +#~ " reproduit ci-dessous. 
Il s'agit du" +#~ " même réseau que celui que l'on " +#~ "trouve dans `Exemple basique de MNIST" +#~ " `_." #~ msgid "" -#~ "ephemeral: this means that a client " -#~ "is only materialized when it is " -#~ "required in the FL process (e.g. " -#~ "to do `fit() `_). The object is" -#~ " destroyed afterwards, releasing the " -#~ "resources it was assigned and allowing" -#~ " in this way other clients to " -#~ "participate." +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" #~ msgstr "" +#~ "La deuxième chose à noter est que" +#~ " la classe :code:`PytorchMNISTClient` hérite " +#~ "de :code:`fl.client.Client`, et qu'elle doit" +#~ " donc implémenter les méthodes suivantes" +#~ " :" #~ msgid "" -#~ "The :code:`VirtualClientEngine` implements `virtual`" -#~ " clients using `Ray `_, " -#~ "an open-source framework for scalable" -#~ " Python workloads. In particular, Flower's" -#~ " :code:`VirtualClientEngine` makes use of " -#~ "`Actors `_ to spawn `virtual` clients" -#~ " and run their workload." +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." #~ msgstr "" +#~ "En comparant la classe abstraite à " +#~ "sa classe dérivée :code:`PytorchMNISTClient`, " +#~ "tu remarqueras que :code:`fit` appelle " +#~ "une fonction :code:`train` et que " +#~ ":code:`evaluate` appelle une fonction " +#~ ":code:`test` :." 
-#~ msgid "Launch your Flower simulation" +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" #~ msgstr "" +#~ "Ces fonctions se trouvent toutes deux" +#~ " dans le même module :code:`quickstart-" +#~ "pytorch.mnist` :" #~ msgid "" -#~ "Running Flower simulations still require " -#~ "you to define your client class, a" -#~ " strategy, and utility functions to " -#~ "download and load (and potentially " -#~ "partition) your dataset. With that out" -#~ " of the way, launching your " -#~ "simulation is done with `start_simulation " -#~ "`_ " -#~ "and a minimal example looks as " -#~ "follows:" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" #~ msgstr "" +#~ "Observe que ces fonctions encapsulent " +#~ "les boucles d'entraînement et de test" +#~ " habituelles et fournissent à :code:`fit`" +#~ " et :code:`evaluate` les statistiques " +#~ "finales pour chaque tour. Tu pourrais" +#~ " les remplacer par tes boucles " +#~ "d'entraînement et de test personnalisées " +#~ "et changer l'architecture du réseau, et" +#~ " l'ensemble de l'exemple fonctionnerait " +#~ "toujours parfaitement. En fait, pourquoi " +#~ "ne pas essayer de modifier le code" +#~ " pour en faire un exemple qui " +#~ "te plairait ?" -#~ msgid "VirtualClientEngine resources" -#~ msgstr "Moteur de client virtuel" +#~ msgid "Give It a Try" +#~ msgstr "Fais un essai" #~ msgid "" -#~ "By default the VCE has access to" -#~ " all system resources (i.e. 
all CPUs," -#~ " all GPUs, etc) since that is " -#~ "also the default behavior when starting" -#~ " Ray. However, in some settings you" -#~ " might want to limit how many " -#~ "of your system resources are used " -#~ "for simulation. You can do this " -#~ "via the :code:`ray_init_args` input argument" -#~ " to :code:`start_simulation` which the VCE" -#~ " internally passes to Ray's " -#~ ":code:`ray.init` command. For a complete " -#~ "list of settings you can configure " -#~ "check the `ray.init `_ " -#~ "documentation. Do not set " -#~ ":code:`ray_init_args` if you want the " -#~ "VCE to use all your system's CPUs" -#~ " and GPUs." -#~ msgstr "" - -#~ msgid "Assigning client resources" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" #~ msgstr "" +#~ "En parcourant la description du code " +#~ "de démarrage rapide ci-dessus, tu " +#~ "auras acquis une bonne compréhension du" +#~ " fonctionnement des *clients* et des " +#~ "*serveurs* dans Flower, de l'exécution " +#~ "d'une expérience simple et de la " +#~ "structure interne d'un wrapper client. " +#~ "Voici quelques exemples que tu peux " +#~ "essayer par toi-même pour acquérir " +#~ "plus d'expérience avec Flower :" #~ msgid "" -#~ "By default the :code:`VirtualClientEngine` " -#~ "assigns a single CPU core (and " -#~ "nothing else) to each virtual client." -#~ " This means that if your system " -#~ "has 10 cores, that many virtual " -#~ "clients can be concurrently running." +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." #~ msgstr "" +#~ "Essaie de modifier :code:`PytorchMNISTClient` " +#~ "pour qu'il puisse accepter différentes " +#~ "architectures." 
#~ msgid "" -#~ "More often than not, you would " -#~ "probably like to adjust the resources" -#~ " your clients get assigned based on" -#~ " the complexity (i.e. compute and " -#~ "memory footprint) of your FL workload." -#~ " You can do so when starting " -#~ "your simulation by setting the argument" -#~ " `client_resources` to `start_simulation `_. Two " -#~ "keys are internally used by Ray to" -#~ " schedule and spawn workloads (in our" -#~ " case Flower clients):" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" #~ msgstr "" +#~ "Modifie la fonction :code:`train` pour " +#~ "qu'elle accepte différents optimiseurs" -#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" #~ msgstr "" +#~ "Modifie la fonction :code:`test` pour " +#~ "qu'elle prouve non seulement le top-1" +#~ " (précision normale) mais aussi le " +#~ "top-5 ?" #~ msgid "" -#~ ":code:`num_gpus` indicates the **ratio** of" -#~ " GPU memory a client gets assigned." +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" #~ msgstr "" +#~ "Essaie d'adapter le code à des " +#~ "images et à des ensembles de " +#~ "données plus grands. Pourquoi ne pas " +#~ "essayer de s'entraîner sur ImageNet avec" +#~ " un ResNet-50 ?" -#~ msgid "Let's see a few examples:" -#~ msgstr "" +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" 
#~ msgid "" -#~ "While the :code:`client_resources` can be " -#~ "used to control the degree of " -#~ "concurrency in your FL simulation, this" -#~ " does not stop you from running " -#~ "dozens, hundreds or even thousands of" -#~ " clients in the same round and " -#~ "having orders of magnitude more " -#~ "`dormant` (i.e. not participating in a" -#~ " round) clients. Let's say you want" -#~ " to have 100 clients per round " -#~ "but your system can only accommodate " -#~ "8 clients concurrently. The " -#~ ":code:`VirtualClientEngine` will schedule 100 " -#~ "jobs to run (each simulating a " -#~ "client sampled by the strategy) and " -#~ "then will execute them in a " -#~ "resource-aware manner in batches of " -#~ "8." +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." #~ msgstr "" +#~ "Flower fournit des classes d'enveloppe " +#~ "de confidentialité différentielle (DP) pour" +#~ " l'intégration facile des garanties " +#~ "centrales de DP fournies par DP-" +#~ "FedAvg dans les pipelines de formation" +#~ " définis dans n'importe lequel des " +#~ "divers cadres de ML avec lesquels " +#~ "Flower est compatible." #~ msgid "" -#~ "To understand all the intricate details" -#~ " on how resources are used to " -#~ "schedule FL clients and how to " -#~ "define custom resources, please take a" -#~ " look at the `Ray documentation " -#~ "`_." +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." 
#~ msgstr "" - -#~ msgid "Simulation examples" -#~ msgstr "Exemples de PyTorch" +#~ "Note que ces composants sont encore " +#~ "expérimentaux, la configuration correcte du" +#~ " DP pour une tâche spécifique est " +#~ "encore un problème non résolu." #~ msgid "" -#~ "A few ready-to-run complete " -#~ "examples for Flower simulation in " -#~ "Tensorflow/Keras and PyTorch are provided " -#~ "in the `Flower repository " -#~ "`_. You can run " -#~ "them on Google Colab too:" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." #~ msgstr "" +#~ "Le nom DP-FedAvg est trompeur car" +#~ " il peut être appliqué à n'importe" +#~ " quel algorithme FL qui se conforme" +#~ " à la structure générale prescrite " +#~ "par la famille d'algorithmes FedOpt." + +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" #~ msgid "" -#~ "`Tensorflow/Keras Simulation " -#~ "`_: 100 clients collaboratively " -#~ "train a MLP model on MNIST." +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." #~ msgstr "" -#~ "`Quickstart TensorFlow (Code) " -#~ "`_" +#~ "DP-FedAvg, proposé à l'origine par " +#~ "McMahan et al. [mcmahan]_ et étendu " +#~ "par Andrew et al. [andrew]_, est " +#~ "essentiellement FedAvg avec les modifications" +#~ " suivantes." #~ msgid "" -#~ "`PyTorch Simulation " -#~ "`_: 100 clients collaboratively train" -#~ " a CNN model on MNIST." +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." 
#~ msgstr "" -#~ "`Quickstart PyTorch (Code) " -#~ "`_" +#~ "**Clipping** : L'influence de la mise" +#~ " à jour de chaque client est " +#~ "limitée en l'écrêtant. Ceci est réalisé" +#~ " en imposant un plafond à la " +#~ "norme L2 de la mise à jour, " +#~ "en la réduisant si nécessaire." #~ msgid "" -#~ "Flower's :code:`VirtualClientEngine` allows you " -#~ "to run FL simulations across multiple" -#~ " compute nodes. Before starting your " -#~ "multi-node simulation ensure that you:" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." #~ msgstr "" +#~ "**Bruit** : un bruit gaussien, calibré" +#~ " sur le seuil d'écrêtage, est ajouté" +#~ " à la moyenne calculée au niveau " +#~ "du serveur." -#~ msgid "Have the same Python environment in all nodes." +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." #~ msgstr "" +#~ "Il a été démontré que la " +#~ "distribution de la norme de mise à" +#~ " jour varie d'une tâche à l'autre " +#~ "et évolue au fur et à mesure " +#~ "de la formation. C'est pourquoi nous " +#~ "utilisons une approche adaptative [andrew]_" +#~ " qui ajuste continuellement le seuil " +#~ "d'écrêtage pour suivre un quantile " +#~ "prédéfini de la distribution de la " +#~ "norme de mise à jour." -#~ msgid "Have a copy of your code (e.g. your entire repo) in all nodes." 
-#~ msgstr "" +#~ msgid "Simplifying Assumptions" +#~ msgstr "Simplifier les hypothèses" #~ msgid "" -#~ "Have a copy of your dataset in " -#~ "all nodes (more about this in " -#~ ":ref:`simulation considerations `)" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." #~ msgstr "" +#~ "Nous formulons (et tentons d'appliquer) " +#~ "un certain nombre d'hypothèses qui " +#~ "doivent être satisfaites pour que le " +#~ "processus de formation réalise réellement " +#~ "les garanties :math:`(\\epsilon, \\delta)` que" +#~ " l'utilisateur a à l'esprit lorsqu'il " +#~ "configure l'installation." #~ msgid "" -#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " -#~ "`start_simulation `_ so the " -#~ ":code:`VirtualClientEngine` attaches to a " -#~ "running Ray instance." +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." #~ msgstr "" +#~ "**Sous-échantillonnage de taille fixe** " +#~ ":Des sous-échantillons de taille fixe" +#~ " des clients doivent être prélevés à" +#~ " chaque tour, par opposition aux " +#~ "sous-échantillons de Poisson de taille " +#~ "variable." #~ msgid "" -#~ "Start Ray on you head node: on " -#~ "the terminal type :code:`ray start " -#~ "--head`. This command will print a " -#~ "few lines, one of which indicates " -#~ "how to attach other nodes to the" -#~ " head node." +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." 
#~ msgstr "" +#~ "**Moyenne non pondérée** : Les " +#~ "contributions de tous les clients " +#~ "doivent être pondérées de façon égale" +#~ " dans l'ensemble afin que le serveur" +#~ " n'ait pas à connaître à l'avance " +#~ "la somme des poids de tous les " +#~ "clients disponibles pour la sélection." #~ msgid "" -#~ "Attach other nodes to the head " -#~ "node: copy the command shown after " -#~ "starting the head and execute it " -#~ "on terminal of a new node: for " -#~ "example :code:`ray start " -#~ "--address='192.168.1.132:6379'`" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." #~ msgstr "" +#~ "**Aucune défaillance de client** : " +#~ "L'ensemble des clients disponibles doit " +#~ "rester constant pendant toutes les " +#~ "séries de formation. En d'autres termes," +#~ " les clients ne peuvent pas " +#~ "abandonner ou échouer." #~ msgid "" -#~ "With all the above done, you can" -#~ " run your code from the head " -#~ "node as you would if the " -#~ "simulation was running on a single " -#~ "node." +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." #~ msgstr "" +#~ "Les deux premiers sont utiles pour " +#~ "éliminer une multitude de complications " +#~ "liées au calibrage du bruit en " +#~ "fonction du seuil d'écrêtage, tandis que" +#~ " le troisième est nécessaire pour se" +#~ " conformer aux hypothèses de l'analyse " +#~ "de la vie privée." #~ msgid "" -#~ "Once your simulation is finished, if " -#~ "you'd like to dismantle your cluster " -#~ "you simply need to run the command" -#~ " :code:`ray stop` in each node's " -#~ "terminal (including the head node)." 
+#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." #~ msgstr "" +#~ "Ces restrictions sont conformes aux " +#~ "contraintes imposées par Andrew et al." +#~ " [andrew]_." -#~ msgid "Multi-node simulation good-to-know" +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "Responsabilité personnalisable pour l'injection de bruit" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." #~ msgstr "" +#~ "Contrairement à d'autres implémentations où" +#~ " l'ajout de bruit est effectué au " +#~ "niveau du serveur, tu peux configurer" +#~ " le site d'injection de bruit pour" +#~ " qu'il corresponde mieux à ton modèle" +#~ " de menace. Nous offrons aux " +#~ "utilisateurs la possibilité de configurer " +#~ "l'entraînement de telle sorte que chaque" +#~ " client ajoute indépendamment une petite" +#~ " quantité de bruit à la mise à" +#~ " jour écrêtée, ce qui fait que " +#~ "le simple fait d'agréger les mises " +#~ "à jour bruyantes équivaut à l'ajout " +#~ "explicite de bruit à l'agrégat non " +#~ "bruyant au niveau du serveur." 
#~ msgid "" -#~ "Here we list a few interesting " -#~ "functionality when running multi-node FL" -#~ " simulations:" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." #~ msgstr "" +#~ "Pour être précis, si nous laissons " +#~ ":math:`m` être le nombre de clients " +#~ "échantillonnés à chaque tour et " +#~ ":math:\\sigma_\\Delta` être l'échelle du bruit" +#~ " gaussien total qui doit être ajouté" +#~ " à la somme des mises à jour" +#~ " du modèle, nous pouvons utiliser des" +#~ " mathématiques simples pour montrer que " +#~ "cela équivaut à ce que chaque " +#~ "client ajoute du bruit avec l'échelle" +#~ " :math:\\sigma_\\Delta/\\sqrt{m}`." + +#~ msgid "Wrapper-based approach" +#~ msgstr "Approche basée sur l'enveloppe" #~ msgid "" -#~ "User :code:`ray status` to check all " -#~ "nodes connected to your head node " -#~ "as well as the total resources " -#~ "available to the :code:`VirtualClientEngine`." +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. 
Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." #~ msgstr "" +#~ "L'introduction du DP dans une charge " +#~ "de travail existante peut être " +#~ "considérée comme l'ajout d'une couche de" +#~ " sécurité supplémentaire autour d'elle. " +#~ "Cela nous a incités à fournir la" +#~ " logique supplémentaire côté serveur et " +#~ "côté client nécessaire pour rendre le" +#~ " processus de formation différentiellement " +#~ "privé en tant qu'enveloppes pour les " +#~ "instances des classes abstraites " +#~ ":code:`Strategy` et :code:`NumPyClient` " +#~ "respectivement. Cette approche basée sur " +#~ "l'enveloppe a l'avantage d'être facilement " +#~ "composable avec d'autres enveloppes que " +#~ "quelqu'un pourrait contribuer à la " +#~ "bibliothèque Flower à l'avenir, par " +#~ "exemple, pour l'agrégation sécurisée. " +#~ "L'utilisation de l'héritage à la place" +#~ " peut être fastidieuse car cela " +#~ "nécessiterait la création de nouvelles " +#~ "sous-classes chaque fois qu'une nouvelle" +#~ " classe mettant en œuvre :code:`Strategy`" +#~ " ou :code:`NumPyClient` est définie." #~ msgid "" -#~ "When attaching a new node to the" -#~ " head, all its resources (i.e. all" -#~ " CPUs, all GPUs) will be visible " -#~ "by the head node. This means that" -#~ " the :code:`VirtualClientEngine` can schedule " -#~ "as many `virtual` clients as that " -#~ "node can possible run. In some " -#~ "settings you might want to exclude " -#~ "certain resources from the simulation. 
" -#~ "You can do this by appending " -#~ "`--num-cpus=` and/or `--num-" -#~ "gpus=` in any :code:`ray " -#~ "start` command (including when starting " -#~ "the head)" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." #~ msgstr "" - -#~ msgid "Considerations for simulations" -#~ msgstr "Simulation de moniteur" +#~ "La première version de notre solution" +#~ " consistait à définir un décorateur " +#~ "dont le constructeur acceptait, entre " +#~ "autres, une variable à valeur booléenne" +#~ " indiquant si l'écrêtage adaptatif devait" +#~ " être activé ou non. Nous nous " +#~ "sommes rapidement rendu compte que cela" +#~ " encombrerait sa fonction :code:`__init__()` " +#~ "avec des variables correspondant aux " +#~ "hyperparamètres de l'écrêtage adaptatif qui" +#~ " resteraient inutilisées lorsque celui-ci" +#~ " était désactivé. Une implémentation plus" +#~ " propre pourrait être obtenue en " +#~ "divisant la fonctionnalité en deux " +#~ "décorateurs, :code:`DPFedAvgFixed` et " +#~ ":code:`DPFedAvgAdaptive`, le second sous-" +#~ "classant le premier. 
Les constructeurs " +#~ "des deux classes acceptent un paramètre" +#~ " booléen :code:`server_side_noising` qui, comme" +#~ " son nom l'indique, détermine l'endroit " +#~ "où le noising doit être effectué." #~ msgid "" -#~ "We are actively working on these " -#~ "fronts so to make it trivial to" -#~ " run any FL workload with Flower " -#~ "simulation." +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." #~ msgstr "" +#~ "Les capacités côté serveur requises pour" +#~ " la version originale de DP-FedAvg," +#~ " c'est-à-dire celle qui effectue un " +#~ "écrêtage fixe, peuvent être entièrement " +#~ "capturées à l'aide d'une logique " +#~ "d'enveloppement pour les deux méthodes " +#~ "suivantes de la classe abstraite " +#~ ":code:`Strategy`." #~ msgid "" -#~ "The current VCE allows you to run" -#~ " Federated Learning workloads in simulation" -#~ " mode whether you are prototyping " -#~ "simple scenarios on your personal laptop" -#~ " or you want to train a complex" -#~ " FL pipeline across multiple high-" -#~ "performance GPU nodes. While we add " -#~ "more capabilities to the VCE, the " -#~ "points below highlight some of the " -#~ "considerations to keep in mind when " -#~ "designing your FL pipeline with Flower." -#~ " We also highlight a couple of " -#~ "current limitations in our implementation." 
+#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." #~ msgstr "" - -#~ msgid "GPU resources" -#~ msgstr "Ressources" +#~ ":code:`configure_fit()` : Le dictionnaire de" +#~ " configuration envoyé par la " +#~ ":code:`Strategy` enveloppée à chaque client" +#~ " doit être augmenté d'une valeur " +#~ "supplémentaire égale au seuil d'écrêtage " +#~ "(indiqué sous :code:`dpfedavg_clip_norm`) et, " +#~ "si :code:`server_side_noising=true`, d'une autre " +#~ "égale à l'échelle du bruit gaussien " +#~ "qui doit être ajouté au client " +#~ "(indiqué sous :code:`dpfedavg_noise_stddev`)." #~ msgid "" -#~ "The VCE assigns a share of GPU " -#~ "memory to a client that specifies " -#~ "the key :code:`num_gpus` in " -#~ ":code:`client_resources`. This being said, Ray" -#~ " (used internally by the VCE) is " -#~ "by default:" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. 
On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." #~ msgstr "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." #~ msgid "" -#~ "not aware of the total VRAM " -#~ "available on the GPUs. This means " -#~ "that if you set :code:`num_gpus=0.5` and" -#~ " you have two GPUs in your " -#~ "system with different (e.g. 32GB and " -#~ "8GB) VRAM amounts, they both would " -#~ "run 2 clients concurrently." 
+#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." #~ msgstr "" +#~ "Nous ne pouvons pas modifier directement" +#~ " la fonction d'agrégation de la " +#~ "stratégie enveloppée pour la forcer à" +#~ " ajouter du bruit à l'agrégat, c'est" +#~ " pourquoi nous simulons le bruit côté" +#~ " client pour mettre en œuvre le " +#~ "bruit côté serveur." #~ msgid "" -#~ "not aware of other unrelated (i.e. " -#~ "not created by the VCE) workloads " -#~ "are running on the GPU. Two " -#~ "takeaways from this are:" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." #~ msgstr "" +#~ "Ces modifications ont été regroupées " +#~ "dans une classe appelée :code:`DPFedAvgFixed`," +#~ " dont le constructeur accepte la " +#~ "stratégie décorée, le seuil d'écrêtage " +#~ "et le nombre de clients échantillonnés" +#~ " à chaque tour comme arguments " +#~ "obligatoires. L'utilisateur est censé " +#~ "spécifier le seuil d'écrêtage car " +#~ "l'ordre de grandeur des normes de " +#~ "mise à jour dépend fortement du " +#~ "modèle formé et fournir une valeur " +#~ "par défaut serait trompeur. 
Le nombre" +#~ " de clients échantillonnés à chaque " +#~ "tour est nécessaire pour calculer la " +#~ "quantité de bruit qui doit être " +#~ "ajoutée à chaque mise à jour " +#~ "individuelle, que ce soit par le " +#~ "serveur ou par les clients." #~ msgid "" -#~ "Your Flower server might need a " -#~ "GPU to evaluate the `global model` " -#~ "after aggregation (by instance when " -#~ "making use of the `evaluate method " -#~ "`_)" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." #~ msgstr "" +#~ "La fonctionnalité supplémentaire nécessaire " +#~ "pour faciliter l'écrêtage adaptatif a " +#~ "été fournie dans :code:`DPFedAvgAdaptive`, une" +#~ " sous-classe de :code:`DPFedAvgFixed`. Elle" +#~ " remplace les méthodes mentionnées ci-" +#~ "dessus pour effectuer les opérations " +#~ "suivantes." #~ msgid "" -#~ "If you want to run several " -#~ "independent Flower simulations on the " -#~ "same machine you need to mask-out" -#~ " your GPUs with " -#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" -#~ " your experiment." +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." 
#~ msgstr "" +#~ ":code:`configure_fit()` : Il intercepte le " +#~ "dict de configuration renvoyé par " +#~ ":code:`super.configure_fit()` pour y ajouter " +#~ "la paire clé-valeur " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True`, que le " +#~ "client interprète comme une instruction " +#~ "d'inclure un bit indicateur (1 si " +#~ "la norme de mise à jour <= " +#~ "seuil d'écrêtage, 0 sinon) dans les " +#~ "résultats qu'il renvoie." #~ msgid "" -#~ "In addition, the GPU resource limits " -#~ "passed to :code:`client_resources` are not " -#~ "`enforced` (i.e. they can be exceeded)" -#~ " which can result in the situation" -#~ " of client using more VRAM than " -#~ "the ratio specified when starting the" -#~ " simulation." +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." #~ msgstr "" - -#~ msgid "TensorFlow with GPUs" -#~ msgstr "Exemples de TensorFlow" +#~ ":code:`aggregate_fit()` : Il fait suivre " +#~ "un appel à :code:`super.aggregate_fit()` d'un" +#~ " appel à :code:`__update_clip_norm__()`, une " +#~ "procédure qui ajuste le seuil d'écrêtage" +#~ " sur la base des bits indicateurs " +#~ "reçus des clients échantillonnés." #~ msgid "" -#~ "When `using a GPU with TensorFlow " -#~ "`_ nearly your " -#~ "entire GPU memory of all your GPUs" -#~ " visible to the process will be " -#~ "mapped. This is done by TensorFlow " -#~ "for optimization purposes. However, in " -#~ "settings such as FL simulations where" -#~ " we want to split the GPU into" -#~ " multiple `virtual` clients, this is " -#~ "not a desirable mechanism. Luckily we" -#~ " can disable this default behavior by" -#~ " `enabling memory growth " -#~ "`_." 
+#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." #~ msgstr "" +#~ "Les capacités requises côté client " +#~ "peuvent être entièrement capturées par " +#~ "une logique de wrapper pour la " +#~ "seule méthode :code:`fit()` de la classe" +#~ " abstraite :code:`NumPyClient`. Pour être " +#~ "précis, nous devons *post-traiter* la" +#~ " mise à jour calculée par le " +#~ "client wrapped pour l'écrêter, si " +#~ "nécessaire, à la valeur seuil fournie" +#~ " par le serveur dans le cadre " +#~ "du dictionnaire de configuration. En " +#~ "plus de cela, il peut avoir besoin" +#~ " d'effectuer un travail supplémentaire si" +#~ " l'une des clés suivantes (ou les " +#~ "deux) est également présente dans le " +#~ "dict." #~ msgid "" -#~ "This would need to be done in " -#~ "the main process (which is where " -#~ "the server would run) and in each" -#~ " Actor created by the VCE. By " -#~ "means of :code:`actor_kwargs` we can " -#~ "pass the reserved key `\"on_actor_init_fn\"`" -#~ " in order to specify a function " -#~ "to be executed upon actor " -#~ "initialization. In this case, to enable" -#~ " GPU growth for TF workloads. It " -#~ "would look as follows:" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." #~ msgstr "" +#~ ":code:`dpfedavg_noise_stddev` : Génère et " +#~ "ajoute la quantité de bruit spécifiée" +#~ " à la mise à jour de " +#~ "l'écrêtage." 
#~ msgid "" -#~ "This is precisely the mechanism used " -#~ "in `Tensorflow/Keras Simulation " -#~ "`_ example." +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." #~ msgstr "" -#~ "`Quickstart TensorFlow (Code) " -#~ "`_" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Complète " +#~ "les métriques dict dans l'objet " +#~ ":code:`FitRes` renvoyé au serveur avec " +#~ "un bit indicateur, calculé comme décrit" +#~ " précédemment." -#~ msgid "Multi-node setups" -#~ msgstr "" +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" #~ msgid "" -#~ "The VCE does not currently offer a" -#~ " way to control on which node a" -#~ " particular `virtual` client is executed." -#~ " In other words, if more than a" -#~ " single node have the resources " -#~ "needed by a client to run, then" -#~ " any of those nodes could get " -#~ "the client workload scheduled onto. " -#~ "Later in the FL process (i.e. in" -#~ " a different round) the same client" -#~ " could be executed by a different " -#~ "node. Depending on how your clients " -#~ "access their datasets, this might " -#~ "require either having a copy of " -#~ "all dataset partitions on all nodes " -#~ "or a dataset serving mechanism (e.g. " -#~ "using nfs, a database) to circumvent " -#~ "data duplication." +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." #~ msgstr "" +#~ "Supposons que tu te sois entraîné " +#~ "pendant :math:`n` tours avec la fraction" +#~ " d'échantillonnage :math:`q` et le " +#~ "multiplicateur de bruit :math:`z`. 
Afin " +#~ "de calculer la valeur :math:`epsilon` " +#~ "qui en résulterait pour un " +#~ ":math:`\\delta` particulier, le script suivant" +#~ " peut être utilisé." #~ msgid "" -#~ "By definition virtual clients are " -#~ "`stateless` due to their ephemeral " -#~ "nature. A client state can be " -#~ "implemented as part of the Flower " -#~ "client class but users need to " -#~ "ensure this saved to persistent storage" -#~ " (e.g. a database, disk) and that " -#~ "can be retrieve later by the same" -#~ " client regardless on which node it" -#~ " is running from. This is related " -#~ "to the point above also since, in" -#~ " some way, the client's dataset could" -#~ " be seen as a type of `state`." +#~ "`How to run Flower using Docker " +#~ "`_" #~ msgstr "" -#~ msgid "Save and load model checkpoints" -#~ msgstr "Sauvegarde et chargement des points de contrôle PyTorch" +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgstr "" #~ msgid "" -#~ "Flower does not automatically save model" -#~ " updates on the server-side. This " -#~ "how-to guide describes the steps " -#~ "to save (and load) model checkpoints " -#~ "in Flower." +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" #~ msgstr "" -#~ msgid "Legacy example guides" +#~ msgid ":py:obj:`flwr.server.driver `\\" #~ msgstr "" -#~ msgid "Contributor tutorials" -#~ msgstr "Configuration du contributeur" - -#~ msgid "Contributor explanations" -#~ msgstr "Explications" - -#~ msgid "Flower Framework Documentation" -#~ msgstr "Documentation de Flower" - -#~ msgid "PyTorch" -#~ msgstr "Exemples de PyTorch" - -#~ msgid "TensorFlow" -#~ msgstr "TensorFlow" - -#~ msgid "Flower CLI reference" -#~ msgstr "Client de Flower" - -#~ msgid "flwr (Python API reference)" -#~ msgstr "Référence pour l'API" - -#~ msgid "Unreleased" -#~ msgstr "Inédit" - -#~ msgid "**Deprecate Python 3.7**" -#~ msgstr "**Deprecate Python 3.7**" +#~ msgid "Flower driver SDK." 
+#~ msgstr "Serveur de Flower" -#~ msgid "" -#~ "Since Python 3.7 reached its end " -#~ "of life (EOL) on 2023-06-27, support " -#~ "for Python 3.7 is now deprecated " -#~ "and will be removed in an upcoming" -#~ " release." -#~ msgstr "" -#~ "Étant donné que Python 3.7 a " -#~ "atteint sa fin de vie (EOL) le " -#~ "2023-06-27, la prise en charge de " -#~ "Python 3.7 est désormais dépréciée et" -#~ " sera supprimée dans une prochaine " -#~ "version." +#~ msgid "driver" +#~ msgstr "serveur" #~ msgid "" -#~ "**Add new** `FedTrimmedAvg` **strategy** " -#~ "([#1769](https://github.com/adap/flower/pull/1769), " -#~ "[#1853](https://github.com/adap/flower/pull/1853))" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ "**Ajouter un nouveau** `FedTrimmedAvg` " -#~ "**stratégie** " -#~ "([#1769](https://github.com/adap/flower/pull/1769), " -#~ "[#1853](https://github.com/adap/flower/pull/1853))" #~ msgid "" -#~ "The new `FedTrimmedAvg` strategy implements" -#~ " Trimmed Mean by [Dong Yin, " -#~ "2018](https://arxiv.org/abs/1803.01498)" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" -#~ "La nouvelle stratégie `FedTrimmedAvg` met " -#~ "en œuvre la moyenne trimmée par " -#~ "[Dong Yin, 2018](https://arxiv.org/abs/1803.01498)" #~ msgid "" -#~ "**Add parameter aggregation to** `mt-" -#~ "pytorch` **code example** " -#~ "([#1785](https://github.com/adap/flower/pull/1785))" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" -#~ "**Ajouter l'agrégation des paramètres à** " -#~ "`mt-pytorch` **exemple de code** " -#~ "([#1785](https://github.com/adap/flower/pull/1785))" -#~ msgid "" -#~ "The `mt-pytorch` example shows how " -#~ "to aggregate parameters when writing a" -#~ " driver script. 
The included `driver.py`" -#~ " and `server.py` have been aligned to" -#~ " demonstrate both the low-level way" -#~ " and the high-level way of " -#~ "building server-side logic." +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." #~ msgstr "" -#~ "L'exemple `mt-pytorch` montre comment " -#~ "agréger des paramètres lors de " -#~ "l'écriture d'un script de pilote. Les" -#~ " fichiers `driver.py` et `server.py` inclus" -#~ " ont été alignés pour démontrer à " -#~ "la fois la manière de bas niveau" -#~ " et la manière de haut niveau " -#~ "de construire la logique côté serveur." -#~ msgid "" -#~ "**Introduce (experimental) gRPC request-" -#~ "response API** " -#~ "([#1867](https://github.com/adap/flower/pull/1867), " -#~ "[#1901](https://github.com/adap/flower/pull/1901))" +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" #~ msgstr "" -#~ "**Introduire l'API demande-réponse gRPC " -#~ "(expérimentale)** " -#~ "([#1867](https://github.com/adap/flower/pull/1867), " -#~ "[#1901](https://github.com/adap/flower/pull/1901))" #~ msgid "" -#~ "In addition to the existing gRPC " -#~ "API (based on bidirectional streaming) " -#~ "and the experimental REST API, there " -#~ "is now a new gRPC API that " -#~ "uses a request-response model to " -#~ "communicate with client nodes." +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" #~ msgstr "" -#~ "En plus de l'API gRPC existante " -#~ "(basée sur un flux bidirectionnel) et" -#~ " de l'API REST expérimentale, il " -#~ "existe désormais une nouvelle API gRPC" -#~ " qui utilise un modèle demande-" -#~ "réponse pour communiquer avec les nœuds" -#~ " clients." -#~ msgid "" -#~ "Please note: The gRPC request-response" -#~ " API is still experimental and will" -#~ " likely change significantly over time." +#~ msgid "Get task results." #~ msgstr "" -#~ "Remarque : l'API requête-réponse gRPC" -#~ " est encore expérimentale et est " -#~ "susceptible de changer de manière " -#~ "significative au fil du temps." 
#~ msgid "" -#~ "**Replace the eperimental** " -#~ "`start_client(rest=True)` **with the new** " -#~ "`start_client(transport=\"rest\")` " -#~ "([#1880](https://github.com/adap/flower/pull/1880))" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" #~ msgstr "" -#~ "**Remplacez le fichier expérimental** " -#~ "`start_client(rest=True) **par le nouveau** " -#~ "`start_client(transport=\"rest\")` " -#~ "([#1880](https://github.com/adap/flower/pull/1880))" -#~ msgid "" -#~ "The (experimental) `start_client` argument " -#~ "`rest` was deprecated in favor of " -#~ "a new argument `transport`. " -#~ "`start_client(transport=\"rest\")` will yield the" -#~ " same behaviour as `start_client(rest=True)` " -#~ "did before. All code should migrate " -#~ "to the new argument `transport`. The " -#~ "deprecated argument `rest` will be " -#~ "removed in a future release." +#~ msgid "Schedule tasks." #~ msgstr "" -#~ msgid "" -#~ "**Migrate experimental REST API to " -#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." #~ msgstr "" -#~ "**Migrer l'API REST expérimentale vers " -#~ "Starlette** ([2171](https://github.com/adap/flower/pull/2171))" #~ msgid "" -#~ "The (experimental) REST API used to " -#~ "be implemented in " -#~ "[FastAPI](https://fastapi.tiangolo.com/), but it has" -#~ " now been migrated to use " -#~ "[Starlette](https://www.starlette.io/) directly." +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" #~ msgstr "" -#~ "L'API REST (expérimentale) était auparavant" -#~ " implémentée dans " -#~ "[FastAPI](https://fastapi.tiangolo.com/), mais elle " -#~ "a maintenant été migrée pour utiliser" -#~ " directement [Starlette](https://www.starlette.io/)." + +#~ msgid "Request for run ID." 
+#~ msgstr "Demande pour une nouvelle Flower Baseline" #~ msgid "" -#~ "**Add a new gRPC option** " -#~ "([#2197](https://github.com/adap/flower/pull/2197))" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." #~ msgstr "" -#~ "**Ajouter une nouvelle option gRPC** " -#~ "([#2197](https://github.com/adap/flower/pull/2197))" #~ msgid "" -#~ "We now start a gRPC server with" -#~ " the `grpc.keepalive_permit_without_calls` option " -#~ "set to 0 by default. This prevents" -#~ " the clients from sending keepalive " -#~ "pings when there is no outstanding " -#~ "stream." +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" #~ msgstr "" -#~ "Nous démarrons maintenant un serveur " -#~ "gRPC avec l'option " -#~ "`grpc.keepalive_permit_without_calls` réglée sur 0" -#~ " par défaut, ce qui empêche les " -#~ "clients d'envoyer des pings de maintien" -#~ " lorsqu'il n'y a pas de flux en" -#~ " attente." + +#~ msgid "Get client IDs." +#~ msgstr "Moteur client Edge" #~ msgid "" -#~ "**General improvements** " -#~ "([#1872](https://github.com/adap/flower/pull/1872), " -#~ "[#1866](https://github.com/adap/flower/pull/1866), " -#~ "[#1884](https://github.com/adap/flower/pull/1884))" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" #~ msgstr "" -#~ "**Mettre à jour les exemples de " -#~ "code** ([#1291](https://github.com/adap/flower/pull/1291), " -#~ "[#1286](https://github.com/adap/flower/pull/1286), " -#~ "[#1282](https://github.com/adap/flower/pull/1282))" -#~ msgid "Example projects" -#~ msgstr "Exemples" +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" +#~ msgstr "" #~ msgid "" -#~ "`Flower simulation PyTorch " -#~ "`_" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." 
#~ msgstr "" -#~ "`Flower Quickstart (TensorFlow/Keras) " -#~ "`_" #~ msgid "" -#~ "`Android Kotlin example " -#~ "`_" +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." #~ msgstr "" -#~ msgid "`Android Java example `_" +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." #~ msgstr "" +#~ "Les exemples d'utilisation de Flower " +#~ "étaient auparavant regroupés avec Flower " +#~ "dans un paquet appelé ``flwr_example``. " +#~ "Nous migrons ces exemples vers des " +#~ "projets autonomes pour les rendre plus" +#~ " faciles à utiliser. Tous les " +#~ "nouveaux exemples sont basés dans le " +#~ "répertoire ``examples " +#~ "`_." -#~ msgid "Build a strategy from scratch" -#~ msgstr "Élaborer une stratégie à partir de zéro" +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "Démarrage rapide de TensorFlow/Keras" -#~ msgid "Customize the client" -#~ msgstr "Création du client IMDBC" +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "Exemples hérités (`flwr_example`)" -#~ msgid "Get started with Flower" +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." #~ msgstr "" +#~ "Les exemples d'utilisation dans `flwr_example`" +#~ " sont obsolètes et seront supprimés à" +#~ " l'avenir. De nouveaux exemples sont " +#~ "fournis en tant que projets autonomes" +#~ " dans `examples " +#~ "`_." 
+ +#~ msgid "Extra Dependencies" +#~ msgstr "Dépendances supplémentaires" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" +#~ "Le noyau du framework Flower conserve" +#~ " un ensemble minimal de dépendances. " +#~ "Les exemples démontrent Flower dans le" +#~ " contexte de différents frameworks " +#~ "d'apprentissage automatique, de sorte que " +#~ "des dépendances supplémentaires doivent être" +#~ " installées avant qu'un exemple puisse " +#~ "être exécuté." + +#~ msgid "For PyTorch examples::" +#~ msgstr "Pour les exemples de PyTorch: :" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "Pour les exemples de TensorFlow : :" -#~ msgid "Quickstart Android" -#~ msgstr "Démarrage rapide d'Android" +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "Pour les exemples PyTorch et TensorFlow: :" #~ msgid "" -#~ "Let's build a federated learning system" -#~ " using TFLite and Flower on Android!" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " en utilisant TFLite et Flower sur" -#~ " Android !" +#~ "Tu peux consulter :code:`pyproject.toml` pour" +#~ " une liste complète des extras " +#~ "possibles (section :code:`[tool.poetry.extras]`)." + +#~ msgid "PyTorch Examples" +#~ msgstr "Exemples de PyTorch" #~ msgid "" -#~ "Please refer to the `full code " -#~ "example " -#~ "`_ to" -#~ " learn more." +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." 
#~ msgstr "" -#~ "Réfère-toi à l'exemple de code " -#~ "complet " -#~ "`_ " -#~ "pour en savoir plus." +#~ "Nos exemples PyTorch sont basés sur " +#~ "PyTorch 1.7. Ils devraient fonctionner " +#~ "avec d'autres versions également. Jusqu'à " +#~ "présent, nous fournissons les exemples " +#~ "suivants." -#~ msgid "Quickstart iOS" -#~ msgstr "Démarrage rapide iOS" +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "Classification d'images CIFAR-10" #~ msgid "" -#~ "In this tutorial we will learn how" -#~ " to train a Neural Network on " -#~ "MNIST using Flower and CoreML on " -#~ "iOS devices." +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre" -#~ " à former un réseau neuronal sur " -#~ "MNIST en utilisant Flower et CoreML " -#~ "sur les appareils iOS." +#~ "`CIFAR-10 et CIFAR-100 " +#~ "`_ sont des" +#~ " ensembles de données d'images RVB " +#~ "populaires. L'exemple Flower CIFAR-10 utilise" +#~ " PyTorch pour former un classificateur " +#~ "CNN simple dans une configuration " +#~ "d'apprentissage fédéré avec deux clients." -#~ msgid "" -#~ "First of all, for running the " -#~ "Flower Python server, it is recommended" -#~ " to create a virtual environment and" -#~ " run everything within a `virtualenv " -#~ "`_. " -#~ "For the Flower client implementation in" -#~ " iOS, it is recommended to use " -#~ "Xcode as our IDE." -#~ msgstr "" -#~ "Tout d'abord, pour l'exécution du " -#~ "serveur Flower Python, il est recommandé" -#~ " de créer un environnement virtuel et" -#~ " de tout exécuter au sein d'un " -#~ "`virtualenv `_. Pour l'implémentation du client" -#~ " Flower dans iOS, il est recommandé" -#~ " d'utiliser Xcode comme notre IDE." 
+#~ msgid "First, start a Flower server:" +#~ msgstr "Tout d'abord, démarre un serveur Flower :" -#~ msgid "" -#~ "Our example consists of one Python " -#~ "*server* and two iPhone *clients* that" -#~ " all have the same model." -#~ msgstr "" -#~ "Notre exemple se compose d'un *serveur*" -#~ " Python et de deux *clients* iPhone" -#~ " qui ont tous le même modèle." +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgid "" -#~ "*Clients* are responsible for generating " -#~ "individual weight updates for the model" -#~ " based on their local datasets. These" -#~ " updates are then sent to the " -#~ "*server* which will aggregate them to" -#~ " produce a better model. Finally, the" -#~ " *server* sends this improved version " -#~ "of the model back to each " -#~ "*client*. A complete cycle of weight " -#~ "updates is called a *round*." +#~ msgid "Then, start the two clients in a new terminal window:" #~ msgstr "" -#~ "*Les clients* sont chargés de générer" -#~ " des mises à jour de poids " -#~ "individuelles pour le modèle en fonction" -#~ " de leurs ensembles de données " -#~ "locaux. Ces mises à jour sont " -#~ "ensuite envoyées au *serveur* qui les" -#~ " agrège pour produire un meilleur " -#~ "modèle. Enfin, le *serveur* renvoie " -#~ "cette version améliorée du modèle à " -#~ "chaque *client*. Un cycle complet de " -#~ "mises à jour de poids s'appelle un" -#~ " *round*." +#~ "Ensuite, démarre les deux clients dans" +#~ " une nouvelle fenêtre de terminal :" -#~ msgid "" -#~ "Now that we have a rough idea " -#~ "of what is going on, let's get " -#~ "started to setup our Flower server " -#~ "environment. We first need to install" -#~ " Flower. You can do this by " -#~ "using pip:" -#~ msgstr "" -#~ "Maintenant que nous avons une idée " -#~ "approximative de ce qui se passe, " -#~ "commençons à configurer notre environnement" -#~ " de serveur Flower. 
Nous devons " -#~ "d'abord installer Flower, ce que tu " -#~ "peux faire à l'aide de pip :" +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#~ msgid "Or Poetry:" -#~ msgstr "Ou de la poésie :" +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training using CoreML " -#~ "as our local training pipeline and " -#~ "MNIST as our dataset. For simplicity " -#~ "reasons we will use the complete " -#~ "Flower client with CoreML, that has " -#~ "been implemented and stored inside the" -#~ " Swift SDK. The client implementation " -#~ "can be seen below:" -#~ msgstr "" -#~ "Maintenant que toutes nos dépendances " -#~ "sont installées, exécutons une simple " -#~ "formation distribuée en utilisant CoreML " -#~ "comme pipeline de formation local et " -#~ "MNIST comme ensemble de données. Pour" -#~ " des raisons de simplicité, nous " -#~ "utiliserons le client Flower complet " -#~ "avec CoreML, qui a été mis en " -#~ "œuvre et stocké à l'intérieur du " -#~ "SDK Swift. La mise en œuvre du " -#~ "client peut être vue ci-dessous :" +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 Classification des images" #~ msgid "" -#~ "Let's create a new application project" -#~ " in Xcode and add :code:`flwr` as " -#~ "a dependency in your project. For " -#~ "our application, we will store the " -#~ "logic of our app in " -#~ ":code:`FLiOSModel.swift` and the UI elements" -#~ " in :code:`ContentView.swift`. We will " -#~ "focus more on :code:`FLiOSModel.swift` in " -#~ "this quickstart. Please refer to the " -#~ "`full code example " -#~ "`_ to " -#~ "learn more about the app." +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. 
The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." #~ msgstr "" -#~ "Créons un nouveau projet d'application " -#~ "dans Xcode et ajoutons :code:`flwr` " -#~ "comme dépendance dans ton projet. Pour" -#~ " notre application, nous stockerons la " -#~ "logique de notre application dans " -#~ ":code:`FLiOSModel.swift` et les éléments de" -#~ " l'interface utilisateur dans " -#~ ":code:`ContentView.swift`.Nous nous concentrerons " -#~ "davantage sur :code:`FLiOSModel.swift` dans ce" -#~ " quickstart. N'hésite pas à te " -#~ "référer à l'`exemple de code complet " -#~ "`_ pour" -#~ " en savoir plus sur l'application." +#~ "`ImageNet-2012 `_ est " +#~ "l'un des principaux ensembles de données" +#~ " de vision par ordinateur. L'exemple " +#~ "Flower ImageNet utilise PyTorch pour " +#~ "entraîner un classificateur ResNet-18 dans " +#~ "une configuration d'apprentissage fédéré avec" +#~ " dix clients." -#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" -#~ msgstr "" -#~ "Importe les paquets liés à Flower " -#~ "et CoreML dans :code:`FLiOSModel.swift` :" +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgid "" -#~ "Then add the mlmodel to the " -#~ "project simply by drag-and-drop, " -#~ "the mlmodel will be bundled inside " -#~ "the application during deployment to " -#~ "your iOS device. We need to pass" -#~ " the url to access mlmodel and " -#~ "run CoreML machine learning processes, " -#~ "it can be retrieved by calling the" -#~ " function :code:`Bundle.main.url`. For the " -#~ "MNIST dataset, we need to preprocess " -#~ "it into :code:`MLBatchProvider` object. The" -#~ " preprocessing is done inside " -#~ ":code:`DataLoader.swift`." 
+#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." #~ msgstr "" -#~ "Ensuite, ajoute le mlmodel au projet " -#~ "simplement par glisser-déposer, le " -#~ "mlmodel sera regroupé à l'intérieur de" -#~ " l'application lors du déploiement sur " -#~ "ton appareil iOS. Nous devons passer " -#~ "l'url pour accéder au mlmodel et " -#~ "exécuter les processus d'apprentissage " -#~ "automatique CoreML, elle peut être " -#~ "récupérée en appelant la fonction " -#~ ":code:`Bundle.main.url`. Pour l'ensemble de " -#~ "données MNIST, nous devons le prétraiter" -#~ " dans l'objet :code:`MLBatchProvider`. Le " -#~ "prétraitement est effectué à l'intérieur " -#~ "de :code:`DataLoader.swift`." +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/pytorch_imagenet`." + +#~ msgid "TensorFlow Examples" +#~ msgstr "Exemples de TensorFlow" #~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this informations beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." #~ msgstr "" +#~ "Nos exemples TensorFlow sont basés sur" +#~ " TensorFlow 2.0 ou une version plus" +#~ " récente. Jusqu'à présent, nous te " +#~ "proposons les exemples suivants." + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Classification d'images Fashion-MNIST" #~ msgid "" -#~ "After we have all of the necessary" -#~ " informations, let's create our Flower " -#~ "client." 
+#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." #~ msgstr "" +#~ "nous suivons cette tradition et " +#~ "fournissons un exemple qui échantillonne " +#~ "des ensembles de données locales " +#~ "aléatoires de Fashion-MNIST et entraîne" +#~ " un modèle simple de classification " +#~ "d'images sur ces partitions." + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgid "" -#~ "Then start the Flower gRPC client " -#~ "and start communicating to the server" -#~ " by passing our Flower client to " -#~ "the function :code:`startFlwrGRPC`." -#~ msgstr "" -#~ "Lance ensuite le client Flower gRPC " -#~ "et commence à communiquer avec le " -#~ "serveur en passant notre client Flower" -#~ " à la fonction :code:`startFlwrGRPC`." +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ "call the provided :code:`MLFlwrClient` and " -#~ "call :code:`startFlwrGRPC()`. The attribute " -#~ ":code:`hostname` and :code:`port` tells the" -#~ " client which server to connect to." -#~ " This can be done by entering " -#~ "the hostname and port in the " -#~ "application before clicking the start " -#~ "button to start the federated learning" -#~ " process." +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." #~ msgstr "" -#~ "C'est tout pour le client. Il nous" -#~ " suffit d'implémenter :code:`Client` ou " -#~ "d'appeler le :code:`MLFlwrClient` fourni et" -#~ " d'appeler :code:`startFlwrGRPC()`. 
L'attribut " -#~ ":code:`hostname` et :code:`port` indique au" -#~ " client à quel serveur se connecter." -#~ " Pour ce faire, il suffit d'entrer" -#~ " le nom d'hôte et le port dans" -#~ " l'application avant de cliquer sur " -#~ "le bouton de démarrage pour lancer " -#~ "le processus d'apprentissage fédéré." +#~ "Pour plus de détails, voir " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." #~ msgid "" -#~ "Once the server is running we can" -#~ " start the clients in different " -#~ "terminals. Build and run the client " -#~ "through your Xcode, one through Xcode" -#~ " Simulator and the other by deploying" -#~ " it to your iPhone. To see more" -#~ " about how to deploy your app " -#~ "to iPhone or Simulator visit `here " -#~ "`_." +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." #~ msgstr "" -#~ "Une fois que le serveur fonctionne, " -#~ "nous pouvons démarrer les clients dans" -#~ " différents terminaux. Construis et exécute" -#~ " le client grâce à ton Xcode, " -#~ "l'un via le simulateur Xcode et " -#~ "l'autre en le déployant sur ton " -#~ "iPhone. Pour en savoir plus sur la" -#~ " façon de déployer ton application " -#~ "sur l'iPhone ou le simulateur, visite" -#~ " `ici `_." #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system in your ios device. The " -#~ "full `source code " -#~ "`_ for" -#~ " this example can be found in " -#~ ":code:`examples/ios`." +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. 
In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" #~ msgstr "" -#~ "Félicitations ! Tu as réussi à " -#~ "construire et à faire fonctionner ton" -#~ " premier système d'apprentissage fédéré " -#~ "dans ton appareil ios. Le `code " -#~ "source complet " -#~ "`_ de " -#~ "cet exemple se trouve dans " -#~ ":code:`examples/ios`." -#~ msgid "" -#~ "`Star Flower on GitHub " -#~ "`__ ⭐️ and join " -#~ "the open-source Flower community on " -#~ "Slack to connect, ask questions, and " -#~ "get help: `Join Slack `__ 🌼 We'd love to hear" -#~ " from you in the ``#introductions`` " -#~ "channel! And if anything is unclear, " -#~ "head over to the ``#questions`` channel." +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" #~ msgstr "" -#~ "`Star Flower on GitHub " -#~ "`__ ⭐️ et rejoignez" -#~ " la communauté open-source Flower sur" -#~ " Slack pour vous connecter, poser des" -#~ " questions et obtenir de l'aide : " -#~ "`Join Slack `__ " -#~ "🌼 Nous serions ravis d'avoir de " -#~ "vos nouvelles dans le canal " -#~ "``#introductions`` ! Et si quelque chose" -#~ " n'est pas clair, dirigez-vous vers" -#~ " le canal ``#questions``." 
-#~ msgid "|bd48315a61c14495babefe3c7918b493|" +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" #~ msgstr "" -#~ msgid "|c00d9e5b0d324d96b86da8a78b05b14b|" +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" #~ msgstr "" -#~ msgid "|faae2ee10f4149c9907563c4f48ec6ea|" +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" #~ msgstr "" -#~ msgid "|13a655510351455292f145a61d6c15d6|" +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" #~ msgstr "" -#~ msgid "|13949884182846e3a91433190a936ba9|" +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" #~ msgstr "" -#~ msgid "|9bf26cc650b146e88b4745df040ece37|" +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" #~ msgstr "" -#~ msgid "|1590915480fc41708bd43e48af9582f9|" +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" #~ msgstr "" -#~ msgid "|e5ee96d702b64256b97b8ca99db10787|" +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" #~ msgstr "" -#~ msgid "|84840b244edd47c481278ce534c126cd|" +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" #~ msgstr "" -#~ msgid "|f33f5ebb3a844a2ba54bb6be3571b172|" +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" #~ msgstr "" -#~ msgid "|5645db4ba9c945518d51ff234f35c797|" +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" #~ msgstr "" -#~ msgid "|317af8d28fcc479ab981047d058c4751|" +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" #~ msgstr "" -#~ msgid "|8bfd0e697a494d5385662debafade6bf|" +#~ msgid "|08cb60859b07461588fe44e55810b050|" #~ msgstr "" #~ msgid "" -#~ "Differential privacy (DP) is often " -#~ "mentioned in the context of Federated" -#~ " Learning. It is a privacy-preserving" -#~ " method used when analyzing and " -#~ "sharing statistical data, ensuring the " -#~ "privacy of individual participants. DP " -#~ "achieves this by adding statistical " -#~ "noise to the model updates, ensuring " -#~ "any individual participants’ information " -#~ "cannot be distinguished or re-" -#~ "identified. This technique can be " -#~ "considered an optimization that provides " -#~ "a quantifiable privacy protection measure." 
-#~ msgstr "" -#~ "La confidentialité différentielle (DP) est " -#~ "souvent mentionnée dans le contexte de" -#~ " l'apprentissage fédéré. Il s'agit d'une" -#~ " méthode de préservation de la vie" -#~ " privée utilisée lors de l'analyse et" -#~ " du partage de données statistiques, " -#~ "garantissant la confidentialité des " -#~ "participants individuels. La DP y " -#~ "parvient en ajoutant un bruit " -#~ "statistique aux mises à jour du " -#~ "modèle, garantissant que toute information " -#~ "sur les participants individuels ne peut" -#~ " être distinguée ou réidentifiée. Cette " -#~ "technique peut être considérée comme une" -#~ " optimisation qui fournit une mesure " -#~ "quantifiable de protection de la vie " -#~ "privée." - -#~ msgid "|e5dc001d27ad460caeab669e957b3c36|" +#~ "Flower provides pre-made docker images" +#~ " on `Docker Hub " +#~ "`_ that include" +#~ " all necessary dependencies for running " +#~ "the server. You can also build " +#~ "your own custom docker images from " +#~ "scratch with a different version of " +#~ "Python or Ubuntu if that is what" +#~ " you need. In this guide, we " +#~ "will explain what images exist and " +#~ "how to build them locally." #~ msgstr "" -#~ msgid "API Reference - Flower binaries" +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " base image and a server image. " +#~ "There will also be a client image" +#~ " soon. The base image, as the " +#~ "name suggests, contains basic dependencies " +#~ "that both the server and the " +#~ "client need. This includes system " +#~ "dependencies, Python and Python tools. " +#~ "The server image is based on the" +#~ " base image, but it additionally " +#~ "installs the Flower server using " +#~ "``pip``." #~ msgstr "" -#~ msgid "API Reference - flwr" -#~ msgstr "Référence pour l'API" - #~ msgid "" -#~ "Defines whether or not the client " -#~ "is interacting with the server using " -#~ "the experimental REST API. 
This feature" -#~ " is experimental, it might change " -#~ "considerably in future versions of " -#~ "Flower." +#~ "Both, base and server image are " +#~ "configured via build arguments. Through " +#~ "build arguments, we can make our " +#~ "build more flexible. For example, in " +#~ "the base image, we can specify the" +#~ " version of Python to install using" +#~ " the ``PYTHON_VERSION`` build argument. " +#~ "Some of the build arguments have " +#~ "default values, others must be specified" +#~ " when building the image. All " +#~ "available build arguments for each image" +#~ " are listed in one of the " +#~ "tables below." #~ msgstr "" -#~ msgid "Returns a client's set of properties." +#~ msgid "Defaults to ``flwr/server``." #~ msgstr "" -#~ msgid "" -#~ "Defines whether or not the client " -#~ "is interacting with the server using " -#~ "the experimental REST API. This feature" -#~ " is experimental, it might be change" -#~ " considerably in future versions of " -#~ "Flower." +#~ msgid "``BASE_IMAGE_TAG``" #~ msgstr "" -#~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "str argument called `cid`. It should " -#~ "return a single client instance of " -#~ "type ClientLike. Note that the created" -#~ " client instances are ephemeral and " -#~ "will often be destroyed after a " -#~ "single method invocation. Since client " -#~ "instances are not long-lived, they " -#~ "should not attempt to carry state " -#~ "over method invocations. Any state " -#~ "required by the instance (model, " -#~ "dataset,hyperparameters, ...) should be " -#~ "(re-)created in either the call to " -#~ "`client_fn` or the call to any of" -#~ " the client methods (e.g., load " -#~ "evaluation data in the `evaluate` method" -#~ " itself)." +#~ msgid "The image tag of the base image." #~ msgstr "" -#~ msgid "" -#~ "A function creating client instances. " -#~ "The function must take a single " -#~ "str argument called `cid`. 
It should " -#~ "return a single client instance of " -#~ "type ClientLike. Note that the created" -#~ " client instances are ephemeral and " -#~ "will often be destroyed after a " -#~ "single method invocation. Since client " -#~ "instances are not long-lived, they " -#~ "should not" +#~ msgid "Defaults to ``py3.11-ubuntu22.04``." #~ msgstr "" -#~ msgid "attempt to carry state over method invocations. Any state required by" +#~ msgid "" +#~ "The following example creates a server" +#~ " image with the official Flower base" +#~ " image py3.11-ubuntu22.04 and Flower 1.7.0:" #~ msgstr "" #~ msgid "" -#~ "the instance (model, dataset,hyperparameters, " -#~ "...) should be (re-)created in either" -#~ " the call to `client_fn` or the " -#~ "call to any of the client methods" -#~ " (e.g., load evaluation data in the" -#~ " `evaluate` method itself)." +#~ "The name of image is ``flwr_server`` " +#~ "and the tag ``0.1.0``. Remember that " +#~ "the build arguments as well as the" +#~ " name and tag can be adapted to" +#~ " your needs. These values serve as" +#~ " examples only." #~ msgstr "" #~ msgid "" -#~ "\\frac{\\mu}{2} || w - w^t ||^2\n" -#~ "\n" +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY`` " +#~ "and ``BASE_IMAGE_TAG`` build arguments. The" +#~ " value of ``BASE_REPOSITORY`` must match" +#~ " the name of your image and the" +#~ " value of ``BASE_IMAGE_TAG`` must match " +#~ "the tag of your image." #~ msgstr "" #~ msgid "" -#~ "Adaptive Federated Optimization using Adagrad" -#~ " (FedAdagrad) [Reddi et al., 2020] " -#~ "strategy." +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. 
You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." #~ msgstr "" +#~ msgid "Open a PR (as shown above)" +#~ msgstr "Ouvre un RP (comme indiqué ci-dessus)" + #~ msgid "" -#~ "Adaptive Federated Optimization using Adam " -#~ "(FedAdam) [Reddi et al., 2020] strategy." +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "Add CI job to deploy the staging system when the `main` branch changes" + +#~ msgid "Changelog entry" +#~ msgstr "Changelog" + +#~ msgid "" +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." #~ msgstr "" #~ msgid "" -#~ "Adaptive Federated Optimization using Yogi " -#~ "(FedYogi) [Reddi et al., 2020] strategy." +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" #~ msgstr "" -#~ msgid "Contributing Baselines" -#~ msgstr "Configuration du contributeur" +#~ msgid "" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." +#~ msgstr "" #~ msgid "" -#~ "Do you have a new federated " -#~ "learning paper and want to add a" -#~ " new baseline to Flower? Or do " -#~ "you want to add an experiment to" -#~ " an existing baseline paper? Great, " -#~ "we really appreciate your contribution." +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." #~ msgstr "" #~ msgid "" -#~ "The goal of Flower Baselines is to" -#~ " reproduce experiments from popular papers" -#~ " to accelerate researchers by enabling " -#~ "faster comparisons to new strategies, " -#~ "datasets, models, and federated pipelines " -#~ "in general." 
+#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." #~ msgstr "" #~ msgid "" -#~ "Before you start to work on a " -#~ "new baseline or experiment, please check" -#~ " the `Flower Issues " -#~ "`_ or `Flower " -#~ "Pull Requests `_ " -#~ "to see if someone else is already" -#~ " working on it. Please open a " -#~ "new issue if you are planning to" -#~ " work on a new baseline or " -#~ "experiment with a short description of" -#~ " the corresponding paper and the " -#~ "experiment you want to contribute." +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" #~ msgstr "" -#~ msgid "TL;DR: Adding a new Flower Baseline" +#~ msgid " is for classifying a PR as a general improvement." #~ msgstr "" -#~ msgid "" -#~ "Let's say you want to contribute " -#~ "the code of your most recent " -#~ "Federated Learning publication, *FedAweseome*. 
" -#~ "There are only three steps necessary " -#~ "to create a new *FedAweseome* Flower " -#~ "Baseline:" +#~ msgid " is to not add the PR to the changelog" #~ msgstr "" -#~ msgid "**Get the Flower source code on your machine**" +#~ msgid " is to add a general baselines change to the PR" #~ msgstr "" -#~ msgid "" -#~ "Fork the Flower codebase: got to " -#~ "the `Flower GitHub repo " -#~ "`_ and fork the " -#~ "code (click the *Fork* button in " -#~ "the top-right corner and follow " -#~ "the instructions)" +#~ msgid " is to add a general examples change to the PR" #~ msgstr "" -#~ msgid "" -#~ "Clone the (forked) Flower source code:" -#~ " :code:`git clone " -#~ "git@github.com:[your_github_username]/flower.git`" +#~ msgid " is to add a general sdk change to the PR" #~ msgstr "" -#~ msgid "" -#~ "Open the code in your favorite " -#~ "editor (e.g., using VSCode: ``cd flower" -#~ " ; code .``)" +#~ msgid " is to add a general simulations change to the PR" #~ msgstr "" -#~ msgid "**Add the FedAwesome code**" +#~ msgid "Note that only one token should be used." #~ msgstr "" #~ msgid "" -#~ "Add your :code:`FedAwesome` code under " -#~ ":code:`baselines/flwr_baselines/publications/[fedawesome]`" +#~ "Its content must have a specific " +#~ "format. We will break down what " +#~ "each possibility does:" #~ msgstr "" -#~ msgid "Add a `pyproject.toml` with all necessary dependencies" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" #~ msgstr "" -#~ msgid "Add a `README.md` describing how to use your baseline" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" #~ msgstr "" -#~ msgid "**Open a pull request**" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." 
#~ msgstr "" -#~ msgid "Stage your changes: :code:`git add .`" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" #~ msgstr "" #~ msgid "" -#~ "Commit & push: :code:`git commit -m " -#~ "\"Create new FedAweseome baseline\" ; " -#~ "git push`" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" #~ msgid "" -#~ "Open a pull request: go to *your*" -#~ " fork of the Flower codebase and " -#~ "create a pull request that targets " -#~ "the Flower ``main``` branch" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" -#~ msgid "Further reading:" -#~ msgstr "Aide supplémentaire" +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" +#~ msgstr "" #~ msgid "" -#~ "`GitHub docs: About forks " -#~ "`_" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" #~ msgstr "" #~ msgid "" -#~ "`GitHub docs: Creating a pull request" -#~ " `_" +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." +#~ msgstr "" + +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "Exemple : MXNet - Exécuter MXNet Federated" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. 
" +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." #~ msgstr "" +#~ "Ce tutoriel te montrera comment utiliser" +#~ " Flower pour construire une version " +#~ "fédérée d'une charge de travail MXNet" +#~ " existante. Nous utilisons MXNet pour " +#~ "former un modèle séquentiel sur " +#~ "l'ensemble de données MNIST. Nous " +#~ "structurerons l'exemple de la même " +#~ "manière que notre présentation `PyTorch " +#~ "- De la centralisation à la " +#~ "fédération `_. " +#~ "MXNet et PyTorch sont très similaires" +#~ " et une très bonne comparaison entre" +#~ " MXNet et PyTorch est donnée ici " +#~ "`_. Tout " +#~ "d'abord, nous construisons une approche " +#~ "de formation centralisée basée sur le" +#~ " tutoriel `Handandwritten Digit Recognition " +#~ "`_." +#~ " Ensuite, nous nous basons sur le " +#~ "code de formation centralisé pour " +#~ "exécuter la formation de manière " +#~ "fédérée." #~ msgid "" -#~ "`GitHub docs: Creating a pull request" -#~ " from a fork `_" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" #~ msgstr "" +#~ "Avant de commencer à configurer notre" +#~ " exemple MXNet, nous installons les " +#~ "paquets :code:`mxnet` et :code:`flwr` :" -#~ msgid "Requirements" -#~ msgstr "Changements nécessaires" +#~ msgid "MNIST Training with MXNet" +#~ msgstr "Formation MNIST avec MXNet" #~ msgid "" -#~ "Contributing a new baseline is really" -#~ " easy. You only have to make " -#~ "sure that your federated learning " -#~ "experiments are running with Flower. As" -#~ " soon as you have created a " -#~ "Flower-based experiment, you can contribute" -#~ " it." 
+#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." #~ msgstr "" +#~ "Nous commençons par une brève " +#~ "description du code d'entraînement centralisé" +#~ " basé sur un modèle :code:`Sequential`. " +#~ "Si tu veux une explication plus " +#~ "approfondie de ce qui se passe, " +#~ "jette un coup d'œil au tutoriel " +#~ "officiel `MXNet " +#~ "`_." #~ msgid "" -#~ "It is recommended (but not required) " -#~ "to use `Hydra `_ to " -#~ "execute the experiment." +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." #~ msgstr "" +#~ "Créons un nouveau fichier appelé " +#~ ":code:`mxnet_mnist.py` avec tous les " +#~ "composants requis pour un apprentissage " +#~ "MNIST traditionnel (centralisé). Tout d'abord," +#~ " le package MXNet :code:`mxnet` doit " +#~ "être importé. Tu peux voir que " +#~ "nous n'avons pas encore importé le " +#~ "package :code:`flwr` pour l'apprentissage " +#~ "fédéré. Cela sera fait plus tard." #~ msgid "" -#~ "Please make sure to add your " -#~ "baseline or experiment to the " -#~ "corresponding directory as explained in " -#~ "`Executing Baseline `_. Give your baseline the " -#~ "unique identifier. For example, :code:`fedbn`" -#~ " refers to the paper \"FedBN: " -#~ "Federated Learning on non-IID Features" -#~ " via Local Batch Normalization\" and " -#~ "creates the corresponding directory " -#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn`. 
Then" -#~ " you create the experiment directory " -#~ "with the experiment name. For example," -#~ " the experiment that measures the " -#~ "convergence has the directory " -#~ ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`." -#~ " This directory contains all your " -#~ "code and a :code:`README.md` with a " -#~ "link to the paper, the paper's " -#~ "abstract, and a detailed description of" -#~ " how to execute the experiments." +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." #~ msgstr "" +#~ "La fonction :code:`load_data()` charge les " +#~ "ensembles d'entraînement et de test " +#~ "MNIST." #~ msgid "" -#~ "Please also check if :code:`pyproject.toml`" -#~ " and :code:`requirements.txt` (all in the" -#~ " directory `baselines " -#~ "`_ contain" -#~ " all required Python packages (libraries," -#~ " frameworks, ...). If the required " -#~ "Python package is not yet listed, " -#~ "please add it to :code:`pyproject.toml`. " -#~ "If you need a different version of" -#~ " a package already listed, please try" -#~ " to ensure your experiment runs with" -#~ " the existing version listed in " -#~ ":code:`pyproject.toml` (or :code:`requirements.txt`). " -#~ "If that doesn't work, open a " -#~ "GitHub Issue and request the version " -#~ "change." +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." #~ msgstr "" +#~ "Comme nous l'avons déjà mentionné, nous" +#~ " utiliserons l'ensemble de données MNIST" +#~ " pour cette charge de travail " +#~ "d'apprentissage automatique. L'architecture du " +#~ "modèle (un modèle :code:`Séquentiel` très " +#~ "simple) est définie dans :code:`model()`." #~ msgid "" -#~ "The experiment also needs to contain " -#~ "a file with a downloader for the" -#~ " dataset - if possible automatic. 
" -#~ "This can be included in one of " -#~ "the files or as an extra file." +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." #~ msgstr "" +#~ "Nous devons maintenant définir la " +#~ "formation (fonction :code:`train()`) qui passe" +#~ " en boucle sur l'ensemble de la " +#~ "formation et mesure la perte pour " +#~ "chaque lot d'exemples de formation." #~ msgid "" -#~ "Finally, please add plots for all " -#~ "experimental results your code is " -#~ "running to the :code:`experiment` directory" -#~ " and include them in :code:`README.md`. " -#~ "Doing this helps others and enables " -#~ "them to recognize your contributions " -#~ "quickly." +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." #~ msgstr "" +#~ "L'évaluation du modèle est définie dans" +#~ " la fonction :code:`test()`. Cette fonction" +#~ " passe en boucle sur tous les " +#~ "échantillons de test et mesure la " +#~ "perte et la précision du modèle en" +#~ " fonction de l'ensemble des données " +#~ "de test." #~ msgid "" -#~ "We are aware that a few libraries" -#~ " are available only via Conda. " -#~ "However, we want to encourage you " -#~ "to ensure that your code also runs" -#~ " well outside of Conda to make " -#~ "it more accessible to the broader " -#~ "research community." -#~ msgstr "" - -#~ msgid "Here is a checklist for adding a new baseline:" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." 
#~ msgstr "" +#~ "Après avoir défini le chargement des " +#~ "données, l'architecture du modèle, " +#~ "l'entraînement et l'évaluation, nous pouvons" +#~ " tout assembler et entraîner notre " +#~ "modèle sur MNIST. Note que le " +#~ "dispositif GPU/CPU pour l'entraînement et " +#~ "le test est défini dans le " +#~ ":code:`ctx` (contexte)." -#~ msgid "" -#~ "add required Python packages to " -#~ ":code:`pyproject.toml` or :code:`requirements.txt`" +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" #~ msgstr "" +#~ "Tu peux maintenant exécuter ta charge" +#~ " de travail (centralisée) d'apprentissage " +#~ "automatique MXNet :" #~ msgid "" -#~ "add all required code under " -#~ ":code:`baselines/flwr_baselines/publications/[new_publication]`" -#~ msgstr "" - -#~ msgid "add a dataset downloader" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." #~ msgstr "" +#~ "Jusqu'à présent, tout cela devrait te" +#~ " sembler assez familier si tu as " +#~ "déjà utilisé MXNet (ou même PyTorch)." +#~ " Passons à l'étape suivante et " +#~ "utilisons ce que nous avons construit" +#~ " pour créer un simple système " +#~ "d'apprentissage fédéré composé d'un serveur" +#~ " et de deux clients." -#~ msgid "add an experiment plot" -#~ msgstr "" +#~ msgid "MXNet meets Flower" +#~ msgstr "MXNet rencontre Flower" -#~ msgid "add a :code:`README.md`" +#~ msgid "" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. 
This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." #~ msgstr "" +#~ "Jusqu'à présent, il n'était pas facile" +#~ " d'utiliser les charges de travail " +#~ "MXNet pour l'apprentissage fédéré car " +#~ "l'apprentissage fédéré n'est pas pris en" +#~ " charge dans MXNet. Comme Flower est" +#~ " totalement agnostique vis-à-vis du cadre" +#~ " d'apprentissage automatique sous-jacent, " +#~ "il peut être utilisé pour fédérer " +#~ "des charges de travail d'apprentissage " +#~ "automatique arbitraires. Cette section te " +#~ "montrera comment Flower peut être " +#~ "utilisé pour fédérer notre charge de " +#~ "travail MXNet centralisée." -#~ msgid "Usability" +#~ msgid "" +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." #~ msgstr "" +#~ "Le concept pour fédérer une charge " +#~ "de travail existante est toujours le " +#~ "même et facile à comprendre. Nous " +#~ "devons démarrer un *serveur* et ensuite" +#~ " utiliser le code dans " +#~ ":code:`mxnet_mnist.py` pour les *clients* qui" +#~ " sont connectés au *serveur*. Le " +#~ "*serveur* envoie les paramètres du " +#~ "modèle aux clients. Les *clients* " +#~ "exécutent la formation et mettent à " +#~ "jour les paramètres. Les paramètres mis" +#~ " à jour sont renvoyés au *serveur*" +#~ " qui fait la moyenne de toutes " +#~ "les mises à jour de paramètres " +#~ "reçues. 
Ceci décrit un tour du " +#~ "processus d'apprentissage fédéré et nous " +#~ "répétons cette opération pour plusieurs " +#~ "tours." #~ msgid "" -#~ "Flower is known and loved for its" -#~ " usability. Therefore, make sure that " -#~ "your baseline or experiment can be " -#~ "executed with a single command such " -#~ "as :code:`./run.sh` or :code:`python3 " -#~ "main.py`. How you organize the " -#~ "experiments and the related code " -#~ "structure is up to you as an " -#~ "author, but please keep in mind to" -#~ " make sure that other users can " -#~ "easily understand and execute your " -#~ "baseline." +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" #~ msgstr "" +#~ "Enfin, nous allons définir la logique" +#~ " de notre *client* dans :code:`client.py`" +#~ " et nous appuyer sur l'entraînement " +#~ "MXNet défini précédemment dans " +#~ ":code:`mxnet_mnist.py`. Notre *client* doit " +#~ "importer :code:`flwr`, mais aussi " +#~ ":code:`mxnet` pour mettre à jour les " +#~ "paramètres de notre modèle MXNet :" -#~ msgid "We look forward to your contribution!" -#~ msgstr "Exemple de première contribution" - -#~ msgid "flwr" -#~ msgstr "Fleur" - -#~ msgid "binaries" +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. 
:code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" #~ msgstr "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" -#~ msgid "Flower Baselines" -#~ msgstr "Demande pour une nouvelle Flower Baseline" +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "transforme les :code:`NDArray` du MXNet en :code:`ndarray` de NumPy" #~ msgid "" -#~ "Flower Baselines are a collection of " -#~ "organised scripts used to reproduce " -#~ "results from well-known publications or" -#~ " benchmarks. You can check which " -#~ "baselines already exist and/or contribute " -#~ "your own baseline." +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." 
#~ msgstr "" +#~ "La partie la plus difficile est de" +#~ " transformer les paramètres MXNet de " +#~ ":code:`NDArray` en :code:`NumPy Arrays` pour" +#~ " les rendre lisibles pour Flower." -#~ msgid "Flower requires `Python 3.7 `_ or above." -#~ msgstr "`Python 3.7 `_ ou plus" - -#~ msgid "|9e234df38403464899ad3aee36bf1b95|" +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" +#~ "Les deux méthodes :code:`NumPyClient` " +#~ ":code:`fit` et :code:`evaluate` utilisent les" +#~ " fonctions :code:`train()` et :code:`test()` " +#~ "définies précédemment dans :code:`mxnet_mnist.py`." +#~ " Ce que nous faisons vraiment ici," +#~ " c'est que nous indiquons à Flower," +#~ " par le biais de notre sous-" +#~ "classe :code:`NumPyClient`, laquelle de nos" +#~ " fonctions déjà définies doit être " +#~ "appelée pour l'entraînement et l'évaluation." +#~ " Nous avons inclus des annotations de" +#~ " type pour te donner une meilleure" +#~ " compréhension des types de données " +#~ "qui sont transmis." -#~ msgid "|081158351506446f9f772cb45ee68523|" +#~ msgid "" +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." #~ msgstr "" +#~ "Après avoir défini le chargement des " +#~ "données, l'architecture du modèle, la " +#~ "formation et l'évaluation, nous pouvons " +#~ "tout rassembler et former notre modèle" +#~ " :code:`Sequential` sur MNIST." 
-#~ msgid "|e9325042b79c45ed96b5a8d2f6f3cdc9|" +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" #~ msgstr "" +#~ "dans chaque fenêtre (assure-toi que " +#~ "le serveur est toujours en cours " +#~ "d'exécution avant de le faire) et " +#~ "tu verras ton projet MXNet exécuter " +#~ "l'apprentissage fédéré sur deux clients. " +#~ "Félicitations !" -#~ msgid "|11b83bb107344db78a37266e080c4a7a|" +#~ msgid "" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" #~ msgstr "" +#~ "Le code source complet de cet " +#~ "exemple : `MXNet : From Centralized " +#~ "To Federated (Code) " +#~ "`_. Notre " +#~ "exemple est bien sûr un peu trop" +#~ " simplifié parce que les deux clients" +#~ " chargent exactement le même ensemble " +#~ "de données, ce qui n'est pas " +#~ "réaliste. Tu es maintenant prêt à " +#~ "explorer ce sujet plus en profondeur." +#~ " Pourquoi ne pas utiliser un CNN " +#~ "ou un ensemble de données différent " +#~ "? Pourquoi ne pas ajouter d'autres " +#~ "clients ?" -#~ msgid "|cd764bcf6d174a9cb62880ace9a8a6bd|" -#~ msgstr "" +#~ msgid "with the following command sequence:" +#~ msgstr "avec la séquence de commandes suivante :" -#~ msgid "|5c520984cced41e38f6bb4af416c3f84|" +#~ msgid "" +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." 
#~ msgstr "" +#~ "Si tu es un chercheur, tu peux " +#~ "très bien utiliser les certificats " +#~ "auto-signés générés à l'aide des " +#~ "scripts qui font partie de ce " +#~ "guide." -#~ msgid "|66941b0608644cf1a2269a194d3bc0dd|" +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." #~ msgstr "" +#~ "Nous allons maintenant montrer comment " +#~ "écrire un serveur qui utilise les " +#~ "scripts générés précédemment." -#~ msgid "|4b149f3a095b402bb8890275aabc9298|" +#~ msgid "" +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." #~ msgstr "" +#~ "Lorsqu'il fournit des certificats, le " +#~ "serveur attend un tuple de trois " +#~ "certificats. :code:`Path` peut être utilisé" +#~ " pour lire facilement le contenu de" +#~ " ces fichiers en chaînes d'octets, ce" +#~ " qui est le type de données " +#~ "attendu par :code:`start_server`." -#~ msgid "|675cf7d3d53a4817b5d47529c0758158|" +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`_." #~ msgstr "" -#~ msgid "|7ca594e16ae7477790c2e3cf096ec7cd|" -#~ msgstr "" +#~ msgid "Flower server" +#~ msgstr "Serveur de Flower" -#~ msgid "|d669336577b545a081d5d74169a9bc4d|" +#~ msgid "" +#~ "The command will pull the Docker " +#~ "image with the tag " +#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " +#~ "The tag contains the information which" +#~ " Flower, Python and Ubuntu is used." +#~ " In this case, it uses Flower " +#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." 
#~ msgstr "" -#~ msgid "|00b3d6cde1ff410ba54eff58da4e033a|" +#~ msgid "" +#~ "By default, the Flower server keeps " +#~ "state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." #~ msgstr "" -#~ msgid "|29a11f5353084c1995c538f7edef71a5|" +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower server. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." #~ msgstr "" -#~ msgid "|d62eda312fd44726bb5db2b761fe7e0d|" +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `_ when " +#~ "deploying to a production environment." #~ msgstr "" -#~ msgid "Using Baselines" +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the server " +#~ "supports:" #~ msgstr "" -#~ msgid "Structure" +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the server on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``-v`` to" +#~ " mount the user's home directory " +#~ "(``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." #~ msgstr "" #~ msgid "" -#~ "All baselines are available in the " -#~ "directory `baselines " -#~ "`_. 
This " -#~ "directory has two different files:" +#~ "As soon as the server starts, the" +#~ " file ``state.db`` is created in the" +#~ " user's home directory on your host" +#~ " system. If the file already exists," +#~ " the server tries to restore the " +#~ "state from the file. To start the" +#~ " server with an empty database, " +#~ "simply remove the ``state.db`` file." #~ msgstr "" #~ msgid "" -#~ "Both files contain all the information" -#~ " about required Python packages (libraries," -#~ " frameworks, ...) and their versions. " -#~ "You can install each library separately" -#~ " by using :code: `pip install` or " -#~ "you can use Poetry and run " -#~ "code:`poetry install` in the directory " -#~ "where you find the :code:`pyproject.toml` " -#~ "file. After installing all requirements, " -#~ "you can start to run your " -#~ "baseline." +#~ "To enable SSL, you will need a " +#~ "CA certificate, a server certificate and" +#~ " a server private key." #~ msgstr "" #~ msgid "" -#~ "Go to the baseline that you want" -#~ " to execute. The directories and " -#~ "files are structured so that you " -#~ "can first find the paper with " -#~ "their unique identifier such that, for" -#~ " example, :code:`FedProx` refers to the " -#~ "paper \"Federated Optimization in " -#~ "Heterogeneous Networks\". The :code:`fedprox` " -#~ "section contains all available experiments " -#~ "from that paper." +#~ "For testing purposes, you can generate" +#~ " your own self-signed certificates. " +#~ "The `Enable SSL connections " +#~ "`_ page contains " +#~ "a section that will guide you " +#~ "through the process." #~ msgstr "" #~ msgid "" -#~ "The experiment area contains a " -#~ ":code:`README.md` covering the corresponding " -#~ "paper, its abstract, and goal as " -#~ "well as a detailed description of " -#~ "how to run the baseline. Please " -#~ "use the :code:`README.md` to see how " -#~ "to execute each individual baseline." 
+#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``-v`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the server to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the server with the " +#~ "``--certificates`` flag." #~ msgstr "" -#~ msgid "Available Baselines" +#~ msgid "Using a different Flower or Python version" #~ msgstr "" #~ msgid "" -#~ "The following table lists all currently" -#~ " available baselines and the corresponding" -#~ " papers. If you want to add a" -#~ " new baseline or experiment, please " -#~ "check the `Contributing Baselines " -#~ "`_ section." +#~ "If you want to use a different " +#~ "version of Flower or Python, you " +#~ "can do so by changing the tag. " +#~ "All versions we provide are available" +#~ " on `Docker Hub " +#~ "`_." #~ msgstr "" -#~ msgid "Paper" +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" #~ msgstr "" -#~ msgid "Experiment" +#~ msgid "Next, we can pin the hash when running a new server container:" #~ msgstr "" -#~ msgid "Directory" -#~ msgstr "" +#~ msgid "flower-driver-api" +#~ msgstr "flower-driver-api" -#~ msgid "`FedAvg `_" -#~ msgstr "" +#~ msgid "flower-fleet-api" +#~ msgstr "flower-fleet-api" -#~ msgid "MNIST" +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " +#~ ":py:class:`bytes`, :py:class:`bool`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" #~ msgstr "" -#~ msgid 
":code:`flower/baselines/flwr_baselines/publications/fedavg_mnist/`" +#~ msgid "" +#~ ":py:obj:`create_error_reply " +#~ "`\\ \\(error\\, " +#~ "ttl\\)" #~ msgstr "" -#~ msgid "`FedProx `_" +#~ msgid "" +#~ ":py:obj:`create_reply `\\ " +#~ "\\(content\\, ttl\\)" #~ msgstr "" -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedprox_mnist/`" +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" #~ msgstr "" -#~ msgid "`FedOpt `_" +#~ msgid "Run Flower server (Driver API and Fleet API)." #~ msgstr "" -#~ msgid "sparse gradient task" +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" #~ msgstr "" -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/adaptive_federated_optimization`" +#~ msgid "Start a Flower Driver API server." +#~ msgstr "Tout d'abord, démarre un serveur Flower :" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" #~ msgstr "" +#~ "Flower 1.0 : ``start_server(..., " +#~ "config=flwr.server.ServerConfig(num_rounds=3, " +#~ "round_timeout=600.0), ...)``" -#~ msgid "`FedBN `_" +#~ msgid "`Driver` class provides an interface to the Driver API." #~ msgstr "" -#~ msgid "convergence rate" +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." #~ msgstr "" -#~ msgid ":code:`flower/baselines/flwr_baselines/publications/fedbn/convergence_rate`" +#~ msgid "Disconnect from the SuperLink if connected." #~ msgstr "" #~ msgid "" -#~ "Flower requires `Python 3.7 " -#~ "`_ or above, we " -#~ "recommend `Python 3.8 " -#~ "`_." 
+#~ ":py:obj:`create_message `\\" +#~ " \\(content\\, message\\_type\\, ...\\)" #~ msgstr "" -#~ "Flower nécessite `Python 3.7 " -#~ "`_ ou plus, nous " -#~ "recommandons `Python 3.8 " -#~ "`_." -#~ msgid "|6baade94cd14454e82ead34fcc29a182|" +#~ msgid "" +#~ "Time-to-live for the round trip" +#~ " of this message, i.e., the time " +#~ "from sending this message to receiving" +#~ " a reply. It specifies the duration" +#~ " for which the message and its " +#~ "potential reply are considered valid." #~ msgstr "" -#~ msgid "|1209ecd819104c458d396cf665c7ed4f|" -#~ msgstr "" +#~ msgid "start\\_driver" +#~ msgstr "start_client" -#~ msgid "|c088b02349304344a53f3ce1464225fb|" +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." #~ msgstr "" -#~ msgid "|b54d50afc82a4a57a55997a9eaeb735b|" +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." #~ msgstr "" -#~ msgid "|d17b57e97b714a25b43790d4b832fd87|" +#~ msgid "" +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." #~ msgstr "" -#~ msgid "|38966d05301a4854aa73c8c5033bfaab|" +#~ msgid "The Driver object to use." #~ msgstr "" -#~ msgid "|231d55f7926d4a5db02dcd724ec62529|" +#~ msgid "Starting a driver that connects to an insecure server:" #~ msgstr "" -#~ msgid "|fb44f2e13a1b4b69b7a72234eedd13f4|" +#~ msgid "Starting a driver that connects to an SSL-enabled server:" #~ msgstr "" -#~ msgid "|1cfc77af5d164030942e84d14268c256|" +#~ msgid "" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" #~ msgstr "" -#~ msgid "|0d50828231a64bc08223544a2d2fa216|" +#~ msgid "Run Simulation Engine from the CLI." 
#~ msgstr "" -#~ msgid "|904387757ceb42fbaa1875f3e8061113|" -#~ msgstr "" +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "Simulation de moniteur" -#~ msgid "|68608e1b7c4842458c528b431c715f5a|" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." #~ msgstr "" -#~ msgid "|2adb106bda97480bb4b33eac472e321e|" +#~ msgid "Quickstart MXNet" +#~ msgstr "Démarrage rapide de MXNet" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." #~ msgstr "" -#~ msgid "|025f0a6f7a6145cba4bf8fa0e2495851|" +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." #~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à former un modèle :code:`Sequential` " +#~ "sur MNIST à l'aide de Flower et" +#~ " de MXNet." -#~ msgid "Before the release" -#~ msgstr "Avant la sortie" +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" #~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." 
#~ msgstr "" -#~ "Mettez à jour le journal des " -#~ "modifications (``changelog.md``) avec tous les" -#~ " changements pertinents qui se sont " -#~ "produits après la dernière version. Si" -#~ " la dernière version a été étiquetée" -#~ " ``v1.2.0``, vous pouvez utiliser l'URL " -#~ "suivante pour voir tous les commits " -#~ "qui ont été fusionnés dans ``main`` " -#~ "depuis lors :" +#~ "Maintenant que toutes nos dépendances " +#~ "sont installées, lançons une formation " +#~ "distribuée simple avec deux clients et" +#~ " un serveur. Notre procédure de " +#~ "formation et l'architecture du réseau " +#~ "sont basées sur le tutoriel de " +#~ "reconnaissance de chiffres écrits à la" +#~ " main du MXNet " +#~ "`_." #~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" #~ msgstr "" -#~ "`GitHub : Compare v1.2.0...main " -#~ "`_" +#~ "Dans un fichier appelé :code:`client.py`, " +#~ "importe Flower et les paquets liés " +#~ "au MXNet :" + +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" #~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This command helps" -#~ " extract them: ``git log --format='%aN' " -#~ "v1.1.0..HEAD | sort -u``. The command" -#~ " has the same order as ``git " -#~ "shortlog``." +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." #~ msgstr "" -#~ "Remerciez les auteurs qui ont contribué" -#~ " depuis la dernière version. Cette " -#~ "commande permet de les extraire : " -#~ "``git log --format='%aN' v1.1.0..HEAD | " -#~ "sort -u``. La commande a le même" -#~ " ordre que ``git shortlog``." 
+#~ "Nous utilisons MXNet pour charger MNIST," +#~ " un ensemble de données de " +#~ "classification d'images populaire de chiffres" +#~ " manuscrits pour l'apprentissage automatique. " +#~ "L'utilitaire MXNet :code:`mx.test_utils.get_mnist()` " +#~ "télécharge les données d'entraînement et " +#~ "de test." #~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." #~ msgstr "" -#~ "Mettez à jour l'en-tête de section" -#~ " ``changelog.md`` ``Unreleased`` pour qu'il " -#~ "contienne le numéro de version et " -#~ "la date de la version que vous " -#~ "construisez. Créez une demande de " -#~ "traction avec le changement." +#~ "Définis l'entraînement et la perte avec" +#~ " MXNet. Nous entraînons le modèle en" +#~ " parcourant en boucle l'ensemble des " +#~ "données, nous mesurons la perte " +#~ "correspondante et nous l'optimisons." #~ msgid "" -#~ "Tag the release commit with the " -#~ "version number as soon as the PR" -#~ " is merged: ``git tag v0.12.3``, then" -#~ " ``git push --tags``" +#~ "Next, we define the validation of " +#~ "our machine learning model. We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." #~ msgstr "" -#~ "Marquez le commit de la version " -#~ "avec le numéro de version dès que" -#~ " le PR est fusionné : ``git tag" -#~ " v0.12.3``, puis ``git push --tags``" +#~ "Ensuite, nous définissons la validation " +#~ "de notre modèle d'apprentissage automatique." +#~ " Nous effectuons une boucle sur " +#~ "l'ensemble de test et mesurons à " +#~ "la fois la perte et la précision" +#~ " sur l'ensemble de test." 
#~ msgid "" -#~ "Build the release with ``./dev/build.sh``, " -#~ "then publish it with ``./dev/publish.sh``" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." #~ msgstr "" -#~ "Construisez la version avec " -#~ "``./dev/build.sh``, puis publiez-la avec " -#~ "``./dev/publish.sh``" +#~ "Après avoir défini la formation et " +#~ "le test d'un modèle d'apprentissage " +#~ "automatique MXNet, nous utilisons ces " +#~ "fonctions pour mettre en œuvre un " +#~ "client Flower." + +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" #~ msgid "" -#~ "Create an entry in GitHub releases " -#~ "with the release notes for the " -#~ "previously tagged commit and attach the" -#~ " build artifacts (:code:`.whl` and " -#~ ":code:`.tar.gz`)." +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." #~ msgstr "" -#~ "Crée une entrée dans GitHub releases " -#~ "avec les notes de version pour le" -#~ " commit précédemment étiqueté et attache" -#~ " les artefacts de construction " -#~ "(:code:`.whl` et :code:`.tar.gz`)." +#~ "Après avoir chargé l'ensemble de données" +#~ " avec :code:`load_data()`, nous effectuons " +#~ "une propagation vers l'avant pour " +#~ "initialiser le modèle et les paramètres" +#~ " du modèle avec :code:`model(init)`. " +#~ "Ensuite, nous implémentons un client " +#~ "Flower." #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). 
If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ "Deuxièmement, créer un environnement virtuel" -#~ " (et l'activer). Si vous choisissez " -#~ "d'utiliser :code:`pyenv` (avec le plugin " -#~ ":code:`pyenv-virtualenv`) et que vous " -#~ "l'avez déjà installé, vous pouvez " -#~ "utiliser le script suivant (par défaut" -#~ " il utilisera :code:`Python 3.8.17`, mais" -#~ " vous pouvez le changer en " -#~ "fournissant une :code:`` spécifique)::" - -#~ msgid "server.strategy.FedAvg" -#~ msgstr "serveur.stratégie.FedAvg" - -#~ msgid "server.strategy.FedAvgM" -#~ msgstr "stratégie.serveur.FedAvgM" - -#~ msgid "server.strategy.QFedAvg" -#~ msgstr "server.strategy.QFedAvg" - -#~ msgid "server.strategy.FedOpt" -#~ msgstr "serveur.stratégie.FedOpt" - -#~ msgid "server.strategy.FedProx" -#~ msgstr "serveur.stratégie.FedProx" - -#~ msgid "server.strategy.FedAdagrad" -#~ msgstr "serveur.stratégie.FedAdagrad" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "l'implémentation de l'interface :code:`Client` " +#~ "lorsque ta charge de travail utilise " +#~ "MXNet. 
L'implémentation de :code:`NumPyClient` " +#~ "signifie généralement la définition des " +#~ "méthodes suivantes (:code:`set_parameters` est " +#~ "cependant facultatif) :" -#~ msgid "server.strategy.FedAdam" -#~ msgstr "serveur.stratégie.FedAdam" +#~ msgid "They can be implemented in the following way:" +#~ msgstr "Ils peuvent être mis en œuvre de la manière suivante :" -#~ msgid "server.strategy.FedYogi" -#~ msgstr "serveur.stratégie.FedYogi" +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" +#~ "Nous pouvons maintenant créer une " +#~ "instance de notre classe :code:`MNISTClient`" +#~ " et ajouter une ligne pour exécuter" +#~ " ce client :" #~ msgid "" -#~ "`achiverram28`, `Adam Narozniak`, `Anass " -#~ "Anhari`, `Charles Beauville`, `Dana-Farber`," -#~ " `Daniel J. Beutel`, `Daniel Nata " -#~ "Nugraha`, `Edoardo Gabrielli`, `eunchung`, " -#~ "`Gustavo Bertoli`, `Heng Pan`, `Javier`, " -#~ "`Mahdi`, `Ruth Galindo`, `Steven Hé " -#~ "(Sīchàng)`, `Taner Topal`" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." #~ msgstr "" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ ":code:`NumPyClient` et d'appeler " +#~ ":code:`fl.client.start_client()`. 
La chaîne " +#~ ":code:`\"0.0.0:8080\"` indique au client à " +#~ "quel serveur se connecter. Dans notre" +#~ " cas, nous pouvons exécuter le " +#~ "serveur et le client sur la même" +#~ " machine, c'est pourquoi nous utilisons " +#~ ":code:`\"0.0.0:8080\"`. Si nous exécutons une" +#~ " charge de travail véritablement fédérée" +#~ " avec le serveur et les clients " +#~ "s'exécutant sur des machines différentes, " +#~ "tout ce qui doit changer est " +#~ ":code:`server_address` que nous transmettons " +#~ "au client." #~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" #~ msgstr "" -#~ "Chargeons maintenant l'ensemble de formation" -#~ " et de test CIFAR-10, partitionnons-" -#~ "les en dix ensembles de données " -#~ "plus petits (chacun divisé en ensemble" -#~ " de formation et de validation), et" -#~ " enveloppons les partitions résultantes en" -#~ " créant un PyTorch ``DataLoader`` pour " -#~ "chacun d'entre eux :" +#~ "Le client et le serveur étant " +#~ "prêts, nous pouvons maintenant tout " +#~ "exécuter et voir l'apprentissage fédéré " +#~ "en action. Les systèmes d'apprentissage " +#~ "fédéré ont généralement un serveur et" +#~ " plusieurs clients. Nous devons donc " +#~ "commencer par démarrer le serveur :" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using XGBoost and " -#~ "Flower!" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. 
The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." #~ msgstr "" -#~ "Construisons un système d'apprentissage fédéré" -#~ " horizontal en utilisant XGBoost et " -#~ "Flower !" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " code source complet " +#~ "`_ de cet exemple se " +#~ "trouve dans :code:`examples/quickstart-mxnet`." -#~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." -#~ msgstr "" -#~ "Réfère-toi à l'exemple de code " -#~ "complet `_ pour en " -#~ "savoir plus." +#~ msgid ":code:`load_mnist()`" +#~ msgstr ":code:`load_mnist()`" -#~ msgid "|3ff4c820a01d4a5abb022617de537c54|" -#~ msgstr "" +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" -#~ msgid "|7f1889391ad448e2a65920165f0d798c|" -#~ msgstr "" +#~ msgid ":code:`shuffle()`" +#~ msgstr ":code:`shuffle()`" -#~ msgid "|a171dc4a0d044e70b5d585cc10ace0e0|" +#~ msgid "Shuffles data and its label" +#~ msgstr "Mélange les données et leur étiquette" + +#~ msgid ":code:`partition()`" +#~ msgstr ":code:`partition()`" + +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "Divise les ensembles de données en un certain nombre de partitions" + +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." #~ msgstr "" +#~ "Nous chargeons l'ensemble de données " +#~ "MNIST de `OpenML `_," +#~ " un ensemble de données de " +#~ "classification d'images populaires de chiffres" +#~ " manuscrits pour l'apprentissage automatique. 
" +#~ "L'utilitaire :code:`utils.load_mnist()` télécharge " +#~ "les données d'entraînement et de test." +#~ " L'ensemble d'entraînement est ensuite " +#~ "divisé en 10 partitions avec " +#~ ":code:`utils.partition()`." -#~ msgid "|fe518aa0d86341f7b2fc87bd6e3bbf0c|" +#~ msgid "Let's get stated!" +#~ msgstr "Allons-y, déclarons-le !" + +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" #~ msgstr "" -#~ msgid "|6abfdf0dade44469ae9f08c8dc7d148c|" +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" #~ msgstr "" -#~ msgid "|b4f147db24bb4da9a786e1d6676a1c2d|" +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" #~ msgstr "" -#~ msgid "|5c62032f589a457bb37b5fee5b2adbde|" +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" #~ msgstr "" -#~ msgid "|f154df1846dd44f79a94f1dc3ae8b088|" +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" #~ msgstr "" -#~ msgid "|9d20be8160f7451fb0f33b194506503f|" +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" #~ msgstr "" -#~ msgid "|3d949f76988443c59990d2e64f05c386|" +#~ msgid "|7f0ee162da38450788493a21627306f7|" #~ msgstr "" -#~ msgid "|526c6d9140f6404f8a226d9056327b3b|" +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" #~ msgstr "" -#~ msgid "|a5f6af14cd7c4550929b17f83b4f63c7|" +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" #~ msgstr "" -#~ msgid "|bcd571c4f4ee4803a54f71b5c20448cb|" +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" #~ msgstr "" -#~ msgid "|c76452ae1ed84965be7ef23c72b95845|" +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" #~ msgstr "" -#~ msgid "" -#~ "Please follow the first section on " -#~ "`Run Flower using Docker " -#~ "`_ which covers this" -#~ " step in more detail." +#~ msgid "|ec1fe880237247e0975f52766775ab84|" #~ msgstr "" -#~ msgid "" -#~ "Since `Flower 1.5 `_ we have " -#~ "introduced translations to our doc " -#~ "pages, but, as you might have " -#~ "noticed, the translations are often " -#~ "imperfect. 
If you speak languages other" -#~ " than English, you might be able " -#~ "to help us in our effort to " -#~ "make Federated Learning accessible to as" -#~ " many people as possible by " -#~ "contributing to those translations! This " -#~ "might also be a great opportunity " -#~ "for those wanting to become open " -#~ "source contributors with little prerequistes." +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" #~ msgstr "" -#~ msgid "" -#~ "You input your translation in the " -#~ "textbox at the top and then, once" -#~ " you are happy with it, you " -#~ "either press ``Save and continue`` (to" -#~ " save the translation and go to " -#~ "the next untranslated string), ``Save " -#~ "and stay`` (to save the translation " -#~ "and stay on the same page), " -#~ "``Suggest`` (to add your translation to" -#~ " suggestions for other users to " -#~ "view), or ``Skip`` (to go to the" -#~ " next untranslated string without saving" -#~ " anything)." +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existant, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "Flower provides pre-made docker images" +#~ " on `Docker Hub `_" +#~ " that include all necessary dependencies" +#~ " for running the SuperLink. You can" +#~ " also build your own custom docker" +#~ " images from scratch with a different" +#~ " version of Python or Ubuntu if " +#~ "that is what you need. In this " +#~ "guide, we will explain what images " +#~ "exist and how to build them " +#~ "locally." #~ msgstr "" -#~ msgid "Example: Walk-Through PyTorch & MNIST" -#~ msgstr "Exemple : PyTorch et MNIST" - #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." 
+#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre," -#~ " comment former un réseau neuronal " -#~ "convolutif sur MNIST en utilisant Flower" -#~ " et PyTorch." #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" +#~ "Both, base and SuperLink image are " +#~ "configured via build arguments. Through " +#~ "build arguments, we can make our " +#~ "build more flexible. For example, in " +#~ "the base image, we can specify the" +#~ " version of Python to install using" +#~ " the ``PYTHON_VERSION`` build argument. " +#~ "Some of the build arguments have " +#~ "default values, others must be specified" +#~ " when building the image. All " +#~ "available build arguments for each image" +#~ " are listed in one of the " +#~ "tables below." #~ msgstr "" -#~ "Puisque nous voulons utiliser PyTorch " -#~ "pour résoudre une tâche de vision " -#~ "par ordinateur, installons PyTorch et la" -#~ " bibliothèque **torchvision** :" - -#~ msgid "Ready... Set... Train!" -#~ msgstr "Prêts... prêts... entraînez-vous !" -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. 
We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ msgid "``3.11``" +#~ msgstr "1.0.0rc1" + +#~ msgid "``UBUNTU_VERSION``" #~ msgstr "" -#~ "Maintenant que nous avons installé " -#~ "toutes nos dépendances, lançons un " -#~ "simple entraînement distribué avec deux " -#~ "clients et un serveur. Notre procédure" -#~ " d'entraînement et l'architecture de notre" -#~ " réseau sont basées sur l'exemple " -#~ "MNIST de base de PyTorch " -#~ "`_. Cela" -#~ " te permettra de voir à quel " -#~ "point il est facile d'envelopper ton " -#~ "code avec Flower et de commencer " -#~ "l'entraînement de manière fédérée. Nous " -#~ "te fournissons deux scripts d'aide, à" -#~ " savoir *run-server.sh*, et *run-" -#~ "clients.sh*. N'aie pas peur de regarder" -#~ " à l'intérieur, ils sont assez " -#~ "simples =)." -#~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" -#~ msgstr "Lance sur un terminal le script *run-server.sh* d'abord comme suit :" +#~ msgid "Version of the official Ubuntu Docker image." +#~ msgstr "" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." -#~ msgstr "Maintenant que le serveur est opérationnel, vas-y et lance les clients." +#~ msgid "Defaults to ``22.04``." +#~ msgstr "" #~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." +#~ "The following example creates a base " +#~ "image with Python 3.11.0, pip 23.0.1 " +#~ "and setuptools 69.0.2:" #~ msgstr "" -#~ "Et voilà ! Tu devrais voir la " -#~ "procédure d'entraînement et, après quelques" -#~ " itérations, la précision du test " -#~ "pour chaque client." -#~ msgid "Now, let's see what is really happening inside." -#~ msgstr "Maintenant, voyons ce qui se passe réellement à l'intérieur." 
+#~ msgid "Building the SuperLink image" +#~ msgstr "Démarrer le serveur" -#~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" +#~ msgid "Defaults to ``flwr/base``." #~ msgstr "" -#~ "Dans le script d'aide au serveur " -#~ "*run-server.sh*, tu trouveras le code " -#~ "suivant qui exécute le fichier " -#~ ":code:`server.py`" -#~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ msgid "The Python version of the base image." +#~ msgstr "Évaluer la réponse d'un client." + +#~ msgid "Defaults to ``py3.11``." #~ msgstr "" -#~ "Nous pouvons aller un peu plus " -#~ "loin et voir que :code:`server.py` lance" -#~ " simplement un serveur qui coordonnera " -#~ "trois tours de formation. Flower Les " -#~ "serveurs sont très personnalisables, mais " -#~ "pour les charges de travail simples, " -#~ "nous pouvons démarrer un serveur à " -#~ "l'aide de la fonction :ref:`start_server " -#~ "` et " -#~ "laisser toutes les possibilités de " -#~ "configuration à leurs valeurs par " -#~ "défaut, comme on peut le voir " -#~ "ci-dessous." -#~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "" + +#~ msgid "The PyPI package to install." #~ msgstr "" -#~ "Ensuite, jetons un coup d'œil au " -#~ "fichier *run-clients.sh*. Tu verras " -#~ "qu'il contient la boucle principale qui" -#~ " démarre un ensemble de *clients*." + +#~ msgid "Defaults to ``flwr``." 
+#~ msgstr "Flux de travail" #~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." +#~ "The following example creates a " +#~ "SuperLink image with the official Flower" +#~ " base image py3.11-ubuntu22.04 and Flower" +#~ " 1.8.0:" #~ msgstr "" -#~ "**cid** : c'est l'identifiant du client." -#~ " C'est un nombre entier qui identifie" -#~ " de façon unique l'identifiant du " -#~ "client." -#~ msgid "**sever_address**: String that identifies IP and port of the server." -#~ msgstr "**sever_address** : Chaîne qui identifie l'IP et le port du serveur." +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "" #~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY``, " +#~ "``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build " +#~ "arguments." #~ msgstr "" -#~ "**Cette information n'est pas requise " -#~ "par le client, mais elle nous aide" -#~ " à partitionner l'ensemble de données " -#~ "MNIST original pour nous assurer que " -#~ "chaque client travaille sur des sous-" -#~ "ensembles uniques des ensembles *formation*" -#~ " et *test*." + +#~ msgid "Creating New Messages" +#~ msgstr "Création de nouveaux messages" #~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. 
After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." #~ msgstr "" -#~ "Encore une fois, nous pouvons aller " -#~ "plus loin et regarder dans " -#~ ":code:`flwr_example/quickstart-pytorch/client.py`. Après" -#~ " avoir parcouru le code d'analyse des" -#~ " arguments au début de notre fonction" -#~ " :code:`main`, tu trouveras un appel " -#~ "à :code:`mnist.load_data`. Cette fonction est" -#~ " responsable du partitionnement des " -#~ "ensembles de données MNIST originaux " -#~ "(*training* et *test*) et renvoie un " -#~ ":code:`torch.utils.data.DataLoader` s pour chacun" -#~ " d'entre eux. Nous instancions ensuite " -#~ "un objet :code:`PytorchMNISTClient` avec notre" -#~ " ID client, nos DataLoaders, le " -#~ "nombre d'époques dans chaque tour et " -#~ "le périphérique que nous voulons " -#~ "utiliser pour l'entraînement (CPU ou " -#~ "GPU)." +#~ "Voici un guide simple pour créer " +#~ "un nouveau type de message entre " +#~ "le serveur et les clients dans " +#~ "Flower." #~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." 
#~ msgstr "" -#~ "L'objet :code:`PytorchMNISTClient` est finalement" -#~ " transmis à :code:`fl.client.start_client` avec" -#~ " l'adresse du serveur lorsque le " -#~ "processus de formation commence." +#~ "Supposons que nous ayons les fonctions" +#~ " suivantes dans :code:`server.py` et " +#~ ":code:`numpy_client.py`..." -#~ msgid "A Closer Look" -#~ msgstr "Regarder de plus près" +#~ msgid "Server's side:" +#~ msgstr "Côté serveur :" + +#~ msgid "Client's side:" +#~ msgstr "Côté client :" #~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" #~ msgstr "" -#~ "Maintenant, examinons de près le " -#~ ":code:`PytorchMNISTClient` à l'intérieur du " -#~ ":code:`flwr_example.quickstart-pytorch.mnist` et " -#~ "voyons ce qu'il fait :" +#~ "Voyons maintenant ce que nous devons " +#~ "mettre en œuvre pour que cette " +#~ "simple fonction entre le serveur et " +#~ "le client fonctionne !" + +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "Types de messages pour les tampons de protocole" #~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." 
#~ msgstr "" -#~ "La première chose à remarquer est " -#~ "que :code:`PytorchMNISTClient` instancie un " -#~ "modèle CNN dans son constructeur" +#~ "La première chose à faire est de" +#~ " définir un type de message pour " +#~ "le système RPC dans :code:`transport.proto`." +#~ " Notez que nous devons le faire " +#~ "à la fois pour les messages de " +#~ "demande et de réponse. Pour plus " +#~ "de détails sur la syntaxe de " +#~ "proto3, veuillez consulter la `documentation" +#~ " officielle `_." + +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "Dans le bloc :code:`ServerMessage` :" + +#~ msgid "Within the ClientMessage block:" +#~ msgstr "Dans le bloc ClientMessage :" #~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." #~ msgstr "" -#~ "Le code du CNN est disponible sous" -#~ " :code:`quickstart-pytorch.mnist` et il est" -#~ " reproduit ci-dessous. Il s'agit du" -#~ " même réseau que celui que l'on " -#~ "trouve dans `Exemple basique de MNIST" -#~ " `_." +#~ "Veille à ajouter également un champ " +#~ "du type de message nouvellement créé " +#~ "dans :code:`oneof msg`." 
-#~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" -#~ msgstr "" -#~ "La deuxième chose à noter est que" -#~ " la classe :code:`PytorchMNISTClient` hérite " -#~ "de :code:`fl.client.Client`, et qu'elle doit" -#~ " donc implémenter les méthodes suivantes" -#~ " :" +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "Une fois que c'est fait, nous compilerons le fichier avec :" -#~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." -#~ msgstr "" -#~ "En comparant la classe abstraite à " -#~ "sa classe dérivée :code:`PytorchMNISTClient`, " -#~ "tu remarqueras que :code:`fit` appelle " -#~ "une fonction :code:`train` et que " -#~ ":code:`evaluate` appelle une fonction " -#~ ":code:`test` :." +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "S'il se compile avec succès, tu devrais voir le message suivant :" + +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "Fonctions de sérialisation et de désérialisation" #~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." 
#~ msgstr "" -#~ "Ces fonctions se trouvent toutes deux" -#~ " dans le même module :code:`quickstart-" -#~ "pytorch.mnist` :" +#~ "La prochaine étape consiste à ajouter" +#~ " des fonctions pour sérialiser et " +#~ "désérialiser les types de données Python" +#~ " vers ou à partir des types de" +#~ " messages RPC définis. Tu dois " +#~ "ajouter ces fonctions dans :code:`serde.py`." + +#~ msgid "The four functions:" +#~ msgstr "Les quatre fonctions :" + +#~ msgid "Sending the Message from the Server" +#~ msgstr "Envoi du message à partir du serveur" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" #~ msgstr "" -#~ "Observe que ces fonctions encapsulent " -#~ "les boucles d'entraînement et de test" -#~ " habituelles et fournissent à :code:`fit`" -#~ " et :code:`evaluate` les statistiques " -#~ "finales pour chaque tour. Tu pourrais" -#~ " les remplacer par tes boucles " -#~ "d'entraînement et de test personnalisées " -#~ "et changer l'architecture du réseau, et" -#~ " l'ensemble de l'exemple fonctionnerait " -#~ "toujours parfaitement. En fait, pourquoi " -#~ "ne pas essayer de modifier le code" -#~ " pour en faire un exemple qui " -#~ "te plairait ?" 
+#~ "Écris maintenant la fonction de demande" +#~ " dans ta classe Client Proxy (par " +#~ "exemple, :code:`grpc_client_proxy.py`) en utilisant" +#~ " les fonctions serde que tu viens " +#~ "de créer :" -#~ msgid "Give It a Try" -#~ msgstr "Fais un essai" +#~ msgid "Receiving the Message by the Client" +#~ msgstr "Réception du message par le client" #~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. Remember to " +#~ "use the serde functions!" #~ msgstr "" -#~ "En parcourant la description du code " -#~ "de démarrage rapide ci-dessus, tu " -#~ "auras acquis une bonne compréhension du" -#~ " fonctionnement des *clients* et des " -#~ "*serveurs* dans Flower, de l'exécution " -#~ "d'une expérience simple et de la " -#~ "structure interne d'un wrapper client. " -#~ "Voici quelques exemples que tu peux " -#~ "essayer par toi-même pour acquérir " -#~ "plus d'expérience avec Flower :" +#~ "Dernière étape ! Modifie le code " +#~ "dans :code:`message_handler.py` pour vérifier " +#~ "le champ de ton message et appeler" +#~ " la fonction :code:`example_response`. N'oublie" +#~ " pas d'utiliser les fonctions serde !" -#~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." +#~ msgid "Within the handle function:" +#~ msgstr "Dans le cadre de la fonction de poignée :" + +#~ msgid "And add a new function:" +#~ msgstr "Et ajoute une nouvelle fonction :" + +#~ msgid "Hopefully, when you run your program you will get the intended result!" 
#~ msgstr "" -#~ "Essaie de modifier :code:`PytorchMNISTClient` " -#~ "pour qu'il puisse accepter différentes " -#~ "architectures." +#~ "Avec un peu de chance, lorsque tu" +#~ " exécuteras ton programme, tu obtiendras" +#~ " le résultat escompté !" #~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__." #~ msgstr "" -#~ "Modifie la fonction :code:`train` pour " -#~ "qu'elle accepte différents optimiseurs" #~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``--volume``" +#~ " to mount the user's home directory" +#~ " (``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." #~ msgstr "" -#~ "Modifie la fonction :code:`test` pour " -#~ "qu'elle prouve non seulement le top-1" -#~ " (précision normale) mais aussi le " -#~ "top-5 ?" #~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the user's home directory on your " +#~ "host system. If the file already " +#~ "exists, the SuperLink tries to restore" +#~ " the state from the file. To " +#~ "start the SuperLink with an empty " +#~ "database, simply remove the ``state.db`` " +#~ "file." 
#~ msgstr "" -#~ "Essaie d'adapter le code à des " -#~ "images et à des ensembles de " -#~ "données plus grands. Pourquoi ne pas " -#~ "essayer de s'entraîner sur ImageNet avec" -#~ " un ResNet-50 ?" - -#~ msgid "You are ready now. Enjoy learning in a federated way!" -#~ msgstr "Tu es prêt maintenant. Profite de l'apprentissage de manière fédérée !" #~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the SuperLink to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the SuperLink with the" +#~ " ``--certificates`` flag." #~ msgstr "" -#~ "Flower fournit des classes d'enveloppe " -#~ "de confidentialité différentielle (DP) pour" -#~ " l'intégration facile des garanties " -#~ "centrales de DP fournies par DP-" -#~ "FedAvg dans les pipelines de formation" -#~ " définis dans n'importe lequel des " -#~ "divers cadres de ML avec lesquels " -#~ "Flower est compatible." #~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ "``--server 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" #~ msgstr "" -#~ "Note que ces composants sont encore " -#~ "expérimentaux, la configuration correcte du" -#~ " DP pour une tâche spécifique est " -#~ "encore un problème non résolu." 
#~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." #~ msgstr "" -#~ "Le nom DP-FedAvg est trompeur car" -#~ " il peut être appliqué à n'importe" -#~ " quel algorithme FL qui se conforme" -#~ " à la structure générale prescrite " -#~ "par la famille d'algorithmes FedOpt." - -#~ msgid "DP-FedAvg" -#~ msgstr "DP-FedAvg" #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." +#~ "``--server 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" #~ msgstr "" -#~ "DP-FedAvg, proposé à l'origine par " -#~ "McMahan et al. [mcmahan]_ et étendu " -#~ "par Andrew et al. [andrew]_, est " -#~ "essentiellement FedAvg avec les modifications" -#~ " suivantes." #~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." 
#~ msgstr "" -#~ "**Clipping** : L'influence de la mise" -#~ " à jour de chaque client est " -#~ "limitée en l'écrêtant. Ceci est réalisé" -#~ " en imposant un plafond à la " -#~ "norme L2 de la mise à jour, " -#~ "en la réduisant si nécessaire." #~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." +#~ "If you want to use a different " +#~ "version of Flower, for example Flower" +#~ " nightly, you can do so by " +#~ "changing the tag. All available versions" +#~ " are on `Docker Hub " +#~ "`__." #~ msgstr "" -#~ "**Bruit** : un bruit gaussien, calibré" -#~ " sur le seuil d'écrêtage, est ajouté" -#~ " à la moyenne calculée au niveau " -#~ "du serveur." #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--certificates`` command " +#~ "line argument to pass paths to (CA" +#~ " certificate, server certificate, and " +#~ "server private key)." #~ msgstr "" -#~ "Il a été démontré que la " -#~ "distribution de la norme de mise à" -#~ " jour varie d'une tâche à l'autre " -#~ "et évolue au fur et à mesure " -#~ "de la formation. C'est pourquoi nous " -#~ "utilisons une approche adaptative [andrew]_" -#~ " qui ajuste continuellement le seuil " -#~ "d'écrêtage pour suivre un quantile " -#~ "prédéfini de la distribution de la " -#~ "norme de mise à jour." 
-#~ msgid "Simplifying Assumptions" -#~ msgstr "Simplifier les hypothèses" +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgstr "" -#~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "flower-driver-api" + +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" #~ msgstr "" -#~ "Nous formulons (et tentons d'appliquer) " -#~ "un certain nombre d'hypothèses qui " -#~ "doivent être satisfaites pour que le " -#~ "processus de formation réalise réellement " -#~ "les garanties :math:`(\\epsilon, \\delta)` que" -#~ " l'utilisateur a à l'esprit lorsqu'il " -#~ "configure l'installation." -#~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." +#~ msgid "Run Flower server (Fleet API)." +#~ msgstr "flower-fleet-api" + +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" #~ msgstr "" -#~ "**Sous-échantillonnage de taille fixe** " -#~ ":Des sous-échantillons de taille fixe" -#~ " des clients doivent être prélevés à" -#~ " chaque tour, par opposition aux " -#~ "sous-échantillons de Poisson de taille " -#~ "variable." -#~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." 
+#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" #~ msgstr "" -#~ "**Moyenne non pondérée** : Les " -#~ "contributions de tous les clients " -#~ "doivent être pondérées de façon égale" -#~ " dans l'ensemble afin que le serveur" -#~ " n'ait pas à connaître à l'avance " -#~ "la somme des poids de tous les " -#~ "clients disponibles pour la sélection." -#~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" #~ msgstr "" -#~ "**Aucune défaillance de client** : " -#~ "L'ensemble des clients disponibles doit " -#~ "rester constant pendant toutes les " -#~ "séries de formation. En d'autres termes," -#~ " les clients ne peuvent pas " -#~ "abandonner ou échouer." -#~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" #~ msgstr "" -#~ "Les deux premiers sont utiles pour " -#~ "éliminer une multitude de complications " -#~ "liées au calibrage du bruit en " -#~ "fonction du seuil d'écrêtage, tandis que" -#~ " le troisième est nécessaire pour se" -#~ " conformer aux hypothèses de l'analyse " -#~ "de la vie privée." -#~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" #~ msgstr "" -#~ "Ces restrictions sont conformes aux " -#~ "contraintes imposées par Andrew et al." -#~ " [andrew]_." 
-#~ msgid "Customizable Responsibility for Noise injection" -#~ msgstr "Responsabilité personnalisable pour l'injection de bruit" +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgstr "" -#~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" #~ msgstr "" -#~ "Contrairement à d'autres implémentations où" -#~ " l'ajout de bruit est effectué au " -#~ "niveau du serveur, tu peux configurer" -#~ " le site d'injection de bruit pour" -#~ " qu'il corresponde mieux à ton modèle" -#~ " de menace. Nous offrons aux " -#~ "utilisateurs la possibilité de configurer " -#~ "l'entraînement de telle sorte que chaque" -#~ " client ajoute indépendamment une petite" -#~ " quantité de bruit à la mise à" -#~ " jour écrêtée, ce qui fait que " -#~ "le simple fait d'agréger les mises " -#~ "à jour bruyantes équivaut à l'ajout " -#~ "explicite de bruit à l'agrégat non " -#~ "bruyant au niveau du serveur." -#~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." 
+#~ msgid "|241fc906441a4f038c625a19d30d01b2|" #~ msgstr "" -#~ "Pour être précis, si nous laissons " -#~ ":math:`m` être le nombre de clients " -#~ "échantillonnés à chaque tour et " -#~ ":math:\\sigma_\\Delta` être l'échelle du bruit" -#~ " gaussien total qui doit être ajouté" -#~ " à la somme des mises à jour" -#~ " du modèle, nous pouvons utiliser des" -#~ " mathématiques simples pour montrer que " -#~ "cela équivaut à ce que chaque " -#~ "client ajoute du bruit avec l'échelle" -#~ " :math:\\sigma_\\Delta/\\sqrt{m}`." -#~ msgid "Wrapper-based approach" -#~ msgstr "Approche basée sur l'enveloppe" +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgstr "" -#~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." 
+#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgstr "" + +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "" + +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgstr "" + +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "" + +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" #~ msgstr "" -#~ "L'introduction du DP dans une charge " -#~ "de travail existante peut être " -#~ "considérée comme l'ajout d'une couche de" -#~ " sécurité supplémentaire autour d'elle. " -#~ "Cela nous a incités à fournir la" -#~ " logique supplémentaire côté serveur et " -#~ "côté client nécessaire pour rendre le" -#~ " processus de formation différentiellement " -#~ "privé en tant qu'enveloppes pour les " -#~ "instances des classes abstraites " -#~ ":code:`Strategy` et :code:`NumPyClient` " -#~ "respectivement. Cette approche basée sur " -#~ "l'enveloppe a l'avantage d'être facilement " -#~ "composable avec d'autres enveloppes que " -#~ "quelqu'un pourrait contribuer à la " -#~ "bibliothèque Flower à l'avenir, par " -#~ "exemple, pour l'agrégation sécurisée. " -#~ "L'utilisation de l'héritage à la place" -#~ " peut être fastidieuse car cela " -#~ "nécessiterait la création de nouvelles " -#~ "sous-classes chaque fois qu'une nouvelle" -#~ " classe mettant en œuvre :code:`Strategy`" -#~ " ou :code:`NumPyClient` est définie." + +#~ msgid "Edge Client Engine" +#~ msgstr "Moteur client Edge" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. 
A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" #~ msgstr "" -#~ "La première version de notre solution" -#~ " consistait à définir un décorateur " -#~ "dont le constructeur acceptait, entre " -#~ "autres, une variable à valeur booléenne" -#~ " indiquant si l'écrêtage adaptatif devait" -#~ " être activé ou non. Nous nous " -#~ "sommes rapidement rendu compte que cela" -#~ " encombrerait sa fonction :code:`__init__()` " -#~ "avec des variables correspondant aux " -#~ "hyperparamètres de l'écrêtage adaptatif qui" -#~ " resteraient inutilisées lorsque celui-ci" -#~ " était désactivé. Une implémentation plus" -#~ " propre pourrait être obtenue en " -#~ "divisant la fonctionnalité en deux " -#~ "décorateurs, :code:`DPFedAvgFixed` et " -#~ ":code:`DPFedAvgAdaptive`, le second sous-" -#~ "classant le premier. Les constructeurs " -#~ "des deux classes acceptent un paramètre" -#~ " booléen :code:`server_side_noising` qui, comme" -#~ " son nom l'indique, détermine l'endroit " -#~ "où le noising doit être effectué." +#~ "`Flower `_ architecture de " +#~ "base avec Edge Client Engine" + +#~ msgid "Virtual Client Engine" +#~ msgstr "Moteur de client virtuel" #~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." 
+#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" #~ msgstr "" -#~ "Les capacités côté serveur requises pour" -#~ " la version originale de DP-FedAvg," -#~ " c'est-à-dire celle qui effectue un " -#~ "écrêtage fixe, peuvent être entièrement " -#~ "capturées à l'aide d'une logique " -#~ "d'enveloppement pour les deux méthodes " -#~ "suivantes de la classe abstraite " -#~ ":code:`Strategy`." +#~ "`Flower `_ architecture de " +#~ "base avec moteur de client virtuel" + +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "" +#~ "Moteur client virtuel et moteur client" +#~ " Edge dans la même charge de " +#~ "travail" #~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" #~ msgstr "" -#~ ":code:`configure_fit()` : Le dictionnaire de" -#~ " configuration envoyé par la " -#~ ":code:`Strategy` enveloppée à chaque client" -#~ " doit être augmenté d'une valeur " -#~ "supplémentaire égale au seuil d'écrêtage " -#~ "(indiqué sous :code:`dpfedavg_clip_norm`) et, " -#~ "si :code:`server_side_noising=true`, d'une autre " -#~ "égale à l'échelle du bruit gaussien " -#~ "qui doit être ajouté au client " -#~ "(indiqué sous :code:`dpfedavg_noise_stddev`)." 
+#~ "`Flower `_ architecture de " +#~ "base avec un moteur de client " +#~ "virtuel et un moteur de client " +#~ "périphérique" + +#~ msgid "How to build Docker Flower images locally" +#~ msgstr "" + +#~ msgid "Clone the flower repository." +#~ msgstr "**Fourche le dépôt de Flower**" #~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." #~ msgstr "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. 
On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." + +#~ msgid "``22.04``" +#~ msgstr "1.0.0rc1" + +#~ msgid "``23.0.1``" +#~ msgstr "1.0.0rc1" + +#~ msgid "``69.0.2``" +#~ msgstr "``1.0.0b0``" + +#~ msgid "``1.8.0``" +#~ msgstr "``1.0.0b0``" #~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." +#~ "The following example creates a base " +#~ "Ubuntu/Alpine image with Python 3.11.0, " +#~ "pip 23.0.1, setuptools 69.0.2 and Flower" +#~ " 1.8.0:" #~ msgstr "" -#~ "Nous ne pouvons pas modifier directement" -#~ " la fonction d'agrégation de la " -#~ "stratégie enveloppée pour la forcer à" -#~ " ajouter du bruit à l'agrégat, c'est" -#~ " pourquoi nous simulons le bruit côté" -#~ " client pour mettre en œuvre le " -#~ "bruit côté serveur." #~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. 
The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ "The name of image is ``flwr_base`` " +#~ "and the tag ``0.1.0``. Remember that " +#~ "the build arguments as well as the" +#~ " name and tag can be adapted to" +#~ " your needs. These values serve as" +#~ " examples only." +#~ msgstr "" + +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "Démarrer le serveur" + +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" #~ msgstr "" -#~ "Ces modifications ont été regroupées " -#~ "dans une classe appelée :code:`DPFedAvgFixed`," -#~ " dont le constructeur accepte la " -#~ "stratégie décorée, le seuil d'écrêtage " -#~ "et le nombre de clients échantillonnés" -#~ " à chaque tour comme arguments " -#~ "obligatoires. L'utilisateur est censé " -#~ "spécifier le seuil d'écrêtage car " -#~ "l'ordre de grandeur des normes de " -#~ "mise à jour dépend fortement du " -#~ "modèle formé et fournir une valeur " -#~ "par défaut serait trompeur. Le nombre" -#~ " de clients échantillonnés à chaque " -#~ "tour est nécessaire pour calculer la " -#~ "quantité de bruit qui doit être " -#~ "ajoutée à chaque mise à jour " -#~ "individuelle, que ce soit par le " -#~ "serveur ou par les clients." #~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." 
+#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" #~ msgstr "" -#~ "La fonctionnalité supplémentaire nécessaire " -#~ "pour faciliter l'écrêtage adaptatif a " -#~ "été fournie dans :code:`DPFedAvgAdaptive`, une" -#~ " sous-classe de :code:`DPFedAvgFixed`. Elle" -#~ " remplace les méthodes mentionnées ci-" -#~ "dessus pour effectuer les opérations " -#~ "suivantes." #~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." +#~ "If you want to use your own " +#~ "base image instead of the official " +#~ "Flower base image, all you need to" +#~ " do is set the ``BASE_REPOSITORY`` " +#~ "build argument." +#~ msgstr "" + +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Démarrer le serveur" + +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." #~ msgstr "" -#~ ":code:`configure_fit()` : Il intercepte le " -#~ "dict de configuration renvoyé par " -#~ ":code:`super.configure_fit()` pour y ajouter " -#~ "la paire clé-valeur " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True`, que le " -#~ "client interprète comme une instruction " -#~ "d'inclure un bit indicateur (1 si " -#~ "la norme de mise à jour <= " -#~ "seuil d'écrêtage, 0 sinon) dans les " -#~ "résultats qu'il renvoie." 
+ +#~ msgid "**Via the UI**" +#~ msgstr "**Review the PR**" #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." #~ msgstr "" -#~ ":code:`aggregate_fit()` : Il fait suivre " -#~ "un appel à :code:`super.aggregate_fit()` d'un" -#~ " appel à :code:`__update_clip_norm__()`, une " -#~ "procédure qui ajuste le seuil d'écrêtage" -#~ " sur la base des bits indicateurs " -#~ "reçus des clients échantillonnés." #~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" + +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "" + +#~ msgid "**Via the GitHub CI**" #~ msgstr "" -#~ "Les capacités requises côté client " -#~ "peuvent être entièrement capturées par " -#~ "une logique de wrapper pour la " -#~ "seule méthode :code:`fit()` de la classe" -#~ " abstraite :code:`NumPyClient`. 
Pour être " -#~ "précis, nous devons *post-traiter* la" -#~ " mise à jour calculée par le " -#~ "client wrapped pour l'écrêter, si " -#~ "nécessaire, à la valeur seuil fournie" -#~ " par le serveur dans le cadre " -#~ "du dictionnaire de configuration. En " -#~ "plus de cela, il peut avoir besoin" -#~ " d'effectuer un travail supplémentaire si" -#~ " l'une des clés suivantes (ou les " -#~ "deux) est également présente dans le " -#~ "dict." #~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." #~ msgstr "" -#~ ":code:`dpfedavg_noise_stddev` : Génère et " -#~ "ajoute la quantité de bruit spécifiée" -#~ " à la mise à jour de " -#~ "l'écrêtage." #~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." #~ msgstr "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Complète " -#~ "les métriques dict dans l'objet " -#~ ":code:`FitRes` renvoyé au serveur avec " -#~ "un bit indicateur, calculé comme décrit" -#~ " précédemment." -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -#~ msgstr "Effectuer l'analyse :math:`(\\epsilon, \\delta)`" +#~ msgid "Preliminarities" +#~ msgstr "" + +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "Exemple : JAX - Exécuter JAX Federated" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. 
In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ "\\small\n" +#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#~ msgstr "" + +#~ msgid ":doc:`How to run Flower using Docker `" #~ msgstr "" -#~ "Supposons que tu te sois entraîné " -#~ "pendant :math:`n` tours avec la fraction" -#~ " d'échantillonnage :math:`q` et le " -#~ "multiplicateur de bruit :math:`z`. Afin " -#~ "de calculer la valeur :math:`epsilon` " -#~ "qui en résulterait pour un " -#~ ":math:`\\delta` particulier, le script suivant" -#~ " peut être utilisé." #~ msgid "" -#~ "`How to run Flower using Docker " -#~ "`_" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." #~ msgstr "" -#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgid "Before you start, make sure that the Docker daemon is running:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`ClientApp `\\ " -#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." #~ msgstr "" -#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." #~ msgstr "" -#~ msgid "Flower driver SDK." -#~ msgstr "Serveur de Flower" +#~ msgid "" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. 
This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." +#~ msgstr "" -#~ msgid "driver" -#~ msgstr "serveur" +#~ msgid "Flower SuperLink" +#~ msgstr "flower-superlink" -#~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgid "Quickstart" +#~ msgstr "Démarrage rapide de JAX" + +#~ msgid "If you're looking to try out Flower, you can use the following command:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`GrpcDriver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "By default, the Flower SuperLink keeps" +#~ " state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." #~ msgstr "" -#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." #~ msgstr "" -#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. 
We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(task\\_ids\\)" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" #~ msgstr "" -#~ msgid "Get task results." +#~ msgid "Mounting a volume to store the state on the host system" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ " -#~ "\\(task\\_ins\\_list\\)" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" + +#~ msgid "" +#~ "In the example below, we create a" +#~ " new directory, change the user ID" +#~ " and tell Docker via the flag " +#~ "``--volume`` to mount the local " +#~ "``state`` directory into the ``/app/state``" +#~ " directory of the container. Furthermore," +#~ " we use the flag ``--database`` to" +#~ " specify the name of the database " +#~ "file." #~ msgstr "" -#~ msgid "Schedule tasks." +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the ``state`` directory on your host " +#~ "system. If the file already exists, " +#~ "the SuperLink tries to restore the " +#~ "state from the file. To start the" +#~ " SuperLink with an empty database, " +#~ "simply remove the ``state.db`` file." 
#~ msgstr "" -#~ msgid "GrpcDriver" +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." #~ msgstr "" -#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." #~ msgstr "" -#~ msgid "Connect to the Driver API." +#~ msgid "" +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_run " -#~ "`\\ \\(req\\)" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." #~ msgstr "" -#~ msgid "Request for run ID." -#~ msgstr "Demande pour une nouvelle Flower Baseline" +#~ msgid "" +#~ "The SuperNode Docker image currently " +#~ "works only with the 1.9.0-nightly " +#~ "release. A stable version will be " +#~ "available when Flower 1.9.0 (stable) " +#~ "gets released (ETA: May). 
A SuperNode" +#~ " nightly image must be paired with" +#~ " the corresponding SuperLink and ServerApp" +#~ " nightly images released on the same" +#~ " day. To ensure the versions are " +#~ "in sync, using the concrete tag, " +#~ "e.g., ``1.9.0.dev20240501`` instead of " +#~ "``nightly`` is recommended." +#~ msgstr "" #~ msgid "" -#~ ":py:obj:`disconnect " -#~ "`\\ \\(\\)" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." #~ msgstr "" -#~ msgid "Disconnect from the Driver API." +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" + +#~ msgid "Creating a SuperNode Dockerfile" +#~ msgstr "" + +#~ msgid "Let's assume the following project layout:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`get_nodes `\\" -#~ " \\(req\\)" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." #~ msgstr "" -#~ msgid "Get client IDs." -#~ msgstr "Moteur client Edge" +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(req\\)" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." 
#~ msgstr "" #~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ \\(req\\)" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." #~ msgstr "" #~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "running the clients' jobs (i.e. their" -#~ " `fit()` method)." +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." #~ msgstr "" +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "Démarrer le serveur" + #~ msgid "" -#~ "Much effort went into a completely " -#~ "restructured Flower docs experience. The " -#~ "documentation on [flower.ai/docs](flower.ai/docs) is" -#~ " now divided into Flower Framework, " -#~ "Flower Baselines, Flower Android SDK, " -#~ "Flower iOS SDK, and code example " -#~ "projects." +#~ "Next, we build the SuperNode Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ClientApp code are located." #~ msgstr "" #~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. 
All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." #~ msgstr "" -#~ "Les exemples d'utilisation de Flower " -#~ "étaient auparavant regroupés avec Flower " -#~ "dans un paquet appelé ``flwr_example``. " -#~ "Nous migrons ces exemples vers des " -#~ "projets autonomes pour les rendre plus" -#~ " faciles à utiliser. Tous les " -#~ "nouveaux exemples sont basés dans le " -#~ "répertoire ``examples " -#~ "`_." -#~ msgid "Quickstart TensorFlow/Keras" -#~ msgstr "Démarrage rapide de TensorFlow/Keras" +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "" -#~ msgid "Legacy Examples (`flwr_example`)" -#~ msgstr "Exemples hérités (`flwr_example`)" +#~ msgid "Let's break down each part of this command:" +#~ msgstr "" + +#~ msgid "``docker run``: This is the command to run a new Docker container." +#~ msgstr "" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." #~ msgstr "" -#~ "Les exemples d'utilisation dans `flwr_example`" -#~ " sont obsolètes et seront supprimés à" -#~ " l'avenir. De nouveaux exemples sont " -#~ "fournis en tant que projets autonomes" -#~ " dans `examples " -#~ "`_." -#~ msgid "Extra Dependencies" -#~ msgstr "Dépendances supplémentaires" +#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" -#~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. 
The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." +#~ msgid "``--insecure``: This option enables insecure communication." #~ msgstr "" -#~ "Le noyau du framework Flower conserve" -#~ " un ensemble minimal de dépendances. " -#~ "Les exemples démontrent Flower dans le" -#~ " contexte de différents frameworks " -#~ "d'apprentissage automatique, de sorte que " -#~ "des dépendances supplémentaires doivent être" -#~ " installées avant qu'un exemple puisse " -#~ "être exécuté." -#~ msgid "For PyTorch examples::" -#~ msgstr "Pour les exemples de PyTorch: :" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" -#~ msgid "For TensorFlow examples::" -#~ msgstr "Pour les exemples de TensorFlow : :" +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "" -#~ msgid "For both PyTorch and TensorFlow examples::" -#~ msgstr "Pour les exemples PyTorch et TensorFlow: :" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" #~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" #~ msgstr "" -#~ "Tu peux consulter :code:`pyproject.toml` pour" -#~ " une liste complète des extras " -#~ "possibles (section :code:`[tool.poetry.extras]`)." 
-#~ msgid "PyTorch Examples" -#~ msgstr "Exemples de PyTorch" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "" #~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flag when starting the container." #~ msgstr "" -#~ "Nos exemples PyTorch sont basés sur " -#~ "PyTorch 1.7. Ils devraient fonctionner " -#~ "avec d'autres versions également. Jusqu'à " -#~ "présent, nous fournissons les exemples " -#~ "suivants." -#~ msgid "CIFAR-10 Image Classification" -#~ msgstr "Classification d'images CIFAR-10" +#~ msgid "" +#~ "The procedure for building and running" +#~ " a ServerApp image is almost " +#~ "identical to the SuperNode image." +#~ msgstr "" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." #~ msgstr "" -#~ "`CIFAR-10 et CIFAR-100 " -#~ "`_ sont des" -#~ " ensembles de données d'images RVB " -#~ "populaires. L'exemple Flower CIFAR-10 utilise" -#~ " PyTorch pour former un classificateur " -#~ "CNN simple dans une configuration " -#~ "d'apprentissage fédéré avec deux clients." 
-#~ msgid "First, start a Flower server:" -#~ msgstr "Tout d'abord, démarre un serveur Flower :" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "" -#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." #~ msgstr "" -#~ "Ensuite, démarre les deux clients dans" -#~ " une nouvelle fenêtre de terminal :" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." -#~ msgstr "Pour plus de détails, voir :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. 
The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." +#~ msgstr "" -#~ msgid "ImageNet-2012 Image Classification" -#~ msgstr "ImageNet-2012 Classification des images" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "Démarrer le serveur" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ "Next, we build the ServerApp Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ServerApp code are located." #~ msgstr "" -#~ "`ImageNet-2012 `_ est " -#~ "l'un des principaux ensembles de données" -#~ " de vision par ordinateur. L'exemple " -#~ "Flower ImageNet utilise PyTorch pour " -#~ "entraîner un classificateur ResNet-18 dans " -#~ "une configuration d'apprentissage fédéré avec" -#~ " dix clients." -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_serverapp``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "Démarrer le serveur" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgid "Now that we have built the ServerApp image, we can finally run it." #~ msgstr "" -#~ "Pour plus de détails, voir " -#~ ":code:`src/py/flwr_example/pytorch_imagenet`." 
-#~ msgid "TensorFlow Examples" -#~ msgstr "Exemples de TensorFlow" +#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" #~ msgstr "" -#~ "Nos exemples TensorFlow sont basés sur" -#~ " TensorFlow 2.0 ou une version plus" -#~ " récente. Jusqu'à présent, nous te " -#~ "proposons les exemples suivants." - -#~ msgid "Fashion-MNIST Image Classification" -#~ msgstr "Classification d'images Fashion-MNIST" #~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." #~ msgstr "" -#~ "nous suivons cette tradition et " -#~ "fournissons un exemple qui échantillonne " -#~ "des ensembles de données locales " -#~ "aléatoires de Fashion-MNIST et entraîne" -#~ " un modèle simple de classification " -#~ "d'images sur ces partitions." - -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. 
To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "" #~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." #~ msgstr "" -#~ "Pour plus de détails, voir " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongise Flower," -#~ " for example, PyTorch. This tutorial " -#~ "might be removed in future versions " -#~ "of Flower." +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" + +#~ msgid "Run with root user privileges" #~ msgstr "" #~ msgid "" -#~ "Now that you have known how " -#~ "federated XGBoost work with Flower, it's" -#~ " time to run some more comprehensive" -#~ " experiments by customising the " -#~ "experimental settings. In the xgboost-" -#~ "comprehensive example (`full code " -#~ "`_), we provide more options " -#~ "to define various experimental setups, " -#~ "including aggregation strategies, data " -#~ "partitioning and centralised/distributed evaluation." -#~ " We also support `Flower simulation " -#~ "`_ making it easy to " -#~ "simulate large client cohorts in a " -#~ "resource-aware manner. Let's take a " -#~ "look!" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." 
+#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." #~ msgstr "" -#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgid "**Run a container with root user privileges**" #~ msgstr "" -#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgid "**Run the build process with root user privileges**" #~ msgstr "" -#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgid "Using a different Flower version" #~ msgstr "" -#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgid "Pinning a Docker image to a specific version" #~ msgstr "" -#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgid "" +#~ "It may happen that we update the" +#~ " images behind the tags. Such updates" +#~ " usually include security updates of " +#~ "system dependencies that should not " +#~ "change the functionality of Flower. " +#~ "However, if you want to ensure " +#~ "that you always use the same " +#~ "image, you can specify the hash of" +#~ " the image instead of the tag." #~ msgstr "" -#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``superlink:1.8.0`` tag:" #~ msgstr "" -#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgid "Next, we can pin the hash when running a new SuperLink container:" #~ msgstr "" -#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgid "" +#~ "To set a variable inside a Docker" +#~ " container, you can use the ``-e " +#~ "=`` flag." #~ msgstr "" -#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgid "" +#~ "This approach consists of two seprate" +#~ " phases: clipping of the updates and" +#~ " adding noise to the aggregated " +#~ "model. 
For the clipping phase, Flower" +#~ " framework has made it possible to" +#~ " decide whether to perform clipping " +#~ "on the server side or the client" +#~ " side." #~ msgstr "" -#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgid ":py:obj:`flwr.client `\\" #~ msgstr "" -#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgid ":py:obj:`flwr.common `\\" #~ msgstr "" -#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgid ":py:obj:`flwr.server `\\" #~ msgstr "" -#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgid ":py:obj:`flwr.simulation `\\" #~ msgstr "" -#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "Flower provides pre-made docker images" -#~ " on `Docker Hub " -#~ "`_ that include" -#~ " all necessary dependencies for running " -#~ "the server. You can also build " -#~ "your own custom docker images from " -#~ "scratch with a different version of " -#~ "Python or Ubuntu if that is what" -#~ " you need. In this guide, we " -#~ "will explain what images exist and " -#~ "how to build them locally." +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr "serveur.stratégie.Stratégie" + +#~ msgid ":py:obj:`Context `\\ \\(state\\)" #~ msgstr "" -#~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " base image and a server image. " -#~ "There will also be a client image" -#~ " soon. The base image, as the " -#~ "name suggests, contains basic dependencies " -#~ "that both the server and the " -#~ "client need. This includes system " -#~ "dependencies, Python and Python tools. " -#~ "The server image is based on the" -#~ " base image, but it additionally " -#~ "installs the Flower server using " -#~ "``pip``." +#~ msgid "State of your run." +#~ msgstr "" + +#~ msgid "Metrics record." #~ msgstr "" #~ msgid "" -#~ "Both, base and server image are " -#~ "configured via build arguments. Through " -#~ "build arguments, we can make our " -#~ "build more flexible. 
For example, in " -#~ "the base image, we can specify the" -#~ " version of Python to install using" -#~ " the ``PYTHON_VERSION`` build argument. " -#~ "Some of the build arguments have " -#~ "default values, others must be specified" -#~ " when building the image. All " -#~ "available build arguments for each image" -#~ " are listed in one of the " -#~ "tables below." +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`str` | " +#~ ":py:class:`bytes` | :py:class:`bool` | " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" #~ msgstr "" -#~ msgid "Defaults to ``flwr/server``." +#~ msgid "Remove all items from R." #~ msgstr "" -#~ msgid "``BASE_IMAGE_TAG``" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "The image tag of the base image." +#~ msgid "d defaults to None." #~ msgstr "" -#~ msgid "Defaults to ``py3.11-ubuntu22.04``." +#~ msgid "Update R from dict/iterable E and F." #~ msgstr "" #~ msgid "" -#~ "The following example creates a server" -#~ " image with the official Flower base" -#~ " image py3.11-ubuntu22.04 and Flower 1.7.0:" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "The name of image is ``flwr_server`` " -#~ "and the tag ``0.1.0``. Remember that " -#~ "the build arguments as well as the" -#~ " name and tag can be adapted to" -#~ " your needs. These values serve as" -#~ " examples only." +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "If you want to use your own " -#~ "base image instead of the official " -#~ "Flower base image, all you need to" -#~ " do is set the ``BASE_REPOSITORY`` " -#~ "and ``BASE_IMAGE_TAG`` build arguments. 
The" -#~ " value of ``BASE_REPOSITORY`` must match" -#~ " the name of your image and the" -#~ " value of ``BASE_IMAGE_TAG`` must match " -#~ "the tag of your image." +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" #~ msgstr "" -#~ msgid "Open a PR (as shown above)" -#~ msgstr "Ouvre un RP (comme indiqué ci-dessus)" - -#~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" -#~ msgstr "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr "" -#~ msgid "Changelog entry" -#~ msgstr "Changelog" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr "" #~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." 
#~ msgstr "" -#~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr "" + +#~ msgid "An identifier telling which data partition a ClientApp should use." #~ msgstr "" #~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`~typing.List`\\ " +#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " +#~ "[:py:class:`float`]]" +#~ msgstr "" + +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" +#~ "A dataclass storing named Arrays in " +#~ "order. This means that it holds " +#~ "entries as an OrderedDict[str, Array]. " +#~ "ParametersRecord objects can be viewed " +#~ "as an equivalent to PyTorch's " +#~ "state_dict, but holding serialised tensors " +#~ "instead." #~ msgstr "" -#~ msgid " is for classifying a PR as a general improvement." +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid " is to not add the PR to the changelog" +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" #~ msgstr "" -#~ msgid " is to add a general baselines change to the PR" +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" #~ msgstr "" -#~ msgid " is to add a general examples change to the PR" +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." 
+#~ msgstr "flower-fleet-api" + +#~ msgid "" +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" #~ msgstr "" -#~ msgid " is to add a general sdk change to the PR" +#~ msgid "run\\_driver\\_api" +#~ msgstr "flower-driver-api" + +#~ msgid "run\\_fleet\\_api" #~ msgstr "" -#~ msgid " is to add a general simulations change to the PR" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ msgid "Note that only one token should be used." +#~ msgid "key shares." #~ msgstr "" #~ msgid "" -#~ "Its content must have a specific " -#~ "format. We will break down what " -#~ "each possibility does:" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." 
#~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." #~ msgstr "" +#~ "Dans ce tutoriel, nous allons apprendre" +#~ " à entraîner un réseau neuronal " +#~ "convolutif sur CIFAR10 à l'aide de " +#~ "Flower et PyTorch." #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." 
#~ msgstr "" +#~ "*Les clients* sont chargés de générer" +#~ " des mises à jour de poids " +#~ "individuelles pour le modèle en fonction" +#~ " de leurs ensembles de données " +#~ "locales. Ces mises à jour sont " +#~ "ensuite envoyées au *serveur* qui les" +#~ " agrège pour produire un meilleur " +#~ "modèle. Enfin, le *serveur* renvoie " +#~ "cette version améliorée du modèle à " +#~ "chaque *client*. Un cycle complet de " +#~ "mises à jour de poids s'appelle un" +#~ " *round*." #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" #~ msgstr "" +#~ "Maintenant que nous avons une idée " +#~ "générale de ce qui se passe, " +#~ "commençons. Nous devons d'abord installer " +#~ "Flower. Tu peux le faire en " +#~ "exécutant :" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" #~ msgstr "" +#~ "Puisque nous voulons utiliser PyTorch " +#~ "pour résoudre une tâche de vision " +#~ "par ordinateur, allons-y et installons " +#~ "PyTorch et la bibliothèque **torchvision** " +#~ ":" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." 
#~ msgstr "" +#~ "Maintenant que nous avons installé " +#~ "toutes nos dépendances, lançons une " +#~ "formation distribuée simple avec deux " +#~ "clients et un serveur. Notre procédure" +#~ " de formation et l'architecture de " +#~ "notre réseau sont basées sur `Deep " +#~ "Learning with PyTorch " +#~ "`_" +#~ " de PyTorch." -#~ msgid "Example: MXNet - Run MXNet Federated" -#~ msgstr "Exemple : MXNet - Exécuter MXNet Federated" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" +#~ msgstr "" +#~ "Dans un fichier appelé :code:`client.py`, " +#~ "importe Flower et les paquets liés " +#~ "à PyTorch :" + +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "" +#~ "En outre, nous définissons l'attribution " +#~ "des appareils dans PyTorch avec :" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." #~ msgstr "" -#~ "Ce tutoriel te montrera comment utiliser" -#~ " Flower pour construire une version " -#~ "fédérée d'une charge de travail MXNet" -#~ " existante. 
Nous utilisons MXNet pour " -#~ "former un modèle séquentiel sur " -#~ "l'ensemble de données MNIST. Nous " -#~ "structurerons l'exemple de la même " -#~ "manière que notre présentation `PyTorch " -#~ "- De la centralisation à la " -#~ "fédération `_. " -#~ "MXNet et PyTorch sont très similaires" -#~ " et une très bonne comparaison entre" -#~ " MXNet et PyTorch est donnée ici " -#~ "`_. Tout " -#~ "d'abord, nous construisons une approche " -#~ "de formation centralisée basée sur le" -#~ " tutoriel `Handandwritten Digit Recognition " -#~ "`_." -#~ " Ensuite, nous nous basons sur le " -#~ "code de formation centralisé pour " -#~ "exécuter la formation de manière " -#~ "fédérée." +#~ "Nous utilisons PyTorch pour charger " +#~ "CIFAR10, un ensemble de données de " +#~ "classification d'images colorées populaire " +#~ "pour l'apprentissage automatique. Le " +#~ ":code:`DataLoader()` de PyTorch télécharge les" +#~ " données d'entraînement et de test " +#~ "qui sont ensuite normalisées." + +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "" +#~ "Définis la perte et l'optimiseur avec" +#~ " PyTorch L'entraînement de l'ensemble de" +#~ " données se fait en bouclant sur " +#~ "l'ensemble de données, en mesurant la" +#~ " perte correspondante et en l'optimisant." + +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "" +#~ "Définis ensuite la validation du réseau" +#~ " d'apprentissage automatique. Nous passons " +#~ "en boucle sur l'ensemble de test " +#~ "et mesurons la perte et la " +#~ "précision de l'ensemble de test." 
#~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." #~ msgstr "" -#~ "Avant de commencer à configurer notre" -#~ " exemple MXNet, nous installons les " -#~ "paquets :code:`mxnet` et :code:`flwr` :" - -#~ msgid "MNIST Training with MXNet" -#~ msgstr "Formation MNIST avec MXNet" +#~ "Après avoir défini l'entraînement et le" +#~ " test d'un modèle d'apprentissage " +#~ "automatique PyTorch, nous utilisons les " +#~ "fonctions pour les clients Flower." #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" #~ msgstr "" -#~ "Nous commençons par une brève " -#~ "description du code d'entraînement centralisé" -#~ " basé sur un modèle :code:`Sequential`. " -#~ "Si tu veux une explication plus " -#~ "approfondie de ce qui se passe, " -#~ "jette un coup d'œil au tutoriel " -#~ "officiel `MXNet " -#~ "`_." +#~ "Les clients de Flower utiliseront un " +#~ "CNN simple adapté de \"PyTorch : A" +#~ " 60 Minute Blitz\" :" #~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." 
#~ msgstr "" -#~ "Créons un nouveau fichier appelé " -#~ ":code:`mxnet_mnist.py` avec tous les " -#~ "composants requis pour un apprentissage " -#~ "MNIST traditionnel (centralisé). Tout d'abord," -#~ " le package MXNet :code:`mxnet` doit " -#~ "être importé. Tu peux voir que " -#~ "nous n'avons pas encore importé le " -#~ "package :code:`flwr` pour l'apprentissage " -#~ "fédéré. Cela sera fait plus tard." +#~ "Après avoir chargé l'ensemble des " +#~ "données avec :code:`load_data()`, nous " +#~ "définissons l'interface Flower." #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ "La fonction :code:`load_data()` charge les " -#~ "ensembles d'entraînement et de test " -#~ "MNIST." +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise PyTorch. Mettre en œuvre" +#~ " :code:`NumPyClient` signifie généralement " +#~ "définir les méthodes suivantes " +#~ "(:code:`set_parameters` est cependant facultatif)" +#~ " :" + +#~ msgid "which can be implemented in the following way:" +#~ msgstr "qui peut être mis en œuvre de la manière suivante :" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. 
The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." #~ msgstr "" -#~ "Comme nous l'avons déjà mentionné, nous" -#~ " utiliserons l'ensemble de données MNIST" -#~ " pour cette charge de travail " -#~ "d'apprentissage automatique. L'architecture du " -#~ "modèle (un modèle :code:`Séquentiel` très " -#~ "simple) est définie dans :code:`model()`." +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " code source complet " +#~ "`_ de cet exemple se " +#~ "trouve dans :code:`examples/quickstart-pytorch`." #~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). " +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" #~ msgstr "" -#~ "Nous devons maintenant définir la " -#~ "formation (fonction :code:`train()`) qui passe" -#~ " en boucle sur l'ensemble de la " -#~ "formation et mesure la perte pour " -#~ "chaque lot d'exemples de formation." #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." #~ msgstr "" -#~ "L'évaluation du modèle est définie dans" -#~ " la fonction :code:`test()`. 
Cette fonction" -#~ " passe en boucle sur tous les " -#~ "échantillons de test et mesure la " -#~ "perte et la précision du modèle en" -#~ " fonction de l'ensemble des données " -#~ "de test." #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" #~ msgstr "" -#~ "Après avoir défini le chargement des " -#~ "données, l'architecture du modèle, " -#~ "l'entraînement et l'évaluation, nous pouvons" -#~ " tout assembler et entraîner notre " -#~ "modèle sur MNIST. Note que le " -#~ "dispositif GPU/CPU pour l'entraînement et " -#~ "le test est défini dans le " -#~ ":code:`ctx` (contexte)." -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." #~ msgstr "" -#~ "Tu peux maintenant exécuter ta charge" -#~ " de travail (centralisée) d'apprentissage " -#~ "automatique MXNet :" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. 
Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." #~ msgstr "" -#~ "Jusqu'à présent, tout cela devrait te" -#~ " sembler assez familier si tu as " -#~ "déjà utilisé MXNet (ou même PyTorch)." -#~ " Passons à l'étape suivante et " -#~ "utilisons ce que nous avons construit" -#~ " pour créer un simple système " -#~ "d'apprentissage fédéré composé d'un serveur" -#~ " et de deux clients." -#~ msgid "MXNet meets Flower" -#~ msgstr "MXNet rencontre Flower" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." +#~ msgstr "" #~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" #~ msgstr "" -#~ "Jusqu'à présent, il n'était pas facile" -#~ " d'utiliser les charges de travail " -#~ "MXNet pour l'apprentissage fédéré car " -#~ "l'apprentissage fédéré n'est pas pris en" -#~ " charge dans MXNet. 
Comme Flower est" -#~ " totalement agnostique vis-à-vis du cadre" -#~ " d'apprentissage automatique sous-jacent, " -#~ "il peut être utilisé pour fédérer " -#~ "des charges de travail d'apprentissage " -#~ "automatique arbitraires. Cette section te " -#~ "montrera comment Flower peut être " -#~ "utilisé pour fédérer notre charge de " -#~ "travail MXNet centralisée." + +#~ msgid "Implementing a Flower client" +#~ msgstr "Mise en place d'un client Flower" #~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" -#~ "Le concept pour fédérer une charge " -#~ "de travail existante est toujours le " -#~ "même et facile à comprendre. Nous " -#~ "devons démarrer un *serveur* et ensuite" -#~ " utiliser le code dans " -#~ ":code:`mxnet_mnist.py` pour les *clients* qui" -#~ " sont connectés au *serveur*. Le " -#~ "*serveur* envoie les paramètres du " -#~ "modèle aux clients. Les *clients* " -#~ "exécutent la formation et mettent à " -#~ "jour les paramètres. Les paramètres mis" -#~ " à jour sont renvoyés au *serveur*" -#~ " qui fait la moyenne de toutes " -#~ "les mises à jour de paramètres " -#~ "reçues. 
Ceci décrit un tour du " -#~ "processus d'apprentissage fédéré et nous " -#~ "répétons cette opération pour plusieurs " -#~ "tours." +#~ "Pour mettre en œuvre le client " +#~ "Flower, nous créons une sous-classe " +#~ "de ``flwr.client.NumPyClient`` et mettons en" +#~ " œuvre les trois méthodes " +#~ "``get_parameters``, ``fit`` et ``evaluate`` :" #~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." #~ msgstr "" -#~ "Enfin, nous allons définir la logique" -#~ " de notre *client* dans :code:`client.py`" -#~ " et nous appuyer sur l'entraînement " -#~ "MXNet défini précédemment dans " -#~ ":code:`mxnet_mnist.py`. Notre *client* doit " -#~ "importer :code:`flwr`, mais aussi " -#~ ":code:`mxnet` pour mettre à jour les " -#~ "paramètres de notre modèle MXNet :" +#~ "La fonction ``start_simulation`` accepte un" +#~ " certain nombre d'arguments, parmi lesquels" +#~ " le ``client_fn`` utilisé pour créer " +#~ "les instances ``FlowerClient``, le nombre " +#~ "de clients à simuler (``num_clients``), " +#~ "le nombre de tours d'apprentissage " +#~ "fédéré (``num_rounds``), et la stratégie. " +#~ "La stratégie encapsule l'approche/algorithme " +#~ "d'apprentissage fédéré, par exemple, " +#~ "*Federated Averaging* (FedAvg)." 
#~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "" +#~ "La seule chose qui reste à faire" +#~ " est d'indiquer à la stratégie " +#~ "d'appeler cette fonction chaque fois " +#~ "qu'elle reçoit des dictionnaires de " +#~ "métriques d'évaluation de la part des" +#~ " clients :" + +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "" + +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "" + +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "" + +#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgstr "" + +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgstr "" + +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgstr "" + +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgstr "" + +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "" + +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "" + +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "" + +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "" + +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgstr "" + +#~ msgid 
"|329fb3c04c744eda83bb51fa444c2266|" #~ msgstr "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" -#~ msgstr "transforme les :code:`NDArray` du MXNet en :code:`ndarray` de NumPy" +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgstr "" -#~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." +#~ msgid "run\\_client\\_app" +#~ msgstr "" + +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" + +#~ msgid "Retrieve the corresponding layout by the string key." #~ msgstr "" -#~ "La partie la plus difficile est de" -#~ " transformer les paramètres MXNet de " -#~ ":code:`NDArray` en :code:`NumPy Arrays` pour" -#~ " les rendre lisibles pour Flower." #~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. 
We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" + +#~ msgid "the string key as the query for the layout." +#~ msgstr "" + +#~ msgid "Corresponding layout based on the query." #~ msgstr "" -#~ "Les deux méthodes :code:`NumPyClient` " -#~ ":code:`fit` et :code:`evaluate` utilisent les" -#~ " fonctions :code:`train()` et :code:`test()` " -#~ "définies précédemment dans :code:`mxnet_mnist.py`." -#~ " Ce que nous faisons vraiment ici," -#~ " c'est que nous indiquons à Flower," -#~ " par le biais de notre sous-" -#~ "classe :code:`NumPyClient`, laquelle de nos" -#~ " fonctions déjà définies doit être " -#~ "appelée pour l'entraînement et l'évaluation." -#~ " Nous avons inclus des annotations de" -#~ " type pour te donner une meilleure" -#~ " compréhension des types de données " -#~ "qui sont transmis." + +#~ msgid "run\\_server\\_app" +#~ msgstr "" + +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" #~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" #~ msgstr "" -#~ "Après avoir défini le chargement des " -#~ "données, l'architecture du modèle, la " -#~ "formation et l'évaluation, nous pouvons " -#~ "tout rassembler et former notre modèle" -#~ " :code:`Sequential` sur MNIST." 
#~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" + +#~ msgid "The total number of clients in this simulation." #~ msgstr "" -#~ "dans chaque fenêtre (assure-toi que " -#~ "le serveur est toujours en cours " -#~ "d'exécution avant de le faire) et " -#~ "tu verras ton projet MXNet exécuter " -#~ "l'apprentissage fédéré sur deux clients. " -#~ "Félicitations !" #~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. 
Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" -#~ "Le code source complet de cet " -#~ "exemple : `MXNet : From Centralized " -#~ "To Federated (Code) " -#~ "`_. Notre " -#~ "exemple est bien sûr un peu trop" -#~ " simplifié parce que les deux clients" -#~ " chargent exactement le même ensemble " -#~ "de données, ce qui n'est pas " -#~ "réaliste. Tu es maintenant prêt à " -#~ "explorer ce sujet plus en profondeur." -#~ " Pourquoi ne pas utiliser un CNN " -#~ "ou un ensemble de données différent " -#~ "? Pourquoi ne pas ajouter d'autres " -#~ "clients ?" -#~ msgid "with the following command sequence:" -#~ msgstr "avec la séquence de commandes suivante :" +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." +#~ msgstr "" #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." #~ msgstr "" -#~ "Si tu es un chercheur, tu peux " -#~ "très bien utiliser les certificats " -#~ "auto-signés générés à l'aide des " -#~ "scripts qui font partie de ce " -#~ "guide." #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." 
#~ msgstr "" -#~ "Nous allons maintenant montrer comment " -#~ "écrire un serveur qui utilise les " -#~ "scripts générés précédemment." #~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" #~ msgstr "" -#~ "Lorsqu'il fournit des certificats, le " -#~ "serveur attend un tuple de trois " -#~ "certificats. :code:`Path` peut être utilisé" -#~ " pour lire facilement le contenu de" -#~ " ces fichiers en chaînes d'octets, ce" -#~ " qui est le type de données " -#~ "attendu par :code:`start_server`." #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`_." +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." #~ msgstr "" -#~ msgid "Flower server" -#~ msgstr "Serveur de Flower" +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant fastai et Flower !" #~ msgid "" -#~ "The command will pull the Docker " -#~ "image with the tag " -#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. 
" -#~ "The tag contains the information which" -#~ " Flower, Python and Ubuntu is used." -#~ " In this case, it uses Flower " -#~ "1.7.0, Python 3.11 and Ubuntu 22.04. " -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en savoir plus." #~ msgid "" -#~ "By default, the Flower server keeps " -#~ "state in-memory. When using the " -#~ "Docker flag ``--rm``, the state is " -#~ "not persisted between container starts. " -#~ "We will show below how to save " -#~ "the state in a file on your " -#~ "host system." +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." #~ msgstr "" #~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower server. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" #~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " à l'aide des transformateurs Hugging " +#~ "Face et de Flower !" + +#~ msgid "Dependencies" +#~ msgstr "Dépendances" #~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `_ when " -#~ "deploying to a production environment." 
+#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" #~ msgstr "" +#~ "Pour suivre ce tutoriel, tu devras " +#~ "installer les paquets suivants : " +#~ ":code:`datasets`, :code:`evaluate`, :code:`flwr`, " +#~ ":code:`torch`, et :code:`transformers`. Cela " +#~ "peut être fait en utilisant :code:`pip`" +#~ " :" + +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "Flux de travail standard pour le visage" + +#~ msgid "Handling the data" +#~ msgstr "Traitement des données" #~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the server " -#~ "supports:" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" #~ msgstr "" +#~ "Pour récupérer le jeu de données " +#~ "IMDB, nous utiliserons la bibliothèque " +#~ ":code:`datasets` de Hugging Face. Nous " +#~ "devons ensuite tokeniser les données et" +#~ " créer des :code:`PyTorch` dataloaders, ce" +#~ " qui est fait dans la fonction " +#~ ":code:`load_data` :" + +#~ msgid "Training and testing the model" +#~ msgstr "Former et tester le modèle" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the server on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``-v`` to" -#~ " mount the user's home directory " -#~ "(``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." 
+#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" +#~ "Une fois que nous avons trouvé un" +#~ " moyen de créer notre trainloader et" +#~ " notre testloader, nous pouvons nous " +#~ "occuper de l'entraînement et du test." +#~ " C'est très similaire à n'importe " +#~ "quelle boucle d'entraînement ou de test" +#~ " :code:`PyTorch` :" + +#~ msgid "Creating the model itself" +#~ msgstr "Créer le modèle lui-même" #~ msgid "" -#~ "As soon as the server starts, the" -#~ " file ``state.db`` is created in the" -#~ " user's home directory on your host" -#~ " system. If the file already exists," -#~ " the server tries to restore the " -#~ "state from the file. To start the" -#~ " server with an empty database, " -#~ "simply remove the ``state.db`` file." +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" #~ msgstr "" +#~ "Pour créer le modèle lui-même, " +#~ "nous allons simplement charger le modèle" +#~ " distillBERT pré-entraîné en utilisant le" +#~ " :code:`AutoModelForSequenceClassification` de Hugging" +#~ " Face :" + +#~ msgid "Creating the IMDBClient" +#~ msgstr "Création du client IMDBC" #~ msgid "" -#~ "To enable SSL, you will need a " -#~ "CA certificate, a server certificate and" -#~ " a server private key." +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" +#~ "Pour fédérer notre exemple à plusieurs" +#~ " clients, nous devons d'abord écrire " +#~ "notre classe de client Flower (héritant" +#~ " de :code:`flwr.client.NumPyClient`). 
C'est très" +#~ " facile, car notre modèle est un " +#~ "modèle :code:`PyTorch` standard :" + +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" +#~ "La fonction :code:`get_parameters` permet au" +#~ " serveur d'obtenir les paramètres du " +#~ "client. Inversement, la fonction " +#~ ":code:`set_parameters` permet au serveur " +#~ "d'envoyer ses paramètres au client. " +#~ "Enfin, la fonction :code:`fit` forme le" +#~ " modèle localement pour le client, et" +#~ " la fonction :code:`evaluate` teste le " +#~ "modèle localement et renvoie les mesures" +#~ " correspondantes." + +#~ msgid "Starting the server" +#~ msgstr "Démarrer le serveur" + +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" +#~ "Maintenant que nous avons un moyen " +#~ "d'instancier les clients, nous devons " +#~ "créer notre serveur afin d'agréger les" +#~ " résultats. 
Avec Flower, cela peut " +#~ "être fait très facilement en choisissant" +#~ " d'abord une stratégie (ici, nous " +#~ "utilisons :code:`FedAvg`, qui définira les " +#~ "poids globaux comme la moyenne des " +#~ "poids de tous les clients à chaque" +#~ " tour) et en utilisant ensuite la " +#~ "fonction :code:`flwr.server.start_server` :" + +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" +#~ "La fonction :code:`weighted_average` est là" +#~ " pour fournir un moyen d'agréger les" +#~ " mesures réparties entre les clients " +#~ "(en gros, cela nous permet d'afficher" +#~ " une belle moyenne de précision et" +#~ " de perte pour chaque tour)." + +#~ msgid "Putting everything together" +#~ msgstr "Tout assembler" + +#~ msgid "We can now start client instances using:" +#~ msgstr "" +#~ "Nous pouvons maintenant démarrer des " +#~ "instances de clients en utilisant :" + +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "" +#~ "Et ils pourront se connecter au " +#~ "serveur et démarrer la formation " +#~ "fédérée." + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" +#~ "Si tu veux voir tout ce qui " +#~ "est mis ensemble, tu devrais consulter" +#~ " l'exemple de code complet : " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." + +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." 
#~ msgstr "" +#~ "Bien sûr, c'est un exemple très " +#~ "basique, et beaucoup de choses peuvent" +#~ " être ajoutées ou modifiées, il " +#~ "s'agissait juste de montrer avec quelle" +#~ " simplicité on pouvait fédérer un " +#~ "flux de travail Hugging Face à " +#~ "l'aide de Flower." #~ msgid "" -#~ "For testing purposes, you can generate" -#~ " your own self-signed certificates. " -#~ "The `Enable SSL connections " -#~ "`_ page contains " -#~ "a section that will guide you " -#~ "through the process." +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." #~ msgstr "" +#~ "Notez que dans cet exemple, nous " +#~ "avons utilisé :code:`PyTorch`, mais nous " +#~ "aurions très bien pu utiliser " +#~ ":code:`TensorFlow`." #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``-v`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the server to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the server with the " -#~ "``--certificates`` flag." +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." #~ msgstr "" -#~ msgid "Using a different Flower or Python version" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" #~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant PyTorch Lightning et " +#~ "Flower !" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower or Python, you " -#~ "can do so by changing the tag. " -#~ "All versions we provide are available" -#~ " on `Docker Hub " -#~ "`_." +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." 
#~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en " +#~ "savoir plus." #~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." #~ msgstr "" -#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" #~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en moins de 20 lignes de code" +#~ " !" + +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" + +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" +#~ "Comme nous voulons utiliser l'API Keras" +#~ " de TensorFlow (TF), nous devons " +#~ "également installer TF :" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" +#~ "Ensuite, dans un fichier appelé " +#~ ":code:`client.py`, importe Flower et " +#~ "TensorFlow :" + +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" +#~ "Nous utilisons les utilitaires Keras de" +#~ " TF pour charger CIFAR10, un ensemble" +#~ " de données de classification d'images " +#~ "colorées populaire pour l'apprentissage " +#~ "automatique. 
L'appel à " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` télécharge " +#~ "CIFAR10, le met en cache localement, " +#~ "puis renvoie l'ensemble d'entraînement et " +#~ "de test sous forme de NumPy " +#~ "ndarrays." -#~ msgid "flower-driver-api" -#~ msgstr "flower-driver-api" +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" +#~ "Ensuite, nous avons besoin d'un modèle." +#~ " Pour les besoins de ce tutoriel, " +#~ "nous utilisons MobilNetV2 avec 10 " +#~ "classes de sortie :" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" +#~ "Le serveur Flower interagit avec les " +#~ "clients par le biais d'une interface " +#~ "appelée :code:`Client`. Lorsque le serveur " +#~ "sélectionne un client particulier pour " +#~ "la formation, il envoie des instructions" +#~ " de formation sur le réseau. Le " +#~ "client reçoit ces instructions et " +#~ "appelle l'une des méthodes :code:`Client` " +#~ "pour exécuter ton code (c'est-à-dire " +#~ "pour former le réseau neuronal que " +#~ "nous avons défini plus tôt)." -#~ msgid "flower-fleet-api" -#~ msgstr "flower-fleet-api" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." 
+#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise Keras. L'interface " +#~ ":code:`NumPyClient` définit trois méthodes qui" +#~ " peuvent être mises en œuvre de " +#~ "la manière suivante :" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " -#~ ":py:class:`bytes`, :py:class:`bool`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" +#~ "Nous pouvons maintenant créer une " +#~ "instance de notre classe :code:`CifarClient`" +#~ " et ajouter une ligne pour exécuter" +#~ " ce client :" #~ msgid "" -#~ ":py:obj:`create_error_reply " -#~ "`\\ \\(error\\, " -#~ "ttl\\)" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. 
If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ ":code:`NumPyClient` et d'appeler " +#~ ":code:`fl.client.start_client()`. La chaîne " +#~ ":code:`\"[: :]:8080\"` indique au client " +#~ "à quel serveur se connecter. Dans " +#~ "notre cas, nous pouvons exécuter le " +#~ "serveur et le client sur la même" +#~ " machine, c'est pourquoi nous utilisons " +#~ ":code:`\"[: :]:8080\"`. Si nous exécutons " +#~ "une charge de travail véritablement " +#~ "fédérée avec le serveur et les " +#~ "clients fonctionnant sur des machines " +#~ "différentes, tout ce qui doit changer" +#~ " est l'adresse :code:`server_address` vers " +#~ "laquelle nous dirigeons le client." + +#~ msgid "Each client will have its own dataset." +#~ msgstr "Chaque client aura son propre ensemble de données." #~ msgid "" -#~ ":py:obj:`create_reply `\\ " -#~ "\\(content\\, ttl\\)" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" #~ msgstr "" +#~ "Tu devrais maintenant voir comment la" +#~ " formation se déroule dans le tout" +#~ " premier terminal (celui qui a " +#~ "démarré le serveur) :" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." #~ msgstr "" +#~ "Félicitations ! 
Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " `code source complet " +#~ "`_ pour cela se trouve" +#~ " dans :code:`examples/quickstart-tensorflow/client.py`." -#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" #~ msgstr "" -#~ msgid "Start a Flower Driver API server." -#~ msgstr "Tout d'abord, démarre un serveur Flower :" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" -#~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" #~ msgstr "" -#~ "Flower 1.0 : ``start_server(..., " -#~ "config=flwr.server.ServerConfig(num_rounds=3, " -#~ "round_timeout=600.0), ...)``" -#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" #~ msgstr "" -#~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" #~ msgstr "" -#~ msgid "Disconnect from the SuperLink if connected." +#~ msgid "|89d30862e62e4f9989e193483a08680a|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`create_message `\\" -#~ " \\(content\\, message\\_type\\, ...\\)" +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" #~ msgstr "" -#~ msgid "" -#~ "Time-to-live for the round trip" -#~ " of this message, i.e., the time " -#~ "from sending this message to receiving" -#~ " a reply. It specifies the duration" -#~ " for which the message and its " -#~ "potential reply are considered valid." 
+#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" #~ msgstr "" -#~ msgid "start\\_driver" -#~ msgstr "start_client" +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" -#~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" #~ msgstr "" -#~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_driver` will create one." +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" #~ msgstr "" -#~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" #~ msgstr "" -#~ msgid "The Driver object to use." +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" -#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" #~ msgstr "" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ msgid "|cc080a555947492fa66131dc3a967603|" #~ msgstr "" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" #~ msgstr "" -#~ msgid "run\\_simulation\\_from\\_cli" -#~ msgstr "Simulation de moniteur" +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" -#~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." 
+#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" #~ msgstr "" -#~ msgid "Quickstart MXNet" -#~ msgstr "Démarrage rapide de MXNet" +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" -#~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" #~ msgstr "" -#~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." +#~ msgid "|f403fcd69e4e44409627e748b404c086|" #~ msgstr "" -#~ "Dans ce tutoriel, nous allons apprendre" -#~ " à former un modèle :code:`Sequential` " -#~ "sur MNIST à l'aide de Flower et" -#~ " de MXNet." -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" -#~ msgstr "Puisque nous voulons utiliser MXNet, allons-y et installons-le :" +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ msgid "|368378731066486fa4397e89bc6b870c|" #~ msgstr "" -#~ "Maintenant que toutes nos dépendances " -#~ "sont installées, lançons une formation " -#~ "distribuée simple avec deux clients et" -#~ " un serveur. Notre procédure de " -#~ "formation et l'architecture du réseau " -#~ "sont basées sur le tutoriel de " -#~ "reconnaissance de chiffres écrits à la" -#~ " main du MXNet " -#~ "`_." 
-#~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" #~ msgstr "" -#~ "Dans un fichier appelé :code:`client.py`, " -#~ "importe Flower et les paquets liés " -#~ "au MXNet :" -#~ msgid "In addition, define the device allocation in MXNet with:" -#~ msgstr "En outre, définis l'attribution de l'appareil dans MXNet avec :" +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" -#~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" #~ msgstr "" -#~ "Nous utilisons MXNet pour charger MNIST," -#~ " un ensemble de données de " -#~ "classification d'images populaire de chiffres" -#~ " manuscrits pour l'apprentissage automatique. " -#~ "L'utilitaire MXNet :code:`mx.test_utils.get_mnist()` " -#~ "télécharge les données d'entraînement et " -#~ "de test." #~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." +#~ "Install `xz` (to install different " +#~ "Python versions) and `pandoc` to build" +#~ " the docs::" #~ msgstr "" -#~ "Définis l'entraînement et la perte avec" -#~ " MXNet. Nous entraînons le modèle en" -#~ " parcourant en boucle l'ensemble des " -#~ "données, nous mesurons la perte " -#~ "correspondante et nous l'optimisons." #~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." 
+#~ "Ensure you system (Ubuntu 22.04+) is " +#~ "up-to-date, and you have all " +#~ "necessary packages::" #~ msgstr "" -#~ "Ensuite, nous définissons la validation " -#~ "de notre modèle d'apprentissage automatique." -#~ " Nous effectuons une boucle sur " -#~ "l'ensemble de test et mesurons à " -#~ "la fois la perte et la précision" -#~ " sur l'ensemble de test." #~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." +#~ "Let's create the Python environment for" +#~ " all-things Flower. If you wish " +#~ "to use :code:`pyenv`, we provide two " +#~ "convenience scripts that you can use." +#~ " If you prefer using something else" +#~ " than :code:`pyenv`, create a new " +#~ "environment, activate and skip to the" +#~ " last point where all packages are" +#~ " installed." +#~ msgstr "" + +#~ msgid "" +#~ "If in a hurry, bypass the hook " +#~ "using ``--no-verify`` with the ``git " +#~ "commit`` command. ::" #~ msgstr "" -#~ "Après avoir défini la formation et " -#~ "le test d'un modèle d'apprentissage " -#~ "automatique MXNet, nous utilisons ces " -#~ "fonctions pour mettre en œuvre un " -#~ "client Flower." -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" -#~ msgstr "Nos clients Flower utiliseront un modèle simple :code:`Sequential` :" +#~ msgid "" +#~ "Flower's documentation uses `Sphinx " +#~ "`_. There's no " +#~ "convenience script to re-build the " +#~ "documentation yet, but it's pretty " +#~ "easy::" +#~ msgstr "" #~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." 
+#~ " For more information, please see " +#~ "`Limitations`_." #~ msgstr "" -#~ "Après avoir chargé l'ensemble de données" -#~ " avec :code:`load_data()`, nous effectuons " -#~ "une propagation vers l'avant pour " -#~ "initialiser le modèle et les paramètres" -#~ " du modèle avec :code:`model(init)`. " -#~ "Ensuite, nous implémentons un client " -#~ "Flower." #~ msgid "" -#~ "Flower provides a convenience class " -#~ "called :code:`NumPyClient` which makes it " -#~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." -#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" #~ msgstr "" -#~ "Flower fournit une classe de commodité" -#~ " appelée :code:`NumPyClient` qui facilite " -#~ "l'implémentation de l'interface :code:`Client` " -#~ "lorsque ta charge de travail utilise " -#~ "MXNet. L'implémentation de :code:`NumPyClient` " -#~ "signifie généralement la définition des " -#~ "méthodes suivantes (:code:`set_parameters` est " -#~ "cependant facultatif) :" -#~ msgid "They can be implemented in the following way:" -#~ msgstr "Ils peuvent être mis en œuvre de la manière suivante :" +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" #~ msgid "" -#~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " -#~ "one line to actually run this " -#~ "client:" +#~ "\\small\n" +#~ "\\frac{∆ \\times \\sqrt{2 \\times " +#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +#~ "\n" +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperLink`" #~ msgstr "" -#~ "Nous pouvons maintenant créer une " -#~ "instance de notre classe :code:`MNISTClient`" -#~ " et ajouter une ligne pour exécuter" -#~ " ce client :" #~ msgid "" -#~ "That's it for the client. 
We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "To enable node authentication, first you" +#~ " need to configure SSL/TLS connections " +#~ "to secure the SuperLink<>SuperNode " +#~ "communication. You can find the complete" +#~ " guide `here `_. After " +#~ "configuring secure connections, you can " +#~ "enable client authentication in a " +#~ "long-running Flower :code:`SuperLink`. Use " +#~ "the following terminal command to start" +#~ " a Flower :code:`SuperNode` that has " +#~ "both secure connections and node " +#~ "authentication enabled:" #~ msgstr "" -#~ "C'est tout pour le client. Il nous" -#~ " suffit d'implémenter :code:`Client` ou " -#~ ":code:`NumPyClient` et d'appeler " -#~ ":code:`fl.client.start_client()`. La chaîne " -#~ ":code:`\"0.0.0:8080\"` indique au client à " -#~ "quel serveur se connecter. Dans notre" -#~ " cas, nous pouvons exécuter le " -#~ "serveur et le client sur la même" -#~ " machine, c'est pourquoi nous utilisons " -#~ ":code:`\"0.0.0:8080\"`. Si nous exécutons une" -#~ " charge de travail véritablement fédérée" -#~ " avec le serveur et les clients " -#~ "s'exécutant sur des machines différentes, " -#~ "tout ce qui doit changer est " -#~ ":code:`server_address` que nous transmettons " -#~ "au client." #~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. 
Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" +#~ "The first flag :code:`--auth-list-" +#~ "public-keys` expects a path to a " +#~ "CSV file storing all known node " +#~ "public keys. You need to store all" +#~ " known node public keys that are " +#~ "allowed to participate in a federation" +#~ " in one CSV file (:code:`.csv`)." #~ msgstr "" -#~ "Le client et le serveur étant " -#~ "prêts, nous pouvons maintenant tout " -#~ "exécuter et voir l'apprentissage fédéré " -#~ "en action. Les systèmes d'apprentissage " -#~ "fédéré ont généralement un serveur et" -#~ " plusieurs clients. Nous devons donc " -#~ "commencer par démarrer le serveur :" #~ msgid "" -#~ "Congratulations! You've successfully built and" -#~ " run your first federated learning " -#~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ "The second and third flags :code" +#~ ":`--auth-superlink-private-key` and :code" +#~ ":`--auth-superlink-public-key` expect paths" +#~ " to the server's private and public" +#~ " keys. For development purposes, you " +#~ "can generate a private and public " +#~ "key pair using :code:`ssh-keygen -t " +#~ "ecdsa -b 384`." #~ msgstr "" -#~ "Félicitations ! Tu as réussi à " -#~ "construire et à faire fonctionner ton" -#~ " premier système d'apprentissage fédéré. Le" -#~ " code source complet " -#~ "`_ de cet exemple se " -#~ "trouve dans :code:`examples/quickstart-mxnet`." 
-#~ msgid ":code:`load_mnist()`" -#~ msgstr ":code:`load_mnist()`" +#~ msgid "Enable node authentication in :code:`SuperNode`" +#~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" -#~ msgstr "Charge l'ensemble de données MNIST à l'aide d'OpenML" +#~ msgid "" +#~ "Similar to the long-running Flower " +#~ "server (:code:`SuperLink`), you can easily " +#~ "enable node authentication in the " +#~ "long-running Flower client (:code:`SuperNode`)." +#~ " Use the following terminal command " +#~ "to start an authenticated :code:`SuperNode`:" +#~ msgstr "" -#~ msgid ":code:`shuffle()`" -#~ msgstr ":code:`shuffle()`" +#~ msgid "" +#~ "The :code:`--auth-supernode-private-key` " +#~ "flag expects a path to the node's" +#~ " private key file and the :code" +#~ ":`--auth-supernode-public-key` flag expects" +#~ " a path to the node's public " +#~ "key file. For development purposes, you" +#~ " can generate a private and public" +#~ " key pair using :code:`ssh-keygen -t" +#~ " ecdsa -b 384`." +#~ msgstr "" -#~ msgid "Shuffles data and its label" -#~ msgstr "Mélange les données et leur étiquette" +#~ msgid "" +#~ "You should now have learned how to" +#~ " start a long-running Flower server" +#~ " (:code:`SuperLink`) and client " +#~ "(:code:`SuperNode`) with node authentication " +#~ "enabled. You should also know the " +#~ "significance of the private key and " +#~ "store it safely to minimize security " +#~ "risks." 
+#~ msgstr "" -#~ msgid ":code:`partition()`" -#~ msgstr ":code:`partition()`" +#~ msgid "" +#~ "If you have not added ``conda-" +#~ "forge`` to your channels, you will " +#~ "first need to run the following::" +#~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" -#~ msgstr "Divise les ensembles de données en un certain nombre de partitions" +#~ msgid "" +#~ "Once the ``conda-forge`` channel has " +#~ "been enabled, ``flwr`` can be installed" +#~ " with ``conda``::" +#~ msgstr "" + +#~ msgid "or with ``mamba``::" +#~ msgstr "" #~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ "For central DP with server-side " +#~ "clipping, there are two :code:`Strategy` " +#~ "classes that act as wrappers around " +#~ "the actual :code:`Strategy` instance (for " +#~ "example, :code:`FedAvg`). The two wrapper " +#~ "classes are " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " +#~ "fixed and adaptive clipping." #~ msgstr "" -#~ "Nous chargeons l'ensemble de données " -#~ "MNIST de `OpenML `_," -#~ " un ensemble de données de " -#~ "classification d'images populaires de chiffres" -#~ " manuscrits pour l'apprentissage automatique. " -#~ "L'utilitaire :code:`utils.load_mnist()` télécharge " -#~ "les données d'entraînement et de test." -#~ " L'ensemble d'entraînement est ensuite " -#~ "divisé en 10 partitions avec " -#~ ":code:`utils.partition()`." -#~ msgid "Let's get stated!" -#~ msgstr "Allons-y, déclarons-le !" 
+#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use server-" +#~ "side fixed clipping using the " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " +#~ "class. The same approach can be " +#~ "used with " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " +#~ "adjusting the corresponding input parameters." +#~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "" +#~ "For central DP with client-side " +#~ "clipping, the server sends the clipping" +#~ " value to selected clients on each" +#~ " round. Clients can use existing " +#~ "Flower :code:`Mods` to perform the " +#~ "clipping. Two mods are available for " +#~ "fixed and adaptive client-side clipping:" +#~ " :code:`fixedclipping_mod` and " +#~ ":code:`adaptiveclipping_mod` with corresponding " +#~ "server-side wrappers " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." #~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "" +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use differential" +#~ " privacy with client-side fixed " +#~ "clipping using both the " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " +#~ "class and, on the client, " +#~ ":code:`fixedclipping_mod`:" #~ msgstr "" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "" +#~ "In addition to the server-side " +#~ "strategy wrapper, the :code:`ClientApp` needs" +#~ " to configure the matching " +#~ ":code:`fixedclipping_mod` to perform the " +#~ "client-side clipping:" #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. 
Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." 
#~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "" +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." #~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." #~ msgstr "" #~ msgid "" -#~ "Flower provides pre-made docker images" -#~ " on `Docker Hub `_" -#~ " that include all necessary dependencies" -#~ " for running the SuperLink. You can" -#~ " also build your own custom docker" -#~ " images from scratch with a different" -#~ " version of Python or Ubuntu if " -#~ "that is what you need. In this " -#~ "guide, we will explain what images " -#~ "exist and how to build them " -#~ "locally." 
+#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." #~ msgstr "" #~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " ``base`` image and a ``superlink`` " -#~ "image. The base image, as the name" -#~ " suggests, contains basic dependencies that" -#~ " the SuperLink needs. This includes " -#~ "system dependencies, Python and Python " -#~ "tools. The SuperLink image is based " -#~ "on the base image, but it " -#~ "additionally installs the SuperLink using " -#~ "``pip``." +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." #~ msgstr "" #~ msgid "" -#~ "Both, base and SuperLink image are " -#~ "configured via build arguments. Through " -#~ "build arguments, we can make our " -#~ "build more flexible. For example, in " -#~ "the base image, we can specify the" -#~ " version of Python to install using" -#~ " the ``PYTHON_VERSION`` build argument. " -#~ "Some of the build arguments have " -#~ "default values, others must be specified" -#~ " when building the image. All " -#~ "available build arguments for each image" -#~ " are listed in one of the " -#~ "tables below." +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. 
From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" #~ msgstr "" -#~ msgid "``3.11``" -#~ msgstr "1.0.0rc1" +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" -#~ msgid "``UBUNTU_VERSION``" +#~ msgid "" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." #~ msgstr "" -#~ msgid "Version of the official Ubuntu Docker image." +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." #~ msgstr "" -#~ msgid "Defaults to ``22.04``." +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. 
Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" #~ msgstr "" #~ msgid "" -#~ "The following example creates a base " -#~ "image with Python 3.11.0, pip 23.0.1 " -#~ "and setuptools 69.0.2:" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" #~ msgstr "" -#~ msgid "Building the SuperLink image" -#~ msgstr "Démarrer le serveur" +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." +#~ msgstr "" -#~ msgid "Defaults to ``flwr/base``." +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." #~ msgstr "" -#~ msgid "The Python version of the base image." -#~ msgstr "Évaluer la réponse d'un client." +#~ msgid "" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" +#~ msgstr "" -#~ msgid "Defaults to ``py3.11``." +#~ msgid "" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." #~ msgstr "" -#~ msgid "Defaults to ``ubuntu22.04``." 
+#~ msgid "" +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." #~ msgstr "" -#~ msgid "The PyPI package to install." +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" #~ msgstr "" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "Flux de travail" - #~ msgid "" -#~ "The following example creates a " -#~ "SuperLink image with the official Flower" -#~ " base image py3.11-ubuntu22.04 and Flower" -#~ " 1.8.0:" +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." #~ msgstr "" #~ msgid "" -#~ "The name of image is ``flwr_superlink``" -#~ " and the tag ``0.1.0``. Remember that" -#~ " the build arguments as well as " -#~ "the name and tag can be adapted" -#~ " to your needs. These values serve" -#~ " as examples only." +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" #~ msgstr "" #~ msgid "" -#~ "If you want to use your own " -#~ "base image instead of the official " -#~ "Flower base image, all you need to" -#~ " do is set the ``BASE_REPOSITORY``, " -#~ "``PYTHON_VERSION`` and ``UBUNTU_VERSION`` build " -#~ "arguments." +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. 
The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." #~ msgstr "" -#~ msgid "Creating New Messages" -#~ msgstr "Création de nouveaux messages" +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." +#~ msgstr "" #~ msgid "" -#~ "This is a simple guide for " -#~ "creating a new type of message " -#~ "between the server and clients in " -#~ "Flower." +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." #~ msgstr "" -#~ "Voici un guide simple pour créer " -#~ "un nouveau type de message entre " -#~ "le serveur et les clients dans " -#~ "Flower." #~ msgid "" -#~ "Let's suppose we have the following " -#~ "example functions in :code:`server.py` and " -#~ ":code:`numpy_client.py`..." +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" #~ msgstr "" -#~ "Supposons que nous ayons les fonctions" -#~ " suivantes dans :code:`server.py` et " -#~ ":code:`numpy_client.py`..." -#~ msgid "Server's side:" -#~ msgstr "Côté serveur :" +#~ msgid "" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." 
+#~ msgstr "" -#~ msgid "Client's side:" -#~ msgstr "Côté client :" +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" +#~ msgstr "" #~ msgid "" -#~ "Let's now see what we need to " -#~ "implement in order to get this " -#~ "simple function between the server and" -#~ " client to work!" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" #~ msgstr "" -#~ "Voyons maintenant ce que nous devons " -#~ "mettre en œuvre pour que cette " -#~ "simple fonction entre le serveur et " -#~ "le client fonctionne !" -#~ msgid "Message Types for Protocol Buffers" -#~ msgstr "Types de messages pour les tampons de protocole" +#~ msgid "" +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" +#~ msgstr "" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." #~ msgstr "" -#~ "La première chose à faire est de" -#~ " définir un type de message pour " -#~ "le système RPC dans :code:`transport.proto`." -#~ " Notez que nous devons le faire " -#~ "à la fois pour les messages de " -#~ "demande et de réponse. 
Pour plus " -#~ "de détails sur la syntaxe de " -#~ "proto3, veuillez consulter la `documentation" -#~ " officielle `_." -#~ msgid "Within the :code:`ServerMessage` block:" -#~ msgstr "Dans le bloc :code:`ServerMessage` :" +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." +#~ msgstr "" -#~ msgid "Within the ClientMessage block:" -#~ msgstr "Dans le bloc ClientMessage :" +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" -#~ msgid "" -#~ "Make sure to also add a field " -#~ "of the newly created message type " -#~ "in :code:`oneof msg`." +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" #~ msgstr "" -#~ "Veille à ajouter également un champ " -#~ "du type de message nouvellement créé " -#~ "dans :code:`oneof msg`." -#~ msgid "Once that is done, we will compile the file with:" -#~ msgstr "Une fois que c'est fait, nous compilerons le fichier avec :" +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" -#~ msgid "If it compiles successfully, you should see the following message:" -#~ msgstr "S'il se compile avec succès, tu devrais voir le message suivant :" +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" -#~ msgid "Serialization and Deserialization Functions" -#~ msgstr "Fonctions de sérialisation et de désérialisation" +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgstr "" -#~ msgid "" -#~ "Our next step is to add functions" -#~ " to serialize and deserialize Python " -#~ "datatypes to or from our defined " -#~ "RPC message types. You should add " -#~ "these functions in :code:`serde.py`." 
+#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" #~ msgstr "" -#~ "La prochaine étape consiste à ajouter" -#~ " des fonctions pour sérialiser et " -#~ "désérialiser les types de données Python" -#~ " vers ou à partir des types de" -#~ " messages RPC définis. Tu dois " -#~ "ajouter ces fonctions dans :code:`serde.py`." -#~ msgid "The four functions:" -#~ msgstr "Les quatre fonctions :" +#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgstr "" -#~ msgid "Sending the Message from the Server" -#~ msgstr "Envoi du message à partir du serveur" +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" -#~ msgid "" -#~ "Now write the request function in " -#~ "your Client Proxy class (e.g., " -#~ ":code:`grpc_client_proxy.py`) using the serde " -#~ "functions you just created:" +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" #~ msgstr "" -#~ "Écris maintenant la fonction de demande" -#~ " dans ta classe Client Proxy (par " -#~ "exemple, :code:`grpc_client_proxy.py`) en utilisant" -#~ " les fonctions serde que tu viens " -#~ "de créer :" -#~ msgid "Receiving the Message by the Client" -#~ msgstr "Réception du message par le client" +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" -#~ msgid "" -#~ "Last step! Modify the code in " -#~ ":code:`message_handler.py` to check the field" -#~ " of your message and call the " -#~ ":code:`example_response` function. Remember to " -#~ "use the serde functions!" +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" #~ msgstr "" -#~ "Dernière étape ! Modifie le code " -#~ "dans :code:`message_handler.py` pour vérifier " -#~ "le champ de ton message et appeler" -#~ " la fonction :code:`example_response`. N'oublie" -#~ " pas d'utiliser les fonctions serde !" 
-#~ msgid "Within the handle function:" -#~ msgstr "Dans le cadre de la fonction de poignée :" +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgstr "" -#~ msgid "And add a new function:" -#~ msgstr "Et ajoute une nouvelle fonction :" +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" -#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" #~ msgstr "" -#~ "Avec un peu de chance, lorsque tu" -#~ " exécuteras ton programme, tu obtiendras" -#~ " le résultat escompté !" -#~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`__." +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" #~ msgstr "" -#~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``--volume``" -#~ " to mount the user's home directory" -#~ " (``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" #~ msgstr "" -#~ msgid "" -#~ "As soon as the SuperLink starts, " -#~ "the file ``state.db`` is created in " -#~ "the user's home directory on your " -#~ "host system. If the file already " -#~ "exists, the SuperLink tries to restore" -#~ " the state from the file. To " -#~ "start the SuperLink with an empty " -#~ "database, simply remove the ``state.db`` " -#~ "file." 
+#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" #~ msgstr "" -#~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``--volume`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the SuperLink to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the SuperLink with the" -#~ " ``--certificates`` flag." +#~ msgid "|d62da263071d45a496f543e41fce3a19|" #~ msgstr "" -#~ msgid "" -#~ "``--server 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" #~ msgstr "" -#~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the SuperNode to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" #~ msgstr "" -#~ msgid "" -#~ "``--server 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" #~ msgstr "" -#~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" #~ msgstr "" -#~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower, for example Flower" -#~ " nightly, you can do so by " -#~ "changing the tag. All available versions" -#~ " are on `Docker Hub " -#~ "`__." 
+#~ msgid "|e6ca84e1df244f238288a768352678e5|" #~ msgstr "" -#~ msgid "" -#~ "Here's another example to start with " -#~ "HTTPS. Use the ``--certificates`` command " -#~ "line argument to pass paths to (CA" -#~ " certificate, server certificate, and " -#~ "server private key)." +#~ msgid "|39c2422082554a21963baffb33a0d057|" #~ msgstr "" -#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" #~ msgstr "" -#~ msgid "Run Flower server (Driver API)." -#~ msgstr "flower-driver-api" +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" -#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" #~ msgstr "" -#~ msgid "Run Flower server (Fleet API)." -#~ msgstr "flower-fleet-api" +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" -#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" #~ msgstr "" -#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" #~ msgstr "" -#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" #~ msgstr "" -#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" #~ msgstr "" -#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" #~ msgstr "" -#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" #~ msgstr "" -#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" #~ msgstr "" -#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" #~ msgstr "" -#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" #~ msgstr "" -#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" #~ msgstr "" -#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" #~ msgstr "" 
-#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" #~ msgstr "" -#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" #~ msgstr "" -#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgid "|e7cec00a114b48359935c6510595132e|" #~ msgstr "" diff --git a/doc/locales/ko/LC_MESSAGES/framework-docs.po b/doc/locales/ko/LC_MESSAGES/framework-docs.po index d0ba4f6ed5a1..424eaf5f86a2 100644 --- a/doc/locales/ko/LC_MESSAGES/framework-docs.po +++ b/doc/locales/ko/LC_MESSAGES/framework-docs.po @@ -7,78 +7,226 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-06-17 16:09+0200\n" -"PO-Revision-Date: 2024-06-25 10:43+0000\n" -"Last-Translator: \"Young D. Kwon\" \n" -"Language-Team: Korean \n" +"POT-Creation-Date: 2024-10-10 00:29+0000\n" +"PO-Revision-Date: 2024-08-23 13:09+0000\n" +"Last-Translator: Seulki Yun \n" "Language: ko\n" +"Language-Team: Korean \n" +"Plural-Forms: nplurals=1; plural=0;\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=1; plural=0;\n" -"X-Generator: Weblate 5.6-rc\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Flower 아키텍처" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." 
+msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 +msgid "" +"Everything that is not part of the public API is part of the private API." +" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 +#, fuzzy +msgid "Flower public API" +msgstr "Flower ClientApp." + +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. 
Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 +msgid "" +"What about components that are nested deeper in the hierarchy? Let's look" +" at Flower strategies to see another typical pattern. 
Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "엣지 클라이언트 엔진" +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 msgid "" -"`Flower `_ core framework architecture with Edge Client " -"Engine" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." 
msgstr "" -"`Flower `_의 핵심 프레임워크 아키텍처와 엣지 클라이언트 엔" -"진" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "가상 클라이언트 엔진" +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 +msgid "Flower public API of private packages" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" msgstr "" -"`Flower `_의 핵심 프레임워크 아키텍처와 가상 클라이언트 엔" -"진" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "동일 작업에서 가상 클라이언트 엔진과 엣지 클라이언트 엔진" +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" -"`Flower `_ core framework architecture with both Virtual " -"Client Engine and Edge Client Engine" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." msgstr "" -"`Flower `_의 핵심 프레임워크 아키텍처와 가상 및 엣지 클라" -"이언트 엔진" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +#, fuzzy +msgid "How to Build Docker Flower Images Locally" msgstr "Docker Flower 이미지를 Locally 구축하는 방법" #: ../../source/contributor-how-to-build-docker-images.rst:4 msgid "" -"Flower provides pre-made docker images on `Docker Hub `_ that include all necessary dependencies for running the " -"SuperLink, SuperNode or ServerApp. You can also build your own custom docker " -"images from scratch with a different version of Python or Linux distribution " -"(Ubuntu/Alpine) if that is what you need. In this guide, we will explain " -"what images exist and how to build them locally." +"Flower provides pre-made docker images on `Docker Hub " +"`_ that include all necessary dependencies" +" for running the SuperLink, SuperNode or ServerApp. You can also build " +"your own custom docker images from scratch with a different version of " +"Python or Linux distribution (Ubuntu/Alpine) if that is what you need. In" +" this guide, we will explain what images exist and how to build them " +"locally." msgstr "" -"Flower는 'Docker Hub '_에서 미리 만들어진 " -"Docker 이미지들을 제공합니다. 해당 이미지들은 SuperLink, ServerNode 또는 " -"ServerApp을 실행하는 데 필요한 모든 dependencies를 포함합니다. 필요한 경우 다" -"른 버전의 Python이나 Linux 배포판(Ubuntu/Alpine)을 사용해 처음부터 사용자 정" -"의 Docker 이미지를 빌드할 수도 있습니다. 이 가이드에서는 존재하는 이미지들과 " -"이들을 로컬에서 빌드하는 방법에 대해 설명하겠습니다." 
+"Flower는 'Docker Hub '_에서 미리 만들어진 Docker " +"이미지들을 제공합니다. 해당 이미지들은 SuperLink, ServerNode 또는 ServerApp을 실행하는 데 필요한 모든 " +"dependencies를 포함합니다. 필요한 경우 다른 버전의 Python이나 Linux 배포판(Ubuntu/Alpine)을 사용해" +" 처음부터 사용자 정의 Docker 이미지를 빌드할 수도 있습니다. 이 가이드에서는 존재하는 이미지들과 이들을 로컬에서 빌드하는 " +"방법에 대해 설명하겠습니다." #: ../../source/contributor-how-to-build-docker-images.rst:10 msgid "" @@ -86,268 +234,291 @@ msgid "" "development environment." msgstr "시작하기 전에, 로컬 개발 환경에서 몇 가지 전제 조건을 충족해야 합니다." -#: ../../source/contributor-how-to-build-docker-images.rst:12 -msgid "Clone the flower repository." -msgstr "Flower 레포지토리를 복제합니다." +#: ../../source/contributor-how-to-build-docker-images.rst:13 +#, fuzzy +msgid "Clone the ``flower`` repository." +msgstr "플라워 레포지토리를 클론합니다." -#: ../../source/contributor-how-to-build-docker-images.rst:18 -#: ../../source/how-to-run-flower-using-docker.rst:165 +#: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "Verify the Docker daemon is running." msgstr "Docker 데몬이 실행 중인지 확인하십시오." -#: ../../source/contributor-how-to-build-docker-images.rst:20 -#: ../../source/how-to-run-flower-using-docker.rst:167 -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." -msgstr "" -":doc:Run Flower using Docker 의 첫 번째 섹션" -"을 따라 주십시오. 해당 부분을 더 자세히 설명해 줍니다." - -#: ../../source/contributor-how-to-build-docker-images.rst:25 +#: ../../source/contributor-how-to-build-docker-images.rst:21 msgid "" "The build instructions that assemble the images are located in the " -"respective Dockerfiles. You can find them in the subdirectories of ``src/" -"docker``." +"respective Dockerfiles. You can find them in the subdirectories of " +"``src/docker``." msgstr "" -"이미지들을 조합하는 빌드 명령어들은 해당 Dockerfile에 있습니다. \"src/" -"docker\" 의 하위 디렉토리에서 찾을 수 있습니다." +"이미지들을 조합하는 빌드 명령어들은 해당 Dockerfile에 있습니다. \"src/docker\" 의 하위 디렉토리에서 찾을 수 " +"있습니다." 
-#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:24 msgid "" "Flower Docker images are configured via build arguments. Through build " -"arguments, we can make the creation of images more flexible. For example, in " -"the base image, we can specify the version of Python to install using the " -"``PYTHON_VERSION`` build argument. Some of the build arguments have default " -"values, others must be specified when building the image. All available " -"build arguments for each image are listed in one of the tables below." -msgstr "" -"Flower Docker는 빌드 전달인자를 통해 구성됩니다. 빌드 argument들을 통해, " -"이미지를 보다 유연하게 생성할 수 있습니다. 예를 들어, base 이미지에서 " -"\"PYTHON_VERSION\" 빌드 전달인자를 사용하여 Python 버전을 지정할 수 " -"있습니다. 일부 빌드 전달인자들은 기본값이며, 이미지를 빌드할 때 지정해야 " -"합니다. 각 이미지에 사용할 수 있는 모든 빌드 전달인자는 아래 표 중에 " -"있습니다." +"arguments, we can make the creation of images more flexible. For example," +" in the base image, we can specify the version of Python to install using" +" the ``PYTHON_VERSION`` build argument. Some of the build arguments have " +"default values, others must be specified when building the image. All " +"available build arguments for each image are listed in one of the tables " +"below." +msgstr "" +"Flower Docker는 빌드 전달인자를 통해 구성됩니다. 빌드 argument들을 통해, 이미지를 보다 유연하게 생성할 수 " +"있습니다. 예를 들어, base 이미지에서 \"PYTHON_VERSION\" 빌드 전달인자를 사용하여 Python 버전을 지정할 수" +" 있습니다. 일부 빌드 전달인자들은 기본값이며, 이미지를 빌드할 때 지정해야 합니다. 각 이미지에 사용할 수 있는 모든 빌드 " +"전달인자는 아래 표 중에 있습니다." 
-#: ../../source/contributor-how-to-build-docker-images.rst:35 -msgid "Building the base image" +#: ../../source/contributor-how-to-build-docker-images.rst:32 +#, fuzzy +msgid "Building the Base Image" msgstr "기본 이미지 빌드" -#: ../../source/contributor-how-to-build-docker-images.rst:41 -#: ../../source/contributor-how-to-build-docker-images.rst:98 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 msgid "Build argument" msgstr "빌드 전달인자" -#: ../../source/contributor-how-to-build-docker-images.rst:42 -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 msgid "Description" msgstr "설명" -#: ../../source/contributor-how-to-build-docker-images.rst:43 -#: ../../source/contributor-how-to-build-docker-images.rst:100 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 msgid "Required" msgstr "필수" -#: ../../source/contributor-how-to-build-docker-images.rst:44 -#: ../../source/contributor-how-to-build-docker-images.rst:101 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 +#: ../../source/docker/set-environment-variables.rst:8 msgid "Example" msgstr "예시" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:42 msgid "``DISTRO``" msgstr "``DISTRO``" -#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:43 msgid "The Linux distribution to use as the base image." msgstr "기본 이미지 사용을 위한 Linux 배포판." 
-#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:51 -#: ../../source/contributor-how-to-build-docker-images.rst:55 -#: ../../source/contributor-how-to-build-docker-images.rst:71 -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 msgid "No" msgstr "아니오" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:45 msgid "``ubuntu``" msgstr "``ubuntu``" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:46 msgid "``DISTRO_VERSION``" msgstr "``DISTRO_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:47 msgid "Version of the Linux distribution." msgstr "Linux 배포판 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:52 -msgid "``22.04``" -msgstr "``22.04``" +#: ../../source/contributor-how-to-build-docker-images.rst:49 +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:50 msgid "``PYTHON_VERSION``" msgstr "``PYTHON_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "Version of ``python`` to be installed." msgstr "설치 된 ``python`` 버전." 
-#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "``3.11`` or ``3.11.1``" msgstr "``3.11`` 또는 ``3.11.1``" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:54 msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:55 msgid "Version of ``pip`` to be installed." msgstr "설치 된 ``pip`` 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:59 -#: ../../source/contributor-how-to-build-docker-images.rst:63 -#: ../../source/contributor-how-to-build-docker-images.rst:67 -#: ../../source/contributor-how-to-build-docker-images.rst:108 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 msgid "Yes" msgstr "예" -#: ../../source/contributor-how-to-build-docker-images.rst:60 -msgid "``23.0.1``" -msgstr "``23.0.1``" +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid ":substitution-code:`|pip_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:58 msgid "``SETUPTOOLS_VERSION``" msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:62 +#: ../../source/contributor-how-to-build-docker-images.rst:59 msgid "Version of ``setuptools`` to be installed." msgstr "설치 된 ``setuptools`` 버전." 
-#: ../../source/contributor-how-to-build-docker-images.rst:64 -msgid "``69.0.2``" -msgstr "``69.0.2``" +#: ../../source/contributor-how-to-build-docker-images.rst:61 +#, fuzzy +msgid ":substitution-code:`|setuptools_version|`" +msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:62 msgid "``FLWR_VERSION``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:63 msgid "Version of Flower to be installed." msgstr "설치 된 Flower 버전." -#: ../../source/contributor-how-to-build-docker-images.rst:68 -msgid "``1.8.0``" -msgstr "``1.8.0``" +#: ../../source/contributor-how-to-build-docker-images.rst:65 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:66 msgid "``FLWR_PACKAGE``" msgstr "``FLWR_PACKAGE``" -#: ../../source/contributor-how-to-build-docker-images.rst:70 +#: ../../source/contributor-how-to-build-docker-images.rst:67 msgid "The Flower package to be installed." msgstr "설치 할 Flower 패키지." -#: ../../source/contributor-how-to-build-docker-images.rst:72 +#: ../../source/contributor-how-to-build-docker-images.rst:69 msgid "``flwr`` or ``flwr-nightly``" msgstr "``flwr`` 또는 ``flwr-nightly``" -#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:70 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." 
+msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:76 +#, fuzzy +msgid "`Direct Reference Examples`_" +msgstr "예시 요청" + +#: ../../source/contributor-how-to-build-docker-images.rst:78 +#, fuzzy msgid "" -"The following example creates a base Ubuntu/Alpine image with Python 3.11.0, " -"pip 23.0.1, setuptools 69.0.2 and Flower 1.8.0:" +"The following example creates a base Ubuntu/Alpine image with Python " +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "" -"다음 예시에서는 Python 3.11.0, pip 23.0.1, setuptools 및 Flower 1.8.0으로 기" -"본 Ubuntu/Alpine 이미지를 만듭니다:" +"다음 예시에서는 Python 3.11.0, pip 23.0.1, setuptools 및 Flower 1.8.0으로 기본 " +"Ubuntu/Alpine 이미지를 만듭니다:" -#: ../../source/contributor-how-to-build-docker-images.rst:88 +#: ../../source/contributor-how-to-build-docker-images.rst:93 +#, fuzzy msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that the " -"build arguments as well as the name and tag can be adapted to your needs. " -"These values serve as examples only." +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "" -"이미지의 이름은 ``flwr_base``이고 태그는 ``0.1.0``입니다. 필요에 따라 빌드 " -"전달인자들 뿐만 아니라 이름과 태그도 정할 수 있습니다. 이 값들은 예시일 " -"뿐입니다." +"이미지의 이름은 ``flwr_base``이고 태그는 ``0.1.0``입니다. 필요에 따라 빌드 전달인자들 뿐만 아니라 이름과 태그도" +" 정할 수 있습니다. 이 값들은 예시일 뿐입니다." 
-#: ../../source/contributor-how-to-build-docker-images.rst:92 -msgid "Building the SuperLink/SuperNode or ServerApp image" -msgstr "SuperLink/SuperNode 또는 ServerApp 이미지 빌드" +#: ../../source/contributor-how-to-build-docker-images.rst:98 +#, fuzzy +msgid "Building a Flower Binary Image" +msgstr "기본 이미지 빌드" -#: ../../source/contributor-how-to-build-docker-images.rst:102 +#: ../../source/contributor-how-to-build-docker-images.rst:108 msgid "``BASE_REPOSITORY``" msgstr "``BASE_REPOSITORY``" -#: ../../source/contributor-how-to-build-docker-images.rst:103 +#: ../../source/contributor-how-to-build-docker-images.rst:109 msgid "The repository name of the base image." msgstr "기본 이미지의 레포지토리 이름." -#: ../../source/contributor-how-to-build-docker-images.rst:105 +#: ../../source/contributor-how-to-build-docker-images.rst:111 msgid "``flwr/base``" msgstr "``flwr/base``" -#: ../../source/contributor-how-to-build-docker-images.rst:106 +#: ../../source/contributor-how-to-build-docker-images.rst:112 msgid "``BASE_IMAGE``" msgstr "``BASE_IMAGE``" -#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/contributor-how-to-build-docker-images.rst:113 msgid "The Tag of the Flower base image." msgstr "Flower 기본 이미지의 태그." 
-#: ../../source/contributor-how-to-build-docker-images.rst:109 -msgid "``1.8.0-py3.10-ubuntu22.04``" -msgstr "``1.8.0-py3.10-ubuntu22.04``" +#: ../../source/contributor-how-to-build-docker-images.rst:115 +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:111 +#: ../../source/contributor-how-to-build-docker-images.rst:117 msgid "" -"The following example creates a SuperLink/SuperNode or ServerApp image with " -"the official Flower base image:" +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" msgstr "" -"다음 예시에서는 공식 Flower 기본 이미지로 SuperLink/SuperNode 또는 ServerApp" -"이미지를 만듭니다:" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:128 +#, fuzzy msgid "" -"If you want to use your own base image instead of the official Flower base " -"image, all you need to do is set the ``BASE_REPOSITORY`` build argument." +"If you want to use your own base image instead of the official Flower " +"base image, all you need to do is set the ``BASE_REPOSITORY`` build " +"argument to ``flwr_base`` (as we've specified above)." msgstr "" -"공식 Flower 기본 이미지 대신 자체 기본 이미지를 사용 하길 원한다면, " -"``BASE_REPOSITORY`` 빌드 전달인자들을 설정해야 합니다." +"공식 Flower 기본 이미지 대신 자체 기본 이미지를 사용 하길 원한다면, ``BASE_REPOSITORY`` 빌드 전달인자들을 " +"설정해야 합니다." 
-#: ../../source/contributor-how-to-build-docker-images.rst:133
+#: ../../source/contributor-how-to-build-docker-images.rst:140
 msgid "After creating the image, we can test whether the image is working:"
 msgstr "이미지 생성 후에, 이미지가 작동하는지 테스트할 수 있습니다:"
 
+#: ../../source/contributor-how-to-build-docker-images.rst:147
+#, fuzzy
+msgid "Direct Reference Examples"
+msgstr "예시 요청"
+
 #: ../../source/contributor-how-to-contribute-translations.rst:2
 msgid "Contribute translations"
 msgstr "번역 기여"
 
 #: ../../source/contributor-how-to-contribute-translations.rst:4
 msgid ""
-"Since `Flower 1.5 `_ we have introduced translations to our doc pages, "
-"but, as you might have noticed, the translations are often imperfect. If you "
-"speak languages other than English, you might be able to help us in our "
-"effort to make Federated Learning accessible to as many people as possible "
-"by contributing to those translations! This might also be a great "
-"opportunity for those wanting to become open source contributors with little "
-"prerequisites."
-msgstr ""
-"`Flower 1.5 `_ 부터 문서 페이지에 번역을 도입했지만, 아시다시피 "
-"번역이 불안전한 경우가 많습니다. 만일 영어 이외의 언어를 사용한다면, 많은 "
-"사람들이 연합 학습에 접근할 수 있도록 번역 작업에 기여함으로써 저희의 노력에 "
-"도움을 주실 수 있습니다! 이는 전제 조건이 거의 없는 오픈 소스 기여자가 "
-"되고자 하는 사람들에게 좋은 기회가 될 수도 있습니다."
+"Since `Flower 1.5 `_ we have introduced translations to "
+"our doc pages, but, as you might have noticed, the translations are often"
+" imperfect. If you speak languages other than English, you might be able "
+"to help us in our effort to make Federated Learning accessible to as many"
+" people as possible by contributing to those translations! This might "
+"also be a great opportunity for those wanting to become open source "
+"contributors with little prerequisites."
+msgstr ""
+"`Flower 1.5 `_ 부터 문서 페이지에 번역을 도입했지만, 아시다시피 번역이 불완전한 "
+"경우가 많습니다. 만일 영어 이외의 언어를 사용한다면, 많은 사람들이 연합 학습에 접근할 수 있도록 번역 작업에 기여함으로써 저희의"
+" 노력에 도움을 주실 수 있습니다! 이는 전제 조건이 거의 없는 오픈 소스 기여자가 되고자 하는 사람들에게 좋은 기회가 될 수도 "
+"있습니다."
#: ../../source/contributor-how-to-contribute-translations.rst:13 msgid "" -"Our translation project is publicly available over on `Weblate `_, this where most of " -"the work will happen." +"Our translation project is publicly available over on `Weblate " +"`_, this " +"where most of the work will happen." msgstr "" -"번역 프로젝트는 `Weblate `_에서 공개적으로 진행되며, 대부분의 작업이 이곳에서 이루어집니다." +"번역 프로젝트는 `Weblate `_에서 공개적으로 진행되며, 대부분의 작업이 이곳에서 이루어집니다." #: ../../source/contributor-how-to-contribute-translations.rst:18 msgid "Contribute to existing languages" @@ -355,105 +526,101 @@ msgstr "기존 언어에 기여하기" #: ../../source/contributor-how-to-contribute-translations.rst:23 msgid "" -"The first thing you will need to do in order to contribute is to create a " -"free Weblate account on this `page `_. More information about profile settings can be found `here " +"The first thing you will need to do in order to contribute is to create a" +" free Weblate account on this `page " +"`_. More information about" +" profile settings can be found `here " "`_." msgstr "" -"기여를 하기 위해 가장 먼저 해야 할 일은 해당 `page `_에서 무료 Weblate 계정을 만드는 것입니다. 프로필 설" -"정에 대한 자세한 정보는 `here `_를 참조하세요." +"기여를 하기 위해 가장 먼저 해야 할 일은 해당 `page " +"`_에서 무료 Weblate 계정을 만드는 " +"것입니다. 프로필 설정에 대한 자세한 정보는 `here " +"`_를 참조하세요." -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" -"Once you are signed in to Weblate, you can navigate to the `Flower Framework " -"project `_. " -"Here, you should see the different existing languages that can be found on " -"the website." +"Once you are signed in to Weblate, you can navigate to the `Flower " +"Framework project `_. Here, you should see the different existing languages" +" that can be found on the website." msgstr "" -"Weblate에 로그인한 후, `Flower Framework project `_로 이동할 수 있습니다. 여기에서 웹사이트에 " -"있는 다양한 기존 언어들을 확인할 수 있습니다." +"Weblate에 로그인한 후, `Flower Framework project " +"`_로 이동할 수 " +"있습니다. 
여기에서 웹사이트에 있는 다양한 기존 언어들을 확인할 수 있습니다." -#: ../../source/contributor-how-to-contribute-translations.rst:34 +#: ../../source/contributor-how-to-contribute-translations.rst:32 msgid "" -"Once you have selected the language you want to contribute to, you should " -"see a similar interface to this:" +"Once you have selected the language you want to contribute to, you should" +" see a similar interface to this:" msgstr "기여하고자 하는 언어를 선택하면, 다음과 같은 인터페이스가 나타납니다:" -#: ../../source/contributor-how-to-contribute-translations.rst:39 +#: ../../source/contributor-how-to-contribute-translations.rst:37 msgid "" "The most straight forward option here is to click on the ``Translate`` " -"button on the top right (in the ``Translation status`` section). This will " -"automatically bring you to the translation interface for untranslated " -"strings." +"button on the top right (in the ``Translation status`` section). This " +"will automatically bring you to the translation interface for " +"untranslated strings." msgstr "" -"여기서 가장 간단한 옵션은 오른쪽 상단(``Translation status`` 부분)에 있는 " -"``Translate`` 버튼을 클릭하는 것 입니다. 번역되지 않은 문장에 대한 번역 인터" -"페이스로 자동으로 이동합니다." +"여기서 가장 간단한 옵션은 오른쪽 상단(``Translation status`` 부분)에 있는 ``Translate`` 버튼을 " +"클릭하는 것 입니다. 번역되지 않은 문장에 대한 번역 인터페이스로 자동으로 이동합니다." 
-#: ../../source/contributor-how-to-contribute-translations.rst:43 +#: ../../source/contributor-how-to-contribute-translations.rst:41 msgid "This is what the interface looks like:" msgstr "인터페이스는 다음과 같습니다:" -#: ../../source/contributor-how-to-contribute-translations.rst:47 +#: ../../source/contributor-how-to-contribute-translations.rst:45 msgid "" -"You input your translation in the text box at the top and then, once you are " -"happy with it, you either press ``Save and continue`` (to save the " -"translation and go to the next untranslated string), ``Save and stay`` (to " -"save the translation and stay on the same page), ``Suggest`` (to add your " -"translation to suggestions for other users to view), or ``Skip`` (to go to " -"the next untranslated string without saving anything)." +"You input your translation in the text box at the top and then, once you " +"are happy with it, you either press ``Save and continue`` (to save the " +"translation and go to the next untranslated string), ``Save and stay`` " +"(to save the translation and stay on the same page), ``Suggest`` (to add " +"your translation to suggestions for other users to view), or ``Skip`` (to" +" go to the next untranslated string without saving anything)." msgstr "" -"번역문을 상단의 텍스트 상자에 입력한 후, 번역이 만족스러우면 ``Save and " -"continue``(번역을 저장하고 다음 미번역 문장으로 이동), ``Save and stay``(번역" -"을 저장하고 해당 페이지에 머무르기), ``Suggest`` (다른 사용자가 볼 수 있도록 " -"번역을 제안 항목에 추가), ``Skip``(아무것도 저장하지 않고 다음 미번역 문장으" -"로 이동) 중 하나를 선택하면 됩니다." +"번역문을 상단의 텍스트 상자에 입력한 후, 번역이 만족스러우면 ``Save and continue``(번역을 저장하고 다음 미번역 " +"문장으로 이동), ``Save and stay``(번역을 저장하고 해당 페이지에 머무르기), ``Suggest`` (다른 사용자가 " +"볼 수 있도록 번역을 제안 항목에 추가), ``Skip``(아무것도 저장하지 않고 다음 미번역 문장으로 이동) 중 하나를 선택하면 " +"됩니다." 
-#: ../../source/contributor-how-to-contribute-translations.rst:54
+#: ../../source/contributor-how-to-contribute-translations.rst:51
 msgid ""
 "In order to help with the translations, you can see on the bottom the "
 "``Nearby strings``, the ``Comments`` (from other contributors), the "
 "``Automatic suggestions`` (from machine translation engines), the "
-"translations in ``Other languages``, and the ``History`` of translations for "
-"this string."
+"translations in ``Other languages``, and the ``History`` of translations "
+"for this string."
 msgstr ""
-"번역에 도움을 주기위해 하단에서 `주변 문자열``, ``의견``(다른 기여자의), ``자"
-"동 제안``(기계 번역의), ``다른 언어``의 번역 및 해당 문장의 번역``히스토리``"
-"를 볼 수 있습니다."
+"번역에 도움을 주기 위해 하단에서 ``주변 문자열``, ``의견``(다른 기여자의), ``자동 제안``(기계 번역의), ``다른 "
+"언어``의 번역 및 해당 문장의 번역 ``히스토리``를 볼 수 있습니다."

-#: ../../source/contributor-how-to-contribute-translations.rst:59
+#: ../../source/contributor-how-to-contribute-translations.rst:56
 msgid ""
-"On the right, under the ``String information`` section, you can also click "
-"the link under ``Source string location`` in order to view the source of the "
-"doc file containing the string."
-msgstr ""
-"오른쪽의 ``문자열 정보``에서 ``원본 문자열 위치``를 클릭하여 해당 문장이 포함"
-"된 문서의 파일 소스를 볼 수도 있습니다."
+"On the right, under the ``String information`` section, you can also "
+"click the link under ``Source string location`` in order to view the "
+"source of the doc file containing the string."
+msgstr "오른쪽의 ``문자열 정보``에서 ``원본 문자열 위치``를 클릭하여 해당 문장이 포함된 문서의 파일 소스를 볼 수도 있습니다."

-#: ../../source/contributor-how-to-contribute-translations.rst:63
+#: ../../source/contributor-how-to-contribute-translations.rst:60
 msgid ""
-"For more information about translating using Weblate, you can check out this "
-"`in-depth guide `_."
+"For more information about translating using Weblate, you can check out "
+"this `in-depth guide "
+"`_."
 msgstr ""
-"Weblate를 통한 번역에 대한 자세한 정보는 `in-depth guide `_를 확인하세요."
+"Weblate를 통한 번역에 대한 자세한 정보는 `in-depth guide "
+"`_를 확인하세요."
-#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 msgid "Add new languages" msgstr "새 언어 추가" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" -"If you want to add a new language, you will first have to contact us, either " -"on `Slack `_, or by opening an issue on our " -"`GitHub repo `_." +"If you want to add a new language, you will first have to contact us, " +"either on `Slack `_, or by opening an issue" +" on our `GitHub repo `_." msgstr "" -"새 언어를 추가하려면, `Slack `에 문의하거나 " -"`GitHub repo `_에서 issue에 들어가 문의 해야 " -"합니다." +"새 언어를 추가하려면, `Slack `에 문의하거나 `GitHub repo " +"`_에서 issue에 들어가 문의 해야 합니다." #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 msgid "Develop in VSCode Dev Containers" @@ -461,111 +628,104 @@ msgstr "VSCode Dev Container에서 개발" #: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:4 msgid "" -"When working on the Flower framework we want to ensure that all contributors " -"use the same developer environment to format code or run tests. For this " -"purpose we are using the VSCode Remote Containers extension. What is it? " -"Read the following quote:" -msgstr "" -"Flower 프레임워크 작업시, 모든 기여자들이 코드 포맷팅이나 테스트 실행을 위해 " -"동일한 개발 환경을 사용하길 원합니다. 이를 위해 VSCode Remote Containers 확장" -"을 사용하고 있습니다. 그것이 무엇인지 알아보기 위해 다음 인용문을 읽어보세요:" - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 -msgid "" -"The Visual Studio Code Remote - Containers extension lets you use a Docker " -"container as a fully-featured development environment. It allows you to open " -"any folder inside (or mounted into) a container and take advantage of Visual " -"Studio Code's full feature set. 
A :code:`devcontainer.json` file in your " -"project tells VS Code how to access (or create) a development container with " -"a well-defined tool and runtime stack. This container can be used to run an " -"application or to separate tools, libraries, or runtimes needed for working " -"with a codebase." -msgstr "" -"Visual Studio Code Remote - 컨테이너 확장을 사용하면 Docker 컨테이너를 모든 " -"기능을 갖춘 개발 환경으로 사용할 수 있습니다. 이 확장 기능을 사용하면 컨테이" -"너 내부(또는 컨테이너에 마운트된)의 모든 폴더를 열고 Visual Studio Code의 모" -"든 기능을 활용할 수 있습니다. 프로젝트에 있는 :code:`devcontainer.json` 파일" -"은 잘 정의된 도구와 런타임 스택을 사용하여 개발 컨테이너에 액세스(또는 생성)" -"하는 방법을 VS Code에 알려줍니다. 이 컨테이너는 애플리케이션을 실행하거나 코" -"드베이스 작업에 필요한 도구, 라이브러리 또는 런타임을 분리하는 데 사용할 수 " -"있습니다." - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 -msgid "" -"Workspace files are mounted from the local file system or copied or cloned " -"into the container. Extensions are installed and run inside the container, " -"where they have full access to the tools, platform, and file system. This " -"means that you can seamlessly switch your entire development environment " -"just by connecting to a different container." -msgstr "" -"작업 공간 파일은 로컬 파일 시스템에서 마운트되거나 컨테이너에 복사 또는 클론" -"됩니다. 확장 프로그램은 컨테이너 내부에 설치되고 실행되며, 도구, 플랫폼 및 파" -"일 시스템에 완전한 접근 권한을 갖습니다. 이는 다른 컨테이너에 연결하는 것만으" -"로 전체 개발 환경을 원활하게 전환할 수 있음을 의미합니다." - -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 -msgid "" -"Source: `Official VSCode documentation `_" +"When working on the Flower framework we want to ensure that all " +"contributors use the same developer environment to format code or run " +"tests. For this purpose we are using the VSCode Remote Containers " +"extension. What is it? Read the following quote:" +msgstr "" +"Flower 프레임워크 작업시, 모든 기여자들이 코드 포맷팅이나 테스트 실행을 위해 동일한 개발 환경을 사용하길 원합니다. 이를 " +"위해 VSCode Remote Containers 확장을 사용하고 있습니다. 
그것이 무엇인지 알아보기 위해 다음 인용문을 " +"읽어보세요:" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +#, fuzzy +msgid "" +"The Visual Studio Code Remote - Containers extension lets you use a " +"Docker container as a fully-featured development environment. It allows " +"you to open any folder inside (or mounted into) a container and take " +"advantage of Visual Studio Code's full feature set. A " +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." +msgstr "" +"Visual Studio Code Remote - 컨테이너 확장을 사용하면 Docker 컨테이너를 모든 기능을 갖춘 개발 환경으로 " +"사용할 수 있습니다. 이 확장 기능을 사용하면 컨테이너 내부(또는 컨테이너에 마운트된)의 모든 폴더를 열고 Visual Studio" +" Code의 모든 기능을 활용할 수 있습니다. 프로젝트에 있는 :code:`devcontainer.json` 파일은 잘 정의된 " +"도구와 런타임 스택을 사용하여 개발 컨테이너에 액세스(또는 생성)하는 방법을 VS Code에 알려줍니다. 이 컨테이너는 " +"애플리케이션을 실행하거나 코드베이스 작업에 필요한 도구, 라이브러리 또는 런타임을 분리하는 데 사용할 수 있습니다." + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 +msgid "" +"Workspace files are mounted from the local file system or copied or " +"cloned into the container. Extensions are installed and run inside the " +"container, where they have full access to the tools, platform, and file " +"system. This means that you can seamlessly switch your entire development" +" environment just by connecting to a different container." +msgstr "" +"작업 공간 파일은 로컬 파일 시스템에서 마운트되거나 컨테이너에 복사 또는 클론됩니다. 확장 프로그램은 컨테이너 내부에 설치되고 " +"실행되며, 도구, 플랫폼 및 파일 시스템에 완전한 접근 권한을 갖습니다. 이는 다른 컨테이너에 연결하는 것만으로 전체 개발 환경을 " +"원활하게 전환할 수 있음을 의미합니다." 
+ +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 +msgid "" +"Source: `Official VSCode documentation " +"`_" msgstr "출처 : 공식 VSCode 문서" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "시작하기" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 +#, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the configuration " -"for the devcontainer can be a bit more involved. The good thing is you don't " -"have to do it. Usually it should be enough to install `Docker `_ on your system and ensure its available on " -"your command line. Additionally, install the `VSCode Containers Extension " -"`_." +"Configuring and setting up the ``Dockerfile`` as well the configuration " +"for the devcontainer can be a bit more involved. The good thing is you " +"don't have to do it. Usually it should be enough to install `Docker " +"`_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." msgstr "" -"`Dockerfile`을 설정하고 구성하는 것과 개발 컨테이너 구성은 약간 복잡할 수 있" -"습니다. 다행히도, 이를 직접 할 필요는 없습니다. 일반적으로 시스템에 `Docker " -"`_를 설치하고 커맨드 라인에서 사용" -"할 수 있는지 확인하는 것으로 충분합니다. 추가로 `VSCode Containers Extension " +"`Dockerfile`을 설정하고 구성하는 것과 개발 컨테이너 구성은 약간 복잡할 수 있습니다. 다행히도, 이를 직접 할 필요는 " +"없습니다. 일반적으로 시스템에 `Docker `_를 " +"설치하고 커맨드 라인에서 사용할 수 있는지 확인하는 것으로 충분합니다. 추가로 `VSCode Containers Extension " "`_을 설치하세요." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" -"Now you should be good to go. When starting VSCode, it will ask you to run " -"in the container environment and - if you confirm - automatically build the " -"container and use it. 
To manually instruct VSCode to use the devcontainer, " -"you can, after installing the extension, click the green area in the bottom " -"left corner of your VSCode window and select the option *(Re)Open Folder in " -"Container*." +"Now you should be good to go. When starting VSCode, it will ask you to " +"run in the container environment and - if you confirm - automatically " +"build the container and use it. To manually instruct VSCode to use the " +"devcontainer, you can, after installing the extension, click the green " +"area in the bottom left corner of your VSCode window and select the " +"option *(Re)Open Folder in Container*." msgstr "" -"이제 준비가 완료되었습니다. VSCode를 시작하면 컨테이너 환경에서 실행할지를 묻" -"고, 확인하면 자동으로 컨테이너를 빌드하고 사용할 것입니다. VSCode에 수동으로 " -"개발 컨테이너를 사용하도록 지시하려면, 확장을 설치한 후, VSCode 창의 왼쪽 하" -"단에 있는 초록색 부을 클릭하고 *(Re)Open Folder in Container* 옵션을 선택하세" -"요." +"이제 준비가 완료되었습니다. VSCode를 시작하면 컨테이너 환경에서 실행할지를 묻고, 확인하면 자동으로 컨테이너를 빌드하고 사용할" +" 것입니다. VSCode에 수동으로 개발 컨테이너를 사용하도록 지시하려면, 확장을 설치한 후, VSCode 창의 왼쪽 하단에 있는 " +"초록색 부을 클릭하고 *(Re)Open Folder in Container* 옵션을 선택하세요." -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" -"In some cases your setup might be more involved. For those cases consult the " -"following sources:" -msgstr "" -"경우에 따라 설정이 더 복잡할 수도 있습니다. 이러한 경우에는 다음 소스를 참조" -"하세요:" +"In some cases your setup might be more involved. For those cases consult " +"the following sources:" +msgstr "경우에 따라 설정이 더 복잡할 수도 있습니다. 
이러한 경우에는 다음 소스를 참조하세요:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 msgid "" -"`Developing inside a Container `_" +"`Developing inside a Container " +"`_" msgstr "" -"`컨테이너 내부 개발`_" +"`컨테이너 내부 개발`_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 msgid "" -"`Remote development in Containers `_" -msgstr "" -"`컨테이너 원격 개발`_" +"`Remote development in Containers " +"`_" +msgstr "`컨테이너 원격 개발`_" #: ../../source/contributor-how-to-install-development-versions.rst:2 msgid "Install development versions" @@ -581,22 +741,21 @@ msgstr "Poetry 사용하기(권장)" #: ../../source/contributor-how-to-install-development-versions.rst:10 msgid "" -"Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency in " -"``pyproject.toml`` and then reinstall (don't forget to delete ``poetry." -"lock`` (``rm poetry.lock``) before running ``poetry install``)." +"Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency " +"in ``pyproject.toml`` and then reinstall (don't forget to delete " +"``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``)." msgstr "" -"PyPI에서 ``flwr`` 사전 릴리스 설치하기: ``pyproject.toml``에서 ``flwr``의 " -"의존성을 업데이트한 다음, 재설치하세요(``poetry 설치``이전에 ``poetry.lock`` " -"(``rm poetry.lock``)를 제거하는 것을 잊지 마세요)." +"PyPI에서 ``flwr`` 사전 릴리스 설치하기: ``pyproject.toml``에서 ``flwr``의 의존성을 업데이트한 " +"다음, 재설치하세요(``poetry 설치``이전에 ``poetry.lock`` (``rm poetry.lock``)를 제거하는 것을" +" 잊지 마세요)." 
-#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" -msgstr "" -"``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (extras 제외)" +msgstr "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" @@ -604,39 +763,37 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" -"Install ``flwr`` from a local copy of the Flower source code via ``pyproject." 
-"toml``:" -msgstr "" -"``pyproject.toml``을 통해 Flower 소스 코드의 로컬 복사본에서 ``flwr``을 설치" -"하세요:" +"Install ``flwr`` from a local copy of the Flower source code via " +"``pyproject.toml``:" +msgstr "``pyproject.toml``을 통해 Flower 소스 코드의 로컬 복사본에서 ``flwr``을 설치하세요:" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "``flwr = { path = \"../../\", develop = true }`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" -"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] }`` " -"(with extras)" +"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " +"}`` (with extras)" msgstr "" -"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] }`` " -"(extras 포함)" +"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " +"}`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "``pyproject.toml``을 통해 로컬 wheel file에서 ``flwr``을 설치하세요:" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 msgid "" -"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without " -"extras)" +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" +" extras)" msgstr "" -"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (extras 제" -"외)" +"``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (extras " +"제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: 
../../source/contributor-how-to-install-development-versions.rst:26 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (with extras)" @@ -644,62 +801,62 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" msgstr "" -"자세한 내용은 Poetry 문서를 참고하세요: `Poetry Dependency Specification " -"`_" +"자세한 내용은 Poetry 문서를 참고하세요: `Poetry Dependency Specification `_" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "pip 사용하기(Colab에서 권장)" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "PyPI에서 ``flwr`` 사전 릴리즈를 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "``pip install -U --pre flwr`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:33 +#: ../../source/contributor-how-to-install-development-versions.rst:38 msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" msgstr "``pip install -U --pre 'flwr[simulation]'`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." 
msgstr "" -"Python 패키지는 git 저장소에서 설치할 수 있습니다. 다음 명령어 중 하나를 사용" -"하여 GitHub에서 직접 Flower를 설치하세요." +"Python 패키지는 git 저장소에서 설치할 수 있습니다. 다음 명령어 중 하나를 사용하여 GitHub에서 직접 Flower를 " +"설치하세요." -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "기본 GitHub branch (``main``)에서 ``flwr`` 를 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" -"``pip install flwr@git+https://github.com/adap/flower.git`` (without extras)" -msgstr "" -"``pip install flwr@git+https://github.com/adap/flower.git`` (extras 제외)" +"``pip install flwr@git+https://github.com/adap/flower.git`` (without " +"extras)" +msgstr "``pip install flwr@git+https://github.com/adap/flower.git`` (extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(extras 포함)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "특정 GitHub branch (``branch-name``)에서 ``flwr`` 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" 
@@ -707,75 +864,73 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(extras 제외)" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@branch-" -"name'`` (with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name'`` (with extras)" msgstr "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@branch-" -"name'`` (extras 포함)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name'`` (extras 포함)" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "Google Colab에서 Jupyter Notebooks 열기" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" msgstr "" -"``doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb``notebook" -"을 엽니다:" +"``doc/source/tutorial-series-get-started-with-flower-" +"pytorch.ipynb``notebook을 엽니다:" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 msgid "" -"https://colab.research.google.com/github/adap/flower/blob/main/doc/source/" -"tutorial-series-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -"https://colab.research.google.com/github/adap/flower/blob/main/doc/source/" -"tutorial-series-get-started-with-flower-pytorch.ipynb" 
+"https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-series-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" -"Open a development version of the same notebook from branch `branch-name` by " -"changing ``main`` to ``branch-name`` (right after ``blob``):" +"Open a development version of the same notebook from branch `branch-name`" +" by changing ``main`` to ``branch-name`` (right after ``blob``):" msgstr "" -"``main``을 ``branch-name``(``blob`` 바로 뒤)으로 변경하여 동일한 notebook의 " -"개발 버전을 브랜치 `branch-name`에서 엽니다 :" +"``main``을 ``branch-name``(``blob`` 바로 뒤)으로 변경하여 동일한 notebook의 개발 버전을 브랜치 " +"`branch-name`에서 엽니다 :" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 msgid "" -"https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/" -"source/tutorial-series-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/branch-" +"name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -"https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/" -"source/tutorial-series-get-started-with-flower-pytorch.ipynb" +"https://colab.research.google.com/github/adap/flower/blob/branch-" +"name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "Google Colab에서 `whl` 설치하기:" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" -"In the vertical icon grid on the left hand side, select ``Files`` > ``Upload " -"to session 
storage``" -msgstr "" -"왼쪽의 수직 아이콘 그리드에서 ``Files`` > ``Upload to session storage``를 선" -"택하세요" +"In the vertical icon grid on the left hand side, select ``Files`` > " +"``Upload to session storage``" +msgstr "왼쪽의 수직 아이콘 그리드에서 ``Files`` > ``Upload to session storage``를 선택하세요" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "whl (예:``flwr-1.8.0-py3-none-any.whl``)을 업로드하세요" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 msgid "" -"Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` " -"to ``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch " -"torchvision matplotlib``" +"Change ``!pip install -q 'flwr[simulation]' torch torchvision " +"matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" +"any.whl[simulation]' torch torchvision matplotlib``" msgstr "" -"``!pip install -q 'flwr[simulation]' torch torchvision matplotlib``를 ``!pip " -"install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch torchvision " -"matplotlib``로 바꾸세요" +"``!pip install -q 'flwr[simulation]' torch torchvision matplotlib``를 " +"``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch " +"torchvision matplotlib``로 바꾸세요" #: ../../source/contributor-how-to-release-flower.rst:2 msgid "Release Flower" @@ -785,254 +940,185 @@ msgstr "Flower 릴리즈 하기" msgid "" "This document describes the current release process. It may or may not " "change in the future." -msgstr "" -"이 문서는 현재 릴리즈 과정을 설명합니다. 이는 앞으로 변경될 수도 있습니다." +msgstr "이 문서는 현재 릴리즈 과정을 설명합니다. 이는 앞으로 변경될 수도 있습니다." 
-#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "릴리즈 동안에" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" -"The version number of a release is stated in ``pyproject.toml``. To release " -"a new version of Flower, the following things need to happen (in that order):" +"The version number of a release is stated in ``pyproject.toml``. To " +"release a new version of Flower, the following things need to happen (in " +"that order):" msgstr "" -"릴리즈의 버전 번호는 ``pyproject.toml``에 명시되어 있습니다. Flower의 새 버전" -"을 릴리즈하려면 다음 작업이 순서대로 수행되어야 합니다:" +"릴리즈의 버전 번호는 ``pyproject.toml``에 명시되어 있습니다. Flower의 새 버전을 릴리즈하려면 다음 작업이 " +"순서대로 수행되어야 합니다:" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." msgstr "" -"모든 새로운 변경 사항을 변경 로그에 추가하기 위해``python3 src/py/flwr_tool/" -"update_changelog.py ``을 실행합니다 (변경 로그가 만족스러워질 " -"때까지 수동으로 변경해도 됩니다)." +"모든 새로운 변경 사항을 변경 로그에 추가하기 위해``python3 " +"src/py/flwr_tool/update_changelog.py ``을 실행합니다 (변경 로그가 " +"만족스러워질 때까지 수동으로 변경해도 됩니다)." -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 msgid "" -"Once the changelog has been updated with all the changes, run ``./dev/" -"prepare-release-changelog.sh v``, where ```` is " -"the version stated in ``pyproject.toml`` (notice the ``v`` added before it). " -"This will replace the ``Unreleased`` header of the changelog by the version " -"and current date, and it will add a thanking message for the contributors. " -"Open a pull request with those changes." 
+"Once the changelog has been updated with all the changes, run ``./dev" +"/prepare-release-changelog.sh v``, where ```` " +"is the version stated in ``pyproject.toml`` (notice the ``v`` added " +"before it). This will replace the ``Unreleased`` header of the changelog " +"by the version and current date, and it will add a thanking message for " +"the contributors. Open a pull request with those changes." msgstr "" -"모든 변경 사항으로 변경 로그가 업데이트되면,``./dev/prepare-release-" -"changelog.sh v``을 실행합니다. 여기서 ````은 " -"``pyproject.toml``에 명시된 버전 번호입니다 (앞에 ``v``가 추가된 것을 주의하" -"세요). 이 명령어는 변경 로그의 ``Unreleased``헤더를 해당 버전과 현재 날짜로 " -"교체하고, 기여자들에게 감사 메시지가 추가됩니다. 이러한 변경 사항으로 pull " -"request합니다." +"모든 변경 사항으로 변경 로그가 업데이트되면,``./dev/prepare-release-changelog.sh " +"v``을 실행합니다. 여기서 ````은 ``pyproject.toml``에 명시된 " +"버전 번호입니다 (앞에 ``v``가 추가된 것을 주의하세요). 이 명령어는 변경 로그의 ``Unreleased``헤더를 해당 버전과" +" 현재 날짜로 교체하고, 기여자들에게 감사 메시지가 추가됩니다. 이러한 변경 사항으로 pull request합니다." -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "" "Once the pull request is merged, tag the release commit with the version " -"number as soon as the PR is merged: ``git tag v`` (notice the " -"``v`` added before the version number), then ``git push --tags``. This will " -"create a draft release on GitHub containing the correct artifacts and the " -"relevant part of the changelog." +"number as soon as the PR is merged: ``git tag v`` (notice " +"the ``v`` added before the version number), then ``git push --tags``. " +"This will create a draft release on GitHub containing the correct " +"artifacts and the relevant part of the changelog." msgstr "" -"pull request가 병합되면, PR이 병합되는 즉시 버전 번호로 릴리즈 커밋에 태그를 " -"지정합니다:``git tag v`` (버전 번호 앞에 ``v``가 추가된 것을 확" -"인), 그 다음 ``git push --tags``. 이렇게 하면 올바른 아티팩트와 변경 로그의 " -"관련 부분이 포함된 초안 릴리즈가 GitHub에 생성됩니다." 
+"pull request가 병합되면, PR이 병합되는 즉시 버전 번호로 릴리즈 커밋에 태그를 지정합니다:``git tag " +"v`` (버전 번호 앞에 ``v``가 추가된 것을 확인), 그 다음 ``git push --tags``. " +"이렇게 하면 올바른 아티팩트와 변경 로그의 관련 부분이 포함된 초안 릴리즈가 GitHub에 생성됩니다." -#: ../../source/contributor-how-to-release-flower.rst:14 -msgid "" -"Check the draft release on GitHub, and if everything is good, publish it." +#: ../../source/contributor-how-to-release-flower.rst:26 +msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "GitHub에서 릴리즈 초안을 확인하고, 모든 것이 양호하면 게시하세요." -#: ../../source/contributor-how-to-release-flower.rst:15 -msgid "Trigger the CI for building the Docker images." -msgstr "Docker 이미지 빌드를 위해 CI를 트리거합니다." - -#: ../../source/contributor-how-to-release-flower.rst:17 -msgid "" -"To trigger the workflow, a collaborator must create a ``workflow_dispatch`` " -"event in the GitHub CI. This can be done either through the UI or via the " -"GitHub CLI. The event requires only one input, the Flower version, to be " -"released." -msgstr "" -"워크플로우를 트리거하려면 공동 작업자가 GitHub CI에서 ``workflow_dispatch``" -"를 생성해야 합니다. 이 작업은 UI 또는 GitHub CLI 를 통해 수행할 수 있습니다. " -"이벤트는 Flower 버전 한 가지 입력만 필요합니다." - -#: ../../source/contributor-how-to-release-flower.rst:21 -msgid "**Via the UI**" -msgstr "**UI를 통해서**" - -#: ../../source/contributor-how-to-release-flower.rst:23 -msgid "" -"Go to the ``Build docker images`` workflow `page `_." -msgstr "" -"``Build docker images`` 워크플로우 `페이지 `_로 이동합니다." - -#: ../../source/contributor-how-to-release-flower.rst:24 -msgid "" -"Click on the ``Run workflow`` button and type the new version of Flower in " -"the ``Version of Flower`` input field." -msgstr "" -"``Run workflow`` 버튼을 누르고 ``Version of Flower``에 Flower의 새버전을 입력" -"합니다." - -#: ../../source/contributor-how-to-release-flower.rst:25 -msgid "Click on the **green** ``Run workflow`` button." -msgstr "**초록색**의 ``Run workflow``버튼을 클릭합니다." 
- #: ../../source/contributor-how-to-release-flower.rst:29 -msgid "**Via the GitHub CI**" -msgstr "**GitHub CI를 통해서**" - -#: ../../source/contributor-how-to-release-flower.rst:31 -msgid "" -"Make sure you are logged in via ``gh auth login`` and that the current " -"working directory is the root of the Flower repository." -msgstr "" -"``gh auth login``을 통해 로그인 했는지, 현재 작업 디렉토리가 Flower 리포지토" -"리의 root인지 확인하세요." - -#: ../../source/contributor-how-to-release-flower.rst:32 -msgid "" -"Trigger the workflow via ``gh workflow run docker-images.yml -f flwr-" -"version=``." -msgstr "" -"``gh workflow run docker-images.yml -f flwr-version=``을 통해 워" -"크플로우 를 트리거합니다." - -#: ../../source/contributor-how-to-release-flower.rst:35 msgid "After the release" msgstr "릴리즈 후에" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "다음 변경 사항이 포함된 pull request를 만듭니다:" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "``pyproject.toml``의 마이너 버전을 하나씩 늘립니다." -#: ../../source/contributor-how-to-release-flower.rst:40 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "필요한 경우 현재 버전 번호가 포함된 모든 파일을 업데이트합니다." -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "``changelog.md``에 ``Unreleased`` 섹션을 새로 추가합니다." -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" -"Merge the pull request on the same day (i.e., before a new nightly release " -"gets published to PyPI)." 
-msgstr "" -"pull request를 같은 날(즉, 새로운 nightly 릴리즈가 PyPI에 게시되기 전에) 병합" -"하세요." +"Merge the pull request on the same day (i.e., before a new nightly " +"release gets published to PyPI)." +msgstr "pull request를 같은 날(즉, 새로운 nightly 릴리즈가 PyPI에 게시되기 전에) 병합하세요." -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "사전 릴리즈 게시" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "사전 릴리즈 이름" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" -"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases " -"MUST use one of the following naming patterns:" -msgstr "" -"PyPI는 사전 릴리즈(알파, 베타, 릴리스 후보)를 지원합니다. 사전 릴리즈는 반드" -"시 다음 명명 패턴 중 하나를 사용해야 합니다:" +"PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases" +" MUST use one of the following naming patterns:" +msgstr "PyPI는 사전 릴리즈(알파, 베타, 릴리스 후보)를 지원합니다. 
사전 릴리즈는 반드시 다음 명명 패턴 중 하나를 사용해야 합니다:" -#: ../../source/contributor-how-to-release-flower.rst:53 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "Alpha: ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "Beta: ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "예시:" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:62 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:64 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" -msgstr "" -"이는 PEP-440 및 Python Packaging Authority (PyPA)의 권장 사항과 일치합니다:" +msgstr "이는 PEP-440 및 Python Packaging Authority (PyPA)의 권장 사항과 일치합니다:" -#: ../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: 
../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" -"`PyPA Choosing a versioning scheme `_" +"`PyPA Choosing a versioning scheme " +"`_" msgstr "" -"`PyPA 버전 관리 체계 선택하기 `_" +"`PyPA 버전 관리 체계 선택하기 `_" -#: ../../source/contributor-how-to-release-flower.rst:70 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" -"Note that the approach defined by PyPA is not compatible with SemVer 2.0.0 " -"spec, for details consult the `Semantic Versioning Specification `_ (specifically item 11 on " -"precedence)." +"Note that the approach defined by PyPA is not compatible with SemVer " +"2.0.0 spec, for details consult the `Semantic Versioning Specification " +"`_ (specifically item " +"11 on precedence)." msgstr "" -"PyPA에서 정의한 접근 방식은 SemVer 2.0.0 사양과 호환되지 않으며, 자세한 내용" -"은`Semantic Versioning 관리 사양 `_ (특히 항목 11이 우선순위)을 참조하세요." +"PyPA에서 정의한 접근 방식은 SemVer 2.0.0 사양과 호환되지 않으며, 자세한 내용은`Semantic Versioning " +"관리 사양 `_ (특히 항목 11이 " +"우선순위)을 참조하세요." #: ../../source/contributor-how-to-release-flower.rst:73 msgid "Pre-release classification" msgstr "사전 릴리즈 분류" #: ../../source/contributor-how-to-release-flower.rst:75 -msgid "" -"Should the next pre-release be called alpha, beta, or release candidate?" +msgid "Should the next pre-release be called alpha, beta, or release candidate?" msgstr "다음 사전 릴리즈를 알파, 베타 또는 릴리스 후보라고 불러야 하나요?" 
#: ../../source/contributor-how-to-release-flower.rst:77 msgid "" -"RC: feature complete, no known issues (apart from issues that are classified " -"as \"won't fix\" for the next stable release) - if no issues surface this " -"will become the next stable release" +"RC: feature complete, no known issues (apart from issues that are " +"classified as \"won't fix\" for the next stable release) - if no issues " +"surface this will become the next stable release" msgstr "" -"RC: 기능 완료, 알려진 문제 없음(다음 stable 릴리즈에서 \"수정되지 않음\"으로 " -"분류된 문제 제외) - 문제가 나타나지 않으면 다음 stable 릴리즈가 됩니다" +"RC: 기능 완료, 알려진 문제 없음(다음 stable 릴리즈에서 \"수정되지 않음\"으로 분류된 문제 제외) - 문제가 나타나지 " +"않으면 다음 stable 릴리즈가 됩니다" -#: ../../source/contributor-how-to-release-flower.rst:78 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "베타: 기능 완료, 알려진 문제 발생 가능" -#: ../../source/contributor-how-to-release-flower.rst:79 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "알파: 기능 미완성, 알려진 문제가 있을 수 있음" @@ -1044,165 +1130,162 @@ msgstr "가상 환경 설정" msgid "" "It is recommended to run your Python setup within a virtual environment. " "This guide shows three different examples how to create a virtual " -"environment with pyenv virtualenv, poetry, or Anaconda. You can follow the " -"instructions or choose your preferred setup." +"environment with pyenv virtualenv, poetry, or Anaconda. You can follow " +"the instructions or choose your preferred setup." msgstr "" -"가상 환경 내에서 파이썬 설정을 실행하는 것이 좋습니다. 이 가이드에서는 pyenv " -"virtualenv, poetry 또는 Anaconda를 사용하여 가상 환경을 만드는 세 가지 예제" -"를 보여줍니다. 안내를 따르거나 원하는 설정을 선택할 수 있습니다." +"가상 환경 내에서 파이썬 설정을 실행하는 것이 좋습니다. 이 가이드에서는 pyenv virtualenv, poetry 또는 " +"Anaconda를 사용하여 가상 환경을 만드는 세 가지 예제를 보여줍니다. 안내를 따르거나 원하는 설정을 선택할 수 있습니다." 
-#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" msgstr "Python 버전" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 msgid "" -"Flower requires at least `Python 3.8 `_, but " -"`Python 3.10 `_ or above is recommended." +"Flower requires at least `Python 3.9 `_, " +"but `Python 3.10 `_ or above is " +"recommended." msgstr "" -"Flower는 `Python 3.8 `_이상이 필요하지만, " -"`Python 3.10 `_이상을 권장합니다." +"Flower는 `Python 3.9 `_이상이 필요하지만, `Python " +"3.10 `_이상을 권장합니다." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 msgid "" -"Due to a known incompatibility with `ray `_, " -"we currently recommend utilizing at most `Python 3.11 `_ for running Flower simulations." +"Due to a known incompatibility with `ray " +"`_, we currently recommend utilizing at " +"most `Python 3.11 `_ for running Flower " +"simulations." msgstr "" -"`Ray `__와 호환되지 않는 것으로 알려져 있으므" -"로, 현재 Flower 시뮬레이션을 실행할 때는 최대 `Python 3.11 `_을 사용하는 것이 좋습니다." +"`Ray `__와 호환되지 않는 것으로 알려져 있으므로, 현재 Flower" +" 시뮬레이션을 실행할 때는 최대 `Python 3.11 `_을 사용하는 것이" +" 좋습니다." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 msgid "Virtualenv with Pyenv/Virtualenv" msgstr "Pyenv/Virtualenv를 사용한 가상 환경" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" -"One of the recommended virtual environment is `pyenv `_/`virtualenv `_. " -"Please see `Flower examples `_ for details." +"One of the recommended virtual environment is `pyenv " +"`_/`virtualenv `_. Please see `Flower examples " +"`_ for details." 
msgstr "" -"권장 가상 환경 중 하나는 `pyenv `_/" -"`virtualenv `_입니다. 자세한 내용" -"은 `Flower examples `_를 " -"참조하세요." +"권장 가상 환경 중 하나는 `pyenv `_/`virtualenv " +"`_입니다. 자세한 내용은 `Flower " +"examples `_를 참조하세요." -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" msgstr "" -"Pyenv가 설정되면 이를 사용하여 'Python 버전 3.10 `_ 이상'을 설치할 수 있습니다:" +"Pyenv가 설정되면 이를 사용하여 'Python 버전 3.10 `_ " +"이상'을 설치할 수 있습니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "가상 환경을 만듭니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "다음 명령을 실행하여 가상 환경을 활성화합니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "Poetry를 사용한 가상 환경" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" -"The Flower examples are based on `Poetry `_ " -"to manage dependencies. After installing Poetry you simply create a virtual " -"environment with:" +"The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " +"simply create a virtual environment with:" msgstr "" -"Flower examples은 의존성을 관리하기 위해 `Poetry `_를 기반으로 합니다. Poetry를 설치한 후 가상 환경을 생성하기만 하면 " -"됩니다:" +"Flower examples은 의존성을 관리하기 위해 `Poetry `_를 기반으로 합니다. 
Poetry를 설치한 후 가상 환경을 생성하기만 하면 됩니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" -"If you open a new terminal you can activate the previously created virtual " -"environment with the following command:" -msgstr "" -"새 터미널을 열면 다음 명령을 사용하여 이전에 생성한 가상 환경을 활성화할 수 " -"있습니다:" +"If you open a new terminal you can activate the previously created " +"virtual environment with the following command:" +msgstr "새 터미널을 열면 다음 명령을 사용하여 이전에 생성한 가상 환경을 활성화할 수 있습니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "Anaconda를 사용한 가상 환경" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#, fuzzy msgid "" -"If you prefer to use Anaconda for your virtual environment then install and " -"setup the `conda `_ package. After setting it up you can create a virtual " -"environment with:" +"If you prefer to use Anaconda for your virtual environment then install " +"and setup the `conda `_ package. After setting it up you can " +"create a virtual environment with:" msgstr "" -"가상 환경에서 Anaconda를 사용하려면 `conda `_ 패키지를 설치 및 " -"설정하세요. 설정 후 다음을 사용하여 가상 환경을 만들 수 있습니다:" +"가상 환경에서 Anaconda를 사용하려면 `conda " +"`_ 패키지를 설치 및 설정하세요. 설정 후 다음을 사용하여 가상 환경을 만들 수 " +"있습니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "그 후 가상 환경을 활성화합니다:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "그다음은?" 
-#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" -"As soon as you created your virtual environment you clone one of the `Flower " -"examples `_." +"As soon as you created your virtual environment you clone one of the " +"`Flower examples `_." msgstr "" -"가상 환경을 생성하자마자 'Flower examples `_ 중 하나를 클론합니다." +"가상 환경을 생성하자마자 'Flower examples " +"`_ 중 하나를 클론합니다." #: ../../source/contributor-how-to-write-documentation.rst:2 msgid "Write documentation" msgstr "문서 작성" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "프로젝트 레이아웃" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" -"The Flower documentation lives in the ``doc`` directory. The Sphinx-based " -"documentation system supports both reStructuredText (``.rst`` files) and " -"Markdown (``.md`` files)." +"The Flower documentation lives in the ``doc`` directory. The Sphinx-based" +" documentation system supports both reStructuredText (``.rst`` files) and" +" Markdown (``.md`` files)." msgstr "" -"Flower 문서는 ``doc`` 디렉토리에 있습니다. Sphinx 기반 문서 시스템은 " -"reStructuredText 텍스트(``.rst`` 파일)와 Markdown(``.md`` 파일)을 모두 지원합" -"니다." +"Flower 문서는 ``doc`` 디렉토리에 있습니다. Sphinx 기반 문서 시스템은 reStructuredText " +"텍스트(``.rst`` 파일)와 Markdown(``.md`` 파일)을 모두 지원합니다." #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196 msgid "" -"Note that, in order to build the documentation locally (with ``poetry run " -"make html``, like described below), `Pandoc `_ needs to be installed on the system." 
+"Note that, in order to build the documentation locally (with ``poetry run" +" make html``, like described below), `Pandoc " +"`_ needs to be installed on the " +"system." msgstr "" -"로컬에서 문서를 작성하려면(아래 설명과 같이 ``poetry run make html``로) " -"`Pandoc `_이 시스템에 설치되어 있어야 합" -"니다." +"로컬에서 문서를 작성하려면(아래 설명과 같이 ``poetry run make html``로) `Pandoc " +"`_이 시스템에 설치되어 있어야 합니다." -#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "기존 페이지 편집" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "doc/source/``에서 기존 ``.rst``(또는 ``.md``) 파일을 편집합니다" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" -msgstr "" -"문서를 컴파일합니다: ``cd doc``, ``poetry run make html`` 순으로 컴파일합니다" +msgstr "문서를 컴파일합니다: ``cd doc``, ``poetry run make html`` 순으로 컴파일합니다" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" msgstr "브라우저에서 ``doc/build/html/index.html``을 열어 결과를 확인합니다" @@ -1229,73 +1312,71 @@ msgstr "훌륭한 첫 번째 기여" #: ../../source/contributor-ref-good-first-contributions.rst:4 msgid "" -"We welcome contributions to Flower! However, it is not always easy to know " -"where to start. We therefore put together a few recommendations on where to " -"start to increase your chances of getting your PR accepted into the Flower " -"codebase." +"We welcome contributions to Flower! However, it is not always easy to " +"know where to start. 
We therefore put together a few recommendations on " +"where to start to increase your chances of getting your PR accepted into " +"the Flower codebase." msgstr "" -"Flower에 대한 기여를 환영합니다! 하지만 어디서부터 시작해야 할지 알기란 쉽지 " -"않습니다. 그래서 저희는 여러분의 PR이 Flower 코드베이스에 채택될 가능성을 높" -"이기 위해 어디서부터 시작해야 하는지 몇 가지 권장 사항을 정리해 보았습니다." +"Flower에 대한 기여를 환영합니다! 하지만 어디서부터 시작해야 할지 알기란 쉽지 않습니다. 그래서 저희는 여러분의 PR이 " +"Flower 코드베이스에 채택될 가능성을 높이기 위해 어디서부터 시작해야 하는지 몇 가지 권장 사항을 정리해 보았습니다." -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "시작 위치" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" -"Until the Flower core library matures it will be easier to get PR's accepted " -"if they only touch non-core areas of the codebase. Good candidates to get " -"started are:" +"Until the Flower core library matures it will be easier to get PR's " +"accepted if they only touch non-core areas of the codebase. Good " +"candidates to get started are:" msgstr "" -"Flower 코어 라이브러리가 완성될 때까지는 코드베이스의 비핵심 영역만 건드리는 " -"것이 PR을 승인받기가 더 쉬울 것입니다. 시작하기에 좋은 후보자는 다음과 같습니" -"다:" +"Flower 코어 라이브러리가 완성될 때까지는 코드베이스의 비핵심 영역만 건드리는 것이 PR을 승인받기가 더 쉬울 것입니다. " +"시작하기에 좋은 후보자는 다음과 같습니다:" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "문서: 무엇이 누락되었나요? 무엇을 더 명확하게 표현할 수 있을까요?" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." msgstr "Baselines: 아래를 참조하세요." -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." msgstr "예시: 아래를 참조하세요." 
-#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "Flower Baselines 요청" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 msgid "" -"If you are not familiar with Flower Baselines, you should probably check-out " -"our `contributing guide for baselines `_." +"If you are not familiar with Flower Baselines, you should probably check-" +"out our `contributing guide for baselines " +"`_." msgstr "" -"Flower Baseline에 익숙하지 않다면 ' Baseline 기여 가이드 `_를 확인해보세요." +"Flower Baseline에 익숙하지 않다면 ' Baseline 기여 가이드 " +"`_를 " +"확인해보세요." -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" -"You should then check out the open `issues `_ for baseline " -"requests. If you find a baseline that you'd like to work on and that has no " -"assignees, feel free to assign it to yourself and start working on it!" +"You should then check out the open `issues " +"`_" +" for baseline requests. If you find a baseline that you'd like to work on" +" and that has no assignees, feel free to assign it to yourself and start " +"working on it!" msgstr "" -"그런 다음 오픈 된 `issues `_에서 baseline " -"요청을 확인해야 합니다. 작업하고 싶은 기준선을 찾았지만 담당자가 없는 경우, " -"자유롭게 자신에게 할당하고 작업을 시작하세요!" +"그런 다음 오픈 된 `issues " +"`_에서" +" baseline 요청을 확인해야 합니다. 작업하고 싶은 기준선을 찾았지만 담당자가 없는 경우, 자유롭게 자신에게 할당하고 작업을 " +"시작하세요!" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" -"Otherwise, if you don't find a baseline you'd like to work on, be sure to " -"open a new issue with the baseline request template!" -msgstr "" -"그렇지 않으면 작업하고 싶은 baseline을 찾지 못하면 baseline 요청 템플릿으로 " -"새 이슈를 열어야 합니다!" 
+"Otherwise, if you don't find a baseline you'd like to work on, be sure to" +" open a new issue with the baseline request template!" +msgstr "그렇지 않으면 작업하고 싶은 baseline을 찾지 못하면 baseline 요청 템플릿으로 새 이슈를 열어야 합니다!" #: ../../source/contributor-ref-good-first-contributions.rst:34 msgid "Request for examples" @@ -1304,12 +1385,11 @@ msgstr "예시 요청" #: ../../source/contributor-ref-good-first-contributions.rst:36 msgid "" "We wish we had more time to write usage examples because we believe they " -"help users to get started with building what they want to build. Here are a " -"few ideas where we'd be happy to accept a PR:" +"help users to get started with building what they want to build. Here are" +" a few ideas where we'd be happy to accept a PR:" msgstr "" -"사용 예시는 사용자가 원하는 것을 구축하는 데 도움이 된다고 생각하기 때문에 " -"더 많은 시간을 할애하여 작성할 수 있었으면 합니다. 다음은 저희가 기꺼이 PR을 " -"수락할 수 있는 몇 가지 아이디어입니다:" +"사용 예시는 사용자가 원하는 것을 구축하는 데 도움이 된다고 생각하기 때문에 더 많은 시간을 할애하여 작성할 수 있었으면 합니다. " +"다음은 저희가 기꺼이 PR을 수락할 수 있는 몇 가지 아이디어입니다:" #: ../../source/contributor-ref-good-first-contributions.rst:40 msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" @@ -1329,43 +1409,43 @@ msgstr "Secure Aggregation 프로토콜" #: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 msgid "" -"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg protocol " -"has not been implemented yet, so its diagram and abstraction may not be " -"accurate in practice. The SecAgg protocol can be considered as a special " -"case of the SecAgg+ protocol." +"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg " +"protocol has not been implemented yet, so its diagram and abstraction may" +" not be accurate in practice. The SecAgg protocol can be considered as a " +"special case of the SecAgg+ protocol." msgstr "" -"SecAgg, SecAgg+, LightSecAgg 프로토콜을 포함합니다. LightSecAgg 프로토콜은 아" -"직 구현되지 않았기 때문에 다이어그램과 추상화가 실제로는 정확하지 않을 수 있" -"습니다. SecAgg 프로토콜은 SecAgg+ 프로토콜의 특수한 경우로 간주할 수 있습니" -"다." 
+"SecAgg, SecAgg+, LightSecAgg 프로토콜을 포함합니다. LightSecAgg 프로토콜은 아직 구현되지 않았기 " +"때문에 다이어그램과 추상화가 실제로는 정확하지 않을 수 있습니다. SecAgg 프로토콜은 SecAgg+ 프로토콜의 특수한 경우로 " +"간주할 수 있습니다." -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +#, fuzzy +msgid "The ``SecAgg+`` abstraction" msgstr "The :code:`SecAgg+` 추상화" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " -"(int) for secure aggregation, and thus many python dictionaries used have " -"keys of int type rather than ClientProxy type." +"(int) for secure aggregation, and thus many python dictionaries used have" +" keys of int type rather than ClientProxy type." msgstr "" -"구현에서는 각 클라이언트에 secure aggregation를 위한 고유 인덱스(int)가 할당" -"되므로 사용되는 많은 파이썬 dictionaries에는 ClientProxy 타입이 아닌 int 타입" -"의 키가 있습니다." +"구현에서는 각 클라이언트에 secure aggregation를 위한 고유 인덱스(int)가 할당되므로 사용되는 많은 파이썬 " +"dictionaries에는 ClientProxy 타입이 아닌 int 타입의 키가 있습니다." 
-#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" -"The Flower server will execute and process received results in the following " -"order:" +"The Flower server will execute and process received results in the " +"following order:" msgstr "Flower 서버는 수신된 결과를 다음 순서로 실행하고 처리합니다:" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#, fuzzy +msgid "The ``LightSecAgg`` abstraction" msgstr "The :code:`LightSecAgg` 추상" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "타입" @@ -1375,761 +1455,708 @@ msgstr "GitHub에서 기여하기" #: ../../source/contributor-tutorial-contribute-on-github.rst:4 msgid "" -"This guide is for people who want to get involved with Flower, but who are " -"not used to contributing to GitHub projects." -msgstr "" -"이 가이드는 Flower에 참여하고 싶지만 GitHub 프로젝트에 기여하는 데 익숙하지 " -"않은 분들을 위한 것입니다." +"This guide is for people who want to get involved with Flower, but who " +"are not used to contributing to GitHub projects." +msgstr "이 가이드는 Flower에 참여하고 싶지만 GitHub 프로젝트에 기여하는 데 익숙하지 않은 분들을 위한 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 msgid "" -"If you're familiar with how contributing on GitHub works, you can directly " -"checkout our :doc:`getting started guide for contributors `." +"If you're familiar with how contributing on GitHub works, you can " +"directly checkout our :doc:`getting started guide for contributors " +"`." msgstr "" -"깃허브에서 기여하는 방식에 익숙하다면 :doc:`기여자를 위한 시작 가이드" -"`를 직접 확인하세요." 
+"깃허브에서 기여하는 방식에 익숙하다면 :doc:`기여자를 위한 시작 가이드`를 직접 확인하세요." -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "레포지토리 설정하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "**GitHub 계정을 만들고 Git을 설정합니다**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " "software that will need to be installed on your local machine, you can " -"follow this `guide `_ to set it up." +"follow this `guide `_ to set it up." msgstr "" -"Git은 분산 버전 관리 도구입니다. 이를 통해 전체 코드베이스의 히스토리와 모든 " -"개발자의 컴퓨터를 저장할 수 있습니다. 로컬 컴퓨터에 설치해야 하는 소프트웨어" -"로, 이 `가이드 `_를 따라 설정할 수 있습니다." +"Git은 분산 버전 관리 도구입니다. 이를 통해 전체 코드베이스의 히스토리와 모든 개발자의 컴퓨터를 저장할 수 있습니다. 로컬 " +"컴퓨터에 설치해야 하는 소프트웨어로, 이 `가이드 `_를 따라 설정할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " -"collaboration. It allows for everyone to collaborate and work from anywhere " -"on remote repositories." +"collaboration. It allows for everyone to collaborate and work from " +"anywhere on remote repositories." msgstr "" -"GitHub는 그 자체로 버전 관리 및 협업을 위한 코드 호스팅 플랫폼입니다. 누구나 " -"원격 레포지토리에서 어디서든 협업하고 작업할 수 있습니다." +"GitHub는 그 자체로 버전 관리 및 협업을 위한 코드 호스팅 플랫폼입니다. 누구나 원격 레포지토리에서 어디서든 협업하고 작업할 " +"수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." 
-msgstr "" -"아직 계정을 만들지 않았다면 `GitHub `_에서 계정을 " -"만들어야 합니다." +msgstr "아직 계정을 만들지 않았다면 `GitHub `_에서 계정을 만들어야 합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" -"The idea behind the generic Git and GitHub workflow boils down to this: you " -"download code from a remote repository on GitHub, make changes locally and " -"keep track of them using Git and then you upload your new history back to " -"GitHub." +"The idea behind the generic Git and GitHub workflow boils down to this: " +"you download code from a remote repository on GitHub, make changes " +"locally and keep track of them using Git and then you upload your new " +"history back to GitHub." msgstr "" -"일반적인 Git 및 GitHub 워크플로우의 기본 개념은 다음과 같이 요약됩니다. " -"GitHub의 원격 레포지토리에서 코드를 다운로드하고 로컬에서 변경한 후 Git을 사" -"용하여 추적한 다음 새 기록을 다시 GitHub에 업로드하는 것입니다." +"일반적인 Git 및 GitHub 워크플로우의 기본 개념은 다음과 같이 요약됩니다. GitHub의 원격 레포지토리에서 코드를 " +"다운로드하고 로컬에서 변경한 후 Git을 사용하여 추적한 다음 새 기록을 다시 GitHub에 업로드하는 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "**Flower 레포지토리 포크하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#, fuzzy msgid "" -"A fork is a personal copy of a GitHub repository. To create one for Flower, " -"you must navigate to ``_ (while connected to " -"your GitHub account) and click the ``Fork`` button situated on the top right " -"of the page." +"A fork is a personal copy of a GitHub repository. To create one for " +"Flower, you must navigate to https://github.com/adap/flower (while " +"connected to your GitHub account) and click the ``Fork`` button situated " +"on the top right of the page." msgstr "" "포크는 GitHub 리포지토리의 개인 복사본입니다. 
Flower용 포크를 만들려면 " -"``_로 이동하여(GitHub 계정에 연결된 상태에" -"서) 페이지 오른쪽 상단에 있는 ``포크`` 버튼을 클릭해야 합니다." +"``_로 이동하여(GitHub 계정에 연결된 상태에서) 페이지 오른쪽 " +"상단에 있는 ``포크`` 버튼을 클릭해야 합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " -"version of Flower will be yours and will sit inside your own account (i.e., " -"in your own list of repositories). Once created, you should see on the top " -"left corner that you are looking at your own version of Flower." +"version of Flower will be yours and will sit inside your own account " +"(i.e., in your own list of repositories). Once created, you should see on" +" the top left corner that you are looking at your own version of Flower." msgstr "" -"원하는 경우 이름을 변경할 수 있지만, 이 버전의 Flower는 자신의 계정(즉, 자신" -"의 리포지토리 목록)에 위치하게 되므로 변경할 필요는 없습니다. 만들기가 완료되" -"면 왼쪽 상단에Flower 버전이 표시되는 것을 볼 수 있습니다." +"원하는 경우 이름을 변경할 수 있지만, 이 버전의 Flower는 자신의 계정(즉, 자신의 리포지토리 목록)에 위치하게 되므로 변경할" +" 필요는 없습니다. 만들기가 완료되면 왼쪽 상단에Flower 버전이 표시되는 것을 볼 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "**포크된 레포지토리 클론하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " -"able to make changes to it. On your forked repository page, you should first " -"click on the ``Code`` button on the right, this will give you the ability to " -"copy the HTTPS link of the repository." +"able to make changes to it. On your forked repository page, you should " +"first click on the ``Code`` button on the right, this will give you the " +"ability to copy the HTTPS link of the repository." 
msgstr "" -"다음 단계는 컴퓨터에서 포크된 레포지토리를 변경할 수 있도록 다운로드하는 것입" -"니다. 포크된 포지토리 페이지에서 먼저 오른쪽의 ``Code`` 버튼을 클릭하면 레포" -"지토리의 HTTPS 링크를 복사할 수 있습니다." +"다음 단계는 컴퓨터에서 포크된 레포지토리를 변경할 수 있도록 다운로드하는 것입니다. 포크된 포지토리 페이지에서 먼저 오른쪽의 " +"``Code`` 버튼을 클릭하면 레포지토리의 HTTPS 링크를 복사할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" -msgstr "" -"\\를 복사한 후에는 컴퓨터에서 터미널을 열고 레포지토리를 다운로드할 위" -"치로 이동하여 입력하면 됩니다:" +msgstr "\\를 복사한 후에는 컴퓨터에서 터미널을 열고 레포지토리를 다운로드할 위치로 이동하여 입력하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "" -"This will create a ``flower/`` (or the name of your fork if you renamed it) " -"folder in the current working directory." -msgstr "" -"현재 작업 디렉터리에``flower/``(또는 포크 이름을 변경한 경우 포크 이름) 폴더" -"가 생성됩니다." +"This will create a ``flower/`` (or the name of your fork if you renamed " +"it) folder in the current working directory." +msgstr "현재 작업 디렉터리에``flower/``(또는 포크 이름을 변경한 경우 포크 이름) 폴더가 생성됩니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "**origin 추가**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "그런 다음 레포지토리 폴더로 이동할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" -"And here we will need to add an origin to our repository. The origin is the " -"\\ of the remote fork repository. 
To obtain it, we can do as " -"previously mentioned by going to our fork repository on our GitHub account " -"and copying the link." +"And here we will need to add an origin to our repository. The origin is " +"the \\ of the remote fork repository. To obtain it, we can do as " +"previously mentioned by going to our fork repository on our GitHub " +"account and copying the link." msgstr "" -"여기에 레포지토리에 origin을 추가해야 합니다. origin은 원격 포크 레포지토리" -"의 \\입니다. origin을 얻으려면 앞서 설명한 대로 GitHub 계정의 포크 레" -"포지토리로 이동하여 링크를 복사하면 됩니다." +"여기에 레포지토리에 origin을 추가해야 합니다. origin은 원격 포크 레포지토리의 \\입니다. origin을 " +"얻으려면 앞서 설명한 대로 GitHub 계정의 포크 레포지토리로 이동하여 링크를 복사하면 됩니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "\\ 이 복사되면 터미널에 다음 명령을 입력하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "**Upstream 추가하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 msgid "" "Now we will add an upstream address to our repository. Still in the same " "directory, we must run the following command:" -msgstr "" -"이제 레포지토리에 upstream 주소를 추가하겠습니다. 여전히 같은 디렉터리에서 다" -"음 명령을 실행해야 합니다:" +msgstr "이제 레포지토리에 upstream 주소를 추가하겠습니다. 
여전히 같은 디렉터리에서 다음 명령을 실행해야 합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 -msgid "" -"The following diagram visually explains what we did in the previous steps:" +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 +msgid "The following diagram visually explains what we did in the previous steps:" msgstr "다음 다이어그램은 이전 단계에서 수행한 작업을 시각적으로 설명합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" -"The upstream is the GitHub remote address of the parent repository (in this " -"case Flower), i.e. the one we eventually want to contribute to and therefore " -"need an up-to-date history of. The origin is just the GitHub remote address " -"of the forked repository we created, i.e. the copy (fork) in our own account." +"The upstream is the GitHub remote address of the parent repository (in " +"this case Flower), i.e. the one we eventually want to contribute to and " +"therefore need an up-to-date history of. The origin is just the GitHub " +"remote address of the forked repository we created, i.e. the copy (fork) " +"in our own account." msgstr "" -"upstream은 부모 레포지토리(이 경우 Flower)의 GitHub 원격 주소, 즉 우리가 최종" -"적으로 기여하고 싶고 따라서 최신 기록이 필요한 레포지토리입니다. origin은 우" -"리가 만든 포크된 레포지토리의 GitHub 원격 주소, 즉 우리 계정에 있는 사본(포" -"크)입니다." +"upstream은 부모 레포지토리(이 경우 Flower)의 GitHub 원격 주소, 즉 우리가 최종적으로 기여하고 싶고 따라서 최신" +" 기록이 필요한 레포지토리입니다. origin은 우리가 만든 포크된 레포지토리의 GitHub 원격 주소, 즉 우리 계정에 있는 " +"사본(포크)입니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" -msgstr "" -"로컬 버전의 포크가 Flower 레포지토리의 최신 변경 사항으로 최신 상태인지 확인" -"하려면 다음 명령을 실행하면 됩니다:" +msgstr "로컬 버전의 포크가 Flower 레포지토리의 최신 변경 사항으로 최신 상태인지 확인하려면 다음 명령을 실행하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "코딩 환경 설정" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 msgid "" "This can be achieved by following this :doc:`getting started guide for " -"contributors ` (note that " -"you won't need to clone the repository). Once you are able to write code and " -"test it, you can finally start making changes!" +"contributors ` (note " +"that you won't need to clone the repository). Once you are able to write " +"code and test it, you can finally start making changes!" msgstr "" -":doc:'기여자를 위한 시작 가이드 '를 참조하세요(레포지토리를 복제할 필요는 없습니다). 코드를 " -"작성하고 테스트할 수 있게 되면 드디어 변경을 시작할 수 있습니다!" +":doc:'기여자를 위한 시작 가이드 '를 참조하세요(레포지토리를 복제할 필요는 없습니다). 코드를 작성하고 테스트할 수 있게 되면 드디어" +" 변경을 시작할 수 있습니다!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "변경하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" -"Before making any changes make sure you are up-to-date with your repository:" +"Before making any changes make sure you are up-to-date with your " +"repository:" msgstr "변경하기 전에 레포지토리를 최신 상태로 유지하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "Flower의 레포지토리도 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "**새 브랜치 만들기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" -"To make the history cleaner and easier to work with, it is good practice to " -"create a new branch for each feature/project that needs to be implemented." -msgstr "" -"히스토리를 더 깔끔하고 작업하기 쉽게 만들려면 구현해야 하는 각 기능/프로젝트" -"에 대해 새 브랜치를 만드는 것이 좋습니다." +"To make the history cleaner and easier to work with, it is good practice " +"to create a new branch for each feature/project that needs to be " +"implemented." +msgstr "히스토리를 더 깔끔하고 작업하기 쉽게 만들려면 구현해야 하는 각 기능/프로젝트에 대해 새 브랜치를 만드는 것이 좋습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" -"To do so, just run the following command inside the repository's directory:" +"To do so, just run the following command inside the repository's " +"directory:" msgstr "이렇게 하려면 레포지토리 디렉토리에서 다음 명령을 실행하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "**변경하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 -msgid "" -"Write great code and create wonderful changes using your favorite editor!" +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 +msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "선호하는 편집기를 사용하여 멋진 코드를 작성하고 훌륭한 변화를 만들어 보세요!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "**코드 테스트 및 서식 지정**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" -"Don't forget to test and format your code! Otherwise your code won't be able " -"to be merged into the Flower repository. This is done so the codebase stays " -"consistent and easy to understand." +"Don't forget to test and format your code! Otherwise your code won't be " +"able to be merged into the Flower repository. This is done so the " +"codebase stays consistent and easy to understand." msgstr "" -"코드를 테스트하고 서식을 지정하는 것을 잊지 마세요! 그렇지 않으면 코드를 " -"Flower 레포지토리에 병합할 수 없습니다. 이는 코드베이스가 일관성을 유지하고 " -"이해하기 쉽도록 하기 위한 것입니다." +"코드를 테스트하고 서식을 지정하는 것을 잊지 마세요! 그렇지 않으면 코드를 Flower 레포지토리에 병합할 수 없습니다. 이는 " +"코드베이스가 일관성을 유지하고 이해하기 쉽도록 하기 위한 것입니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "이를 위해 실행할 수 있는 몇 가지 스크립트를 작성했습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" -msgstr "**Stage 변경**" +msgstr "**변경사항 스테이징**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" -"Before creating a commit that will update your history, you must specify to " -"Git which files it needs to take into account." -msgstr "" -"기록을 업데이트할 커밋을 만들기 전에 어떤 파일을 고려해야 하는지 Git에 지정해" -"야 합니다." +"Before creating a commit that will update your history, you must specify " +"to Git which files it needs to take into account." +msgstr "기록을 업데이트할 커밋을 만들기 전에 어떤 파일을 고려해야 하는지 Git에 지정해야 합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "이 작업을 수행할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#, fuzzy msgid "" -"To check which files have been modified compared to the last version (last " -"commit) and to see which files are staged for commit, you can use the :code:" -"`git status` command." +"To check which files have been modified compared to the last version " +"(last commit) and to see which files are staged for commit, you can use " +"the ``git status`` command." msgstr "" -"마지막 버전(마지막 커밋)과 비교하여 수정된 파일을 확인하고 커밋을 위해 스테이" -"징된 파일을 확인하려면 :code:`git status` 명령을 사용하면 됩니다." +"마지막 버전(마지막 커밋)과 비교하여 수정된 파일을 확인하고 커밋을 위해 스테이징된 파일을 확인하려면 :code:`git " +"status` 명령을 사용하면 됩니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" -msgstr "**Commit 변경**" +msgstr "**변경사항 커밋**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#, fuzzy msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" -msgstr "" -":code:`git add`를 사용하여 커밋하려는 모든 파일을 추가한 후, 마지막으로 이 명" -"령을 사용하여 커밋을 생성할 수 있습니다:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" +msgstr ":code:`git add`를 사용하여 커밋하려는 모든 파일을 추가한 후, 마지막으로 이 명령을 사용하여 커밋을 생성할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#, fuzzy msgid "" -"The \\ is there to explain to others what the commit does. " -"It should be written in an imperative style and be concise. An example would " -"be :code:`git commit -m \"Add images to README\"`." +"The \\ is there to explain to others what the commit " +"does. It should be written in an imperative style and be concise. An " +"example would be ``git commit -m \"Add images to README\"``." msgstr "" -"커밋의 내용을 다른 사람에게 설명하기 위해 \\가 있습니다. 명" -"령형 스타일로 작성해야 하며 간결해야 합니다. 예를 들면 :code:`git commit -m " -"\"Add images to README\"`." +"커밋의 내용을 다른 사람에게 설명하기 위해 \\가 있습니다. 명령형 스타일로 작성해야 하며 간결해야" +" 합니다. 예를 들면 :code:`git commit -m \"Add images to README\"`." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "**변경 사항을 포크에 푸시**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" -"Once we have committed our changes, we have effectively updated our local " -"history, but GitHub has no way of knowing this unless we push our changes to " -"our origin's remote address:" +"Once we have committed our changes, we have effectively updated our local" +" history, but GitHub has no way of knowing this unless we push our " +"changes to our origin's remote address:" msgstr "" -"변경 사항을 커밋하면 로컬 히스토리를 효과적으로 업데이트한 것이지만, 변경 사" -"항을 원본의 원격 주소로 푸시하지 않는 한 GitHub는 이를 알 방법이 없습니다:" +"변경 사항을 커밋하면 로컬 히스토리를 효과적으로 업데이트한 것이지만, 변경 사항을 원본의 원격 주소로 푸시하지 않는 한 " +"GitHub는 이를 알 방법이 없습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." -msgstr "" -"이 작업이 완료되면 변경한 내용으로 포크된 레포지토리가 업데이트된 것을 GitHub" -"에서 확인할 수 있습니다." +msgstr "이 작업이 완료되면 변경한 내용으로 포크된 레포지토리가 업데이트된 것을 GitHub에서 확인할 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "pull request(PR) 만들기 및 병합하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "**PR 만들기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" -"Once you have pushed changes, on the GitHub webpage of your repository you " -"should see the following message:" -msgstr "" -"변경 사항을 푸시하고 나면 레포지토리의 GitHub 웹페이지에 다음 메시지가 표시됩" -"니다:" +"Once you have pushed changes, on the GitHub webpage of your repository " +"you should see the following message:" +msgstr "변경 사항을 푸시하고 나면 레포지토리의 GitHub 웹페이지에 다음 메시지가 표시됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "Otherwise you can always find this option in the ``Branches`` page." -msgstr "" -"그렇지 않으면 언제든지 ``Branches`` 페이지에서 이 옵션을 찾을 수 있습니다." +msgstr "그렇지 않으면 언제든지 ``Branches`` 페이지에서 이 옵션을 찾을 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" -msgstr "" -"``Compare & pull request`` 버튼을 클릭하면 이와 비슷한 화면이 표시됩니다:" +msgstr "``Compare & pull request`` 버튼을 클릭하면 이와 비슷한 화면이 표시됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 -msgid "" -"At the top you have an explanation of which branch will be merged where:" +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 +msgid "At the top you have an explanation of which branch will be merged where:" msgstr "상단에는 어느 지점이 어디에 병합될 것인지에 대한 설명이 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" -"In this example you can see that the request is to merge the branch ``doc-" -"fixes`` from my forked repository to branch ``main`` from the Flower " -"repository." +"In this example you can see that the request is to merge the branch " +"``doc-fixes`` from my forked repository to branch ``main`` from the " +"Flower repository." msgstr "" -"이 예제에서는 내 포크된 레포지토리의 ``doc-fixes`` 브랜치를 Flower 레포지토리" -"의 ``main`` 브랜치에 병합하라는 요청을 볼 수 있습니다." +"이 예제에서는 내 포크된 레포지토리의 ``doc-fixes`` 브랜치를 Flower 레포지토리의 ``main`` 브랜치에 병합하라는" +" 요청을 볼 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " -"guidelines, otherwise it won't be possible to merge the PR. So in this case, " -"a correct title might be ``docs(framework:skip) Fix typos``." +"guidelines, otherwise it won't be possible to merge the PR. So in this " +"case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" -"제목은 :ref:`pr_title_format` 가이드라인을 준수하도록 변경해야 하며, 그렇지 " -"않으면 PR을 병합할 수 없습니다. 
따라서 이 경우 올바른 제목은 " -"``docs(framework:skip) Fix typos``이 될 수 있습니다." +"제목은 :ref:`pr_title_format` 가이드라인을 준수하도록 변경해야 하며, 그렇지 않으면 PR을 병합할 수 없습니다. " +"따라서 이 경우 올바른 제목은 ``docs(framework:skip) Fix typos``이 될 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" -"The input box in the middle is there for you to describe what your PR does " -"and to link it to existing issues. We have placed comments (that won't be " -"rendered once the PR is opened) to guide you through the process." +"The input box in the middle is there for you to describe what your PR " +"does and to link it to existing issues. We have placed comments (that " +"won't be rendered once the PR is opened) to guide you through the " +"process." msgstr "" -"가운데에 있는 입력 상자는 PR의 기능을 설명하고 기존 이슈에 연결할 수 있는 곳" -"입니다. 프로세스를 안내하기 위해 코멘트(PR이 열리면 렌더링되지 않음)를 배치했" -"습니다." +"가운데에 있는 입력 상자는 PR의 기능을 설명하고 기존 이슈에 연결할 수 있는 곳입니다. 프로세스를 안내하기 위해 코멘트(PR이 " +"열리면 렌더링되지 않음)를 배치했습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "It is important to follow the instructions described in comments." msgstr "코멘트에 설명된 지침을 따르는 것이 중요합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " -"reviewers that a new PR has been opened and that they should look over it to " -"merge or to request changes." +"reviewers that a new PR has been opened and that they should look over it" +" to merge or to request changes." msgstr "" -"하단에는 PR을 여는 버튼이 있습니다. 이렇게 하면 검토자에게 새 PR이 열렸으며 " -"병합하거나 변경을 요청하기 위해 검토해야 함을 알립니다." +"하단에는 PR을 여는 버튼이 있습니다. 이렇게 하면 검토자에게 새 PR이 열렸으며 병합하거나 변경을 요청하기 위해 검토해야 함을 " +"알립니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" -"If your PR is not yet ready for review, and you don't want to notify anyone, " -"you have the option to create a draft pull request:" -msgstr "" -"PR이 아직 검토할 준비가 되지 않았고 다른 사람에게 알리고 싶지 않은 경우 pull " -"request 초안을 만드는 옵션이 있습니다:" +"If your PR is not yet ready for review, and you don't want to notify " +"anyone, you have the option to create a draft pull request:" +msgstr "PR이 아직 검토할 준비가 되지 않았고 다른 사람에게 알리고 싶지 않은 경우 pull request 초안을 만드는 옵션이 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "**new changes 만들기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " -"commits to it the same way we did before, by making changes to the branch " -"associated with the PR." -msgstr "" -"PR이 초안으로 열렸든 아니든, PR과 연결된 브랜치를 변경하여 이전과 같은 방식으" -"로 새 커밋을 푸시할 수 있습니다." +"commits to it the same way we did before, by making changes to the branch" +" associated with the PR." +msgstr "PR이 초안으로 열렸든 아니든, PR과 연결된 브랜치를 변경하여 이전과 같은 방식으로 새 커밋을 푸시할 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "**PR 검토하기**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" -"Once the PR has been opened or once the draft PR has been marked as ready, a " -"review from code owners will be automatically requested:" -msgstr "" -"PR이 열리거나 초안 PR이 준비됨으로 표시되면 코드 소유자의 검토가 자동으로 요" -"청됩니다:" +"Once the PR has been opened or once the draft PR has been marked as " +"ready, a review from code owners will be automatically requested:" +msgstr "PR이 열리거나 초안 PR이 준비됨으로 표시되면 코드 소유자의 검토가 자동으로 요청됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" -"Code owners will then look into the code, ask questions, request changes or " -"validate the PR." -msgstr "" -"그러면 코드 소유자는 코드를 살펴보고, 질문하고, 변경을 요청하거나 PR의 유효성" -"을 검사합니다." +"Code owners will then look into the code, ask questions, request changes " +"or validate the PR." +msgstr "그러면 코드 소유자는 코드를 살펴보고, 질문하고, 변경을 요청하거나 PR의 유효성을 검사합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "진행 중인 변경 요청이 있는 경우 병합이 차단됩니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" -"To resolve them, just push the necessary changes to the branch associated " -"with the PR:" -msgstr "" -"이를 해결하려면 PR과 연결된 브랜치에 필요한 변경 사항을 푸시하면 됩니다:" +"To resolve them, just push the necessary changes to the branch associated" +" with the PR:" +msgstr "이를 해결하려면 PR과 연결된 브랜치에 필요한 변경 사항을 푸시하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "그리고 소통을 통해 해결하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" -"Once all the conversations have been resolved, you can re-request a review." +"Once all the conversations have been resolved, you can re-request a " +"review." msgstr "모든 대화가 해결되면 검토를 다시 요청할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "**PR이 병합되면**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" -"If all the automatic tests have passed and reviewers have no more changes to " -"request, they can approve the PR and merge it." -msgstr "" -"모든 자동 테스트가 통과되고 검토자가 더 이상 요청할 변경 사항이 없는 경우 PR" -"을 승인하고 병합할 수 있습니다." +"If all the automatic tests have passed and reviewers have no more changes" +" to request, they can approve the PR and merge it." +msgstr "모든 자동 테스트가 통과되고 검토자가 더 이상 요청할 변경 사항이 없는 경우 PR을 승인하고 병합할 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" -msgstr "" -"병합이 완료되면 GitHub에서 브랜치를 삭제할 수 있으며(삭제 버튼이 표시되어야 " -"함), 로컬에서도 삭제할 수 있습니다:" +msgstr "병합이 완료되면 GitHub에서 브랜치를 삭제할 수 있으며(삭제 버튼이 표시되어야 함), 로컬에서도 삭제할 수 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "그런 다음 다음을 수행하여 포크된 레포지토리를 업데이트해야 합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "첫 번째 기여의 예" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "문제" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "" -"For our documentation, we've started to use the `Diàtaxis framework `_." -msgstr "" -"저희 문서에는 'Diàtaxis 프레임워크 `_'를 사용하기 시작" -"했습니다." +"For our documentation, we've started to use the `Diàtaxis framework " +"`_." +msgstr "저희 문서에는 'Diàtaxis 프레임워크 `_'를 사용하기 시작했습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 msgid "" -"Our \"How to\" guides should have titles that continue the sentence \"How to " -"…\", for example, \"How to upgrade to Flower 1.0\"." +"Our \"How to\" guides should have titles that continue the sentence \"How" +" to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "" -"'How to' 가이드의 제목은 \"How to …\"라는 문장을 이어가는 제목이어야 합니다" -"(예: \"How to upgrade to Flower 1.0\")." 
+"'How to' 가이드의 제목은 \"How to …\"라는 문장을 이어가는 제목이어야 합니다(예: \"How to upgrade " +"to Flower 1.0\")." -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." -msgstr "" -"대부분의 가이드는 아직 이 새로운 형식을 따르지 않으며, 안타깝게도 제목을 변경" -"하는 작업은 생각보다 복잡합니다." +msgstr "대부분의 가이드는 아직 이 새로운 형식을 따르지 않으며, 안타깝게도 제목을 변경하는 작업은 생각보다 복잡합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 msgid "" -"This issue is about changing the title of a doc from present continuous to " -"present simple." -msgstr "" -"이번 이슈는 문서 제목을 현재 연속형에서 현재 단순형으로 변경하는 것에 관한 것" -"입니다." +"This issue is about changing the title of a doc from present continuous " +"to present simple." +msgstr "이번 이슈는 문서 제목을 현재 연속형에서 현재 단순형으로 변경하는 것에 관한 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" msgstr "" -"\"How to saving progress\"을 \"How to save progress\"으로 변경한 예를 들어 보" -"겠습니다. 이것이 우리의 점검을 통과했나요?" +"\"How to saving progress\"을 \"How to save progress\"으로 변경한 예를 들어 보겠습니다. " +"이것이 우리의 점검을 통과했나요?" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 msgid "Before: \"How to saving progress\" ❌" msgstr "Before: \"How to saving progress\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 msgid "After: \"How to save progress\" ✅" msgstr "After: \"How to save progress\" ✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "해결법" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. " "After cloning and setting up the Flower repo, here's what you should do:" msgstr "" -"이것은 사소한 변경이지만 end-to-end 설정을 테스트할 수 있습니다. Flower " -"레포지토리를 복제하고 설정한 후에는 다음과 같이 하세요:" +"이것은 사소한 변경이지만 end-to-end 설정을 테스트할 수 있습니다. 
Flower 레포지토리를 복제하고 설정한 후에는 다음과 " +"같이 하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Find the source file in ``doc/source``" msgstr "``doc/source``에서 소스 파일을 찾습니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" -msgstr "" -"``.rst`` 파일에서 변경합니다(제목 아래의 대시는 제목 자체의 길이와 같아야 합" -"니다)" +msgstr "``.rst`` 파일에서 변경합니다(제목 아래의 대시는 제목 자체의 길이와 같아야 합니다)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 msgid "" -"Build the docs and `check the result `_" +"Build the docs and `check the result `_" msgstr "" -"문서를 빌드하고 '결과 확인 `_'합니다" +"문서를 빌드하고 '결과 확인 `_'합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "파일 이름 바꾸기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" -"You might have noticed that the file name still reflects the old wording. If " -"we just change the file, then we break all existing links to it - it is " -"**very important** to avoid that, breaking links can harm our search engine " -"ranking." +"You might have noticed that the file name still reflects the old wording." +" If we just change the file, then we break all existing links to it - it " +"is **very important** to avoid that, breaking links can harm our search " +"engine ranking." msgstr "" -"파일 이름에 여전히 이전 문구가 반영되어 있는 것을 보셨을 것입니다. 파일만 변" -"경하면 파일에 대한 기존 링크가 모두 끊어지는데, 링크를 끊으면 검색 엔진 순위" -"에 영향을 줄 수 있으므로 이를 방지하는 것이 **매우 중요**합니다." +"파일 이름에 여전히 이전 문구가 반영되어 있는 것을 보셨을 것입니다. 
파일만 변경하면 파일에 대한 기존 링크가 모두 끊어지는데, " +"링크를 끊으면 검색 엔진 순위에 영향을 줄 수 있으므로 이를 방지하는 것이 **매우 중요**합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 msgid "Here's how to change the file name:" msgstr "파일 이름을 변경하는 방법은 다음과 같습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 msgid "Change the file name to ``save-progress.rst``" msgstr "파일 이름을 ``save-progress.rst``로 변경합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "'doc/source/conf.py'에 리디렉션 규칙을 추가합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "" -"This will cause a redirect from ``saving-progress.html`` to ``save-progress." -"html``, old links will continue to work." +"This will cause a redirect from ``saving-progress.html`` to ``save-" +"progress.html``, old links will continue to work." msgstr "" -"이렇게 하면 ``saving-progress.html``에서 ``save-progress.html``로 리디렉션되" -"며, 이전 링크는 계속 작동합니다." +"이렇게 하면 ``saving-progress.html``에서 ``save-progress.html``로 리디렉션되며, 이전 링크는 " +"계속 작동합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "인덱스 파일에 변경 사항 적용" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "For the lateral navigation bar to work properly, it is very important to " "update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." msgstr "" -"횡방향 내비게이션 바가 제대로 작동하려면 ``index.rst`` 파일도 업데이트하는 것" -"이 매우 중요합니다. 이 파일은 탐색 모음의 전체 배열을 정의하는 곳입니다." 
+"횡방향 내비게이션 바가 제대로 작동하려면 ``index.rst`` 파일도 업데이트하는 것이 매우 중요합니다. 이 파일은 탐색 모음의" +" 전체 배열을 정의하는 곳입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Find and modify the file name in ``index.rst``" msgstr "``index.rst``에서 파일 이름을 찾아 수정합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "PR 열기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 msgid "" -"Commit the changes (commit messages are always imperative: \"Do something\", " -"in this case \"Change …\")" -msgstr "" -"변경 사항을 커밋합니다(커밋 메시지는 항상 필수 메시지입니다:\"Do " -"something\"(이 경우 는 \"Change …\" )" +"Commit the changes (commit messages are always imperative: \"Do " +"something\", in this case \"Change …\")" +msgstr "변경 사항을 커밋합니다(커밋 메시지는 항상 필수 메시지입니다:\"Do something\"(이 경우 는 \"Change …\" )" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "변경 사항을 포크에 푸시합니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" -"Open a PR (as shown above) with title ``docs(framework) Update how-to guide " -"title``" -msgstr "" -"``docs(framework) Update how-to guide title`` 제목으로 PR(위와 같이)을 엽니다" +"Open a PR (as shown above) with title ``docs(framework) Update how-to " +"guide title``" +msgstr "``docs(framework) Update how-to guide title`` 제목으로 PR(위와 같이)을 엽니다" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "승인될 때까지 기다리세요!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "축하합니다! 이제 공식적으로 Flower 기여자가 되셨습니다!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "다음 단계" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" -"Once you have made your first PR, and want to contribute more, be sure to " -"check out the following :" +"Once you have made your first PR, and want to contribute more, be sure to" +" check out the following :" msgstr "첫 번째 PR을 작성하고 더 많은 기여를 하고 싶다면 다음을 확인하세요:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#, fuzzy msgid "" -":doc:`Good first contributions `, " -"where you should particularly look into the :code:`baselines` contributions." +":doc:`Good first contributions `, where you should particularly look into the " +"``baselines`` contributions." msgstr "" -":doc:`훌륭한 첫 번째 기여 `, 특히 :" -"code:`baselines` 기여를 살펴봐야 합니다." 
+":doc:`훌륭한 첫 번째 기여 `, 특히 " +":code:`baselines` 기여를 살펴봐야 합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "부록" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 msgid "PR title format" msgstr "PR 제목 형식" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 msgid "We enforce the following PR title format:" msgstr "다음과 같은 PR 제목 형식을 적용합니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid "" -"(or ``(:skip) `` to ignore the PR in the changelog)" -msgstr "" -"(또는 ``(:skip) ``를 사용하면 변경 로그에서 PR을 무시" -"합니다.)" +"(or ``(:skip) `` to ignore the PR in the " +"changelog)" +msgstr "(또는 ``(:skip) ``를 사용하면 변경 로그에서 PR을 무시합니다.)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid "" -"Where ```` needs to be in ``{ci, fix, feat, docs, refactor, break}``, " -"```` should be in ``{framework, baselines, datasets, examples, or " -"'*' when modifying multiple projects which requires the ':skip' flag to be " -"used}``, and ```` starts with a capitalised verb in the imperative " -"mood." +"Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " +"break}``, ```` should be in ``{framework, baselines, datasets, " +"examples, or '*' when modifying multiple projects which requires the " +"':skip' flag to be used}``, and ```` starts with a capitalised " +"verb in the imperative mood." 
msgstr "" -"여기서 ````은 ``{ci, fix, feat, docs, refactor, break}``, ````" -"는 ``{framework, baselines, datasets, examples, or '*' ':skip' 플래그를 사용" -"해야 하는 여러 프로젝트를 수정하는 경우}``로 입력해야 하며, ````는 " -"대문자로 시작해야 합니다." +"여기서 ````은 ``{ci, fix, feat, docs, refactor, break}``, " +"````는 ``{framework, baselines, datasets, examples, or '*' " +"':skip' 플래그를 사용해야 하는 여러 프로젝트를 수정하는 경우}``로 입력해야 하며, ````는 대문자로 " +"시작해야 합니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 msgid "Valid examples:" msgstr "유효한 예시입니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 msgid "``feat(framework) Add flwr build CLI command``" msgstr "``feat(framework) Add flwr build CLI command``" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "``refactor(examples:skip) Improve quickstart-pytorch logging``" -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 msgid "``ci(*:skip) Enforce PR title format``" msgstr "``ci(*:skip) Enforce PR title format``" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 msgid "Invalid examples:" msgstr "잘못된 예시입니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "``feat(framework): Add flwr build CLI command`` ( ``:``제외)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag 
along with " "``*``)" -msgstr "" -"``feat(*) Add flwr build CLI command`` (``skip`` flag와 함께 ``*``누락)" +msgstr "``feat(*) Add flwr build CLI command`` (``skip`` flag와 함께 ``*``누락)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "``feat(skip) Add flwr build CLI command`` (````누락)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" -msgstr "" -"``feat(framework) add flwr build CLI command`` (대문자로 표기되지 않은 동사)" +msgstr "``feat(framework) add flwr build CLI command`` (대문자로 표기되지 않은 동사)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: ../../source/contributor-tutorial-contribute-on-github.rst:389 msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "``feat(framework) Add flwr build CLI command.`` (끝에 마침표)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "``Add flwr build CLI command.`` ( ``()``누락)" @@ -2138,13 +2165,17 @@ msgid "Get started as a contributor" msgstr "기여자로 시작하기" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:153 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "전제 조건" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 -msgid "`Python 3.8 `_ or above" -msgstr "Python 
3.8 `_ 이상" +msgid "`Python 3.9 `_ or above" +msgstr "Python 3.9 `_ 이상" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 msgid "`Poetry 1.3 `_ or above" @@ -2155,27 +2186,27 @@ msgid "(Optional) `pyenv `_" msgstr "(선택 사항) `pyenv `_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:10 -msgid "" -"(Optional) `pyenv-virtualenv `_" -msgstr "" -"(선택 사항) `pyenv-virtualenv `_" +msgid "(Optional) `pyenv-virtualenv `_" +msgstr "(선택 사항) `pyenv-virtualenv `_" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +#, fuzzy msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " -"development tools (the ones which support it). Poetry is a build tool which " -"supports `PEP 517 `_." +"Flower uses ``pyproject.toml`` to manage dependencies and configure " +"development tools (the ones which support it). Poetry is a build tool " +"which supports `PEP 517 `_." msgstr "" -"Flower는 dependencies을 관리하고 개발 도구(이를 지원하는 도구)를 구성하기 위" -"해 :code:`pyproject.toml`을 사용합니다. Poetry는 `PEP 517 `_을 지원하는 빌드 도구입니다." +"Flower는 dependencies을 관리하고 개발 도구(이를 지원하는 도구)를 구성하기 위해 " +":code:`pyproject.toml`을 사용합니다. Poetry는 `PEP 517 " +"`_을 지원하는 빌드 도구입니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "개발자 머신 설정" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 -msgid "Preliminarities" +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 +#, fuzzy +msgid "Preliminaries" msgstr "사전 준비" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 @@ -2188,20806 +2219,27082 @@ msgstr "macOS의 경우" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:27 msgid "" -"Install `homebrew `_. Don't forget the post-installation " -"actions to add `brew` to your PATH." -msgstr "" -"`homebrew `_를 설치합니다. 
설치 후 `brew`를 PATH에 추가하" -"는 작업을 잊지 마세요." +"Install `homebrew `_. Don't forget the post-" +"installation actions to add `brew` to your PATH." +msgstr "`homebrew `_를 설치합니다. 설치 후 `brew`를 PATH에 추가하는 작업을 잊지 마세요." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 +#, fuzzy msgid "" -"Install `xz` (to install different Python versions) and `pandoc` to build " -"the docs::" -msgstr "" -"xz`(다른 Python 버전을 설치하려면)와 `pandoc`을 설치하여 문서를 빌드합니다::" +"Install `xz` (to install different Python versions) and `pandoc` to build" +" the docs:" +msgstr "xz`(다른 Python 버전을 설치하려면)와 `pandoc`을 설치하여 문서를 빌드합니다::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 msgid "For Ubuntu" msgstr "Ubuntu의 경우" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 +#, fuzzy msgid "" -"Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all necessary " -"packages::" -msgstr "" -"시스템(우분투 22.04 이상)이 최신 상태이고 필요한 패키지가 모두 설치되어 있는" -"지 확인하세요:" +"Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " +"necessary packages:" +msgstr "시스템(우분투 22.04 이상)이 최신 상태이고 필요한 패키지가 모두 설치되어 있는지 확인하세요:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 msgid "Create Flower Dev Environment" msgstr "Flower 개발 환경 만들기" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 +#, fuzzy msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" -msgstr "" -"1. GitHub: 에서 ``Flower 레포지토리 `_를 복제" -"합니다::" +"Clone the `Flower repository `_ from " +"GitHub:" +msgstr "1. 
GitHub: 에서 ``Flower 레포지토리 `_를 복제합니다::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 +#, fuzzy msgid "" -"Let's create the Python environment for all-things Flower. If you wish to " -"use :code:`pyenv`, we provide two convenience scripts that you can use. If " -"you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +"Let's create the Python environment for all-things Flower. If you wish to" +" use ``pyenv``, we provide two convenience scripts that you can use. If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." msgstr "" -"Flower의 모든 것을 위한 파이썬 환경을 만들어 보겠습니다.:code:`pyenv`를 사용" -"하고자 하는 경우 사용할 수 있는 두 가지 편의 스크립트를 제공합니다.:code:" -"`pyenv`가 아닌 다른 것을 사용하려면 새 환경을 생성하고 활성화한 후 모든 패키" -"지가 설치된 마지막 지점으로 건너뛰세요." +"Flower의 모든 것을 위한 파이썬 환경을 만들어 보겠습니다.:code:`pyenv`를 사용하고자 하는 경우 사용할 수 있는 두 " +"가지 편의 스크립트를 제공합니다.:code:`pyenv`가 아닌 다른 것을 사용하려면 새 환경을 생성하고 활성화한 후 모든 패키지가" +" 설치된 마지막 지점으로 건너뛰세요." 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 +#, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will " -"install it, set it up, and create the virtual environment (with :code:" -"`Python 3.8.17` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -":code:`pyenv`가 설치되어 있지 않은 경우 다음 스크립트를 사용하여 설치, 설정 " -"및 가상 환경을 생성합니다(기본적으로 :code:`Python 3.8.17` 사용):" +":code:`pyenv`가 설치되어 있지 않은 경우 다음 스크립트를 사용하여 설치, 설정 및 가상 환경을 생성합니다(기본적으로 " +":code:`Python 3.9.20` 사용):" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#, fuzzy msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with :" -"code:`Python 3.8.17` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -":code:`pyenv`가 이미 설치되어 있는 경우( :code:`pyenv-virtualenv` 플러그인과 " -"함께) 다음과 같은 편의 스크립트를 사용할 수 있습니다(기본적으로 코드:`Python " -"3.8.17` 사용):" +":code:`pyenv`가 이미 설치되어 있는 경우( :code:`pyenv-virtualenv` 플러그인과 함께) 다음과 같은 " +"편의 스크립트를 사용할 수 있습니다(기본적으로 코드:`Python 3.9.20` 사용):" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#, fuzzy msgid "" -"3. Install the Flower package in development mode (think :code:`pip install -" -"e`) along with all necessary dependencies::" +"3. 
Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "" -"3. 필요한 모든 dependencies와 함께 개발 모드에서 Flower 패키지를 설치합니다" -"(예:code:`pip install -e`)::" +"3. 필요한 모든 dependencies와 함께 개발 모드에서 Flower 패키지를 설치합니다(예:code:`pip install " +"-e`)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "Convenience Scripts" msgstr "편의 스크립트" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87 +#, fuzzy msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the :code:`/" -"dev` subdirectory for a full list. The following scripts are amongst the " -"most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. The following scripts are amongst the most" +" important ones:" msgstr "" -"Flower 레포지토리에는 반복적인 개발 작업을 더 쉽고 오류를 줄이기 위한 여러 가" -"지 편의 스크립트가 포함되어 있습니다. 전체 목록은 :code:`/dev` 하위 디렉터리" -"를 참조하세요. 다음 스크립트는 가장 중요한 스크립트 중 하나입니다:" +"Flower 레포지토리에는 반복적인 개발 작업을 더 쉽고 오류를 줄이기 위한 여러 가지 편의 스크립트가 포함되어 있습니다. 전체 " +"목록은 :code:`/dev` 하위 디렉터리를 참조하세요. 
다음 스크립트는 가장 중요한 스크립트 중 하나입니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 msgid "Create/Delete Virtual Environment" msgstr "가상 환경 생성/삭제" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101 msgid "Compile ProtoBuf Definitions" msgstr "ProtoBuf 정의 컴파일" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "Auto-Format Code" msgstr "자동 포맷 코드" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "Run Linters and Tests" msgstr "린터 및 테스트 실행" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 msgid "Add a pre-commit hook" msgstr "사전 커밋 훅 추가" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 msgid "" -"Developers may integrate a pre-commit hook into their workflow utilizing the " -"`pre-commit `_ library. The pre-commit hook " -"is configured to execute two primary operations: ``./dev/format.sh`` and ``./" -"dev/test.sh`` scripts." +"Developers may integrate a pre-commit hook into their workflow utilizing " +"the `pre-commit `_ library. The pre-" +"commit hook is configured to execute two primary operations: " +"``./dev/format.sh`` and ``./dev/test.sh`` scripts." msgstr "" -"개발자는 `pre-commit `_ 라이브러리를 사용하" -"여 사전 커밋 훅을 워크플로에 통합할 수 있습니다. 사전 커밋 훅은 두 가지 기본 " -"작업을 실행하도록 구성됩니다:``./dev/format.sh`` 및 ``./dev/test.sh`` 스크립" -"트." +"개발자는 `pre-commit `_ 라이브러리를 사용하여 사전 커밋 훅을" +" 워크플로에 통합할 수 있습니다. 사전 커밋 훅은 두 가지 기본 작업을 실행하도록 구성됩니다:``./dev/format.sh`` 및" +" ``./dev/test.sh`` 스크립트." 
-#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128 msgid "There are multiple ways developers can use this:" msgstr "개발자가 이것을 사용할 수 있는 여러가지 방법이 있습니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 -msgid "" -"Install the pre-commit hook to your local git directory by simply running:" +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "간단하게 실행하여 로컬 git 디렉터리에 사전 커밋 훅을 설치하세요:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136 msgid "" -"Each ``git commit`` will trigger the execution of formatting and linting/" -"test scripts." +"Each ``git commit`` will trigger the execution of formatting and " +"linting/test scripts." msgstr "각 ``git 커밋``은 포맷 및 린팅/테스트 스크립트의 실행을 트리거합니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138 +#, fuzzy msgid "" -"If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` " -"command. ::" -msgstr "" -"급한 경우 ``git commit`` 명령과 함께 `--no-verify``를 사용하여 훅을 넘기세요:" +"If in a hurry, bypass the hook using ``--no-verify`` with the ``git " +"commit`` command." 
+msgstr "급한 경우 ``git commit`` 명령과 함께 `--no-verify``를 사용하여 훅을 넘기세요:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145 msgid "" "For developers who prefer not to install the hook permanently, it is " -"possible to execute a one-time check prior to committing changes by using " -"the following command:" -msgstr "" -"훅을 영구적으로 설치하지 않으려는 개발자의 경우 다음 명령을 사용하여 변경 사" -"항을 커밋하기 전에 일회성 검사를 실행할 수 있습니다:" +"possible to execute a one-time check prior to committing changes by using" +" the following command:" +msgstr "훅을 영구적으로 설치하지 않으려는 개발자의 경우 다음 명령을 사용하여 변경 사항을 커밋하기 전에 일회성 검사를 실행할 수 있습니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." -msgstr "" -"이렇게 하면 ``git commit``의 기본 동작을 수정하지 않고 모든 파일에 대해 포맷 " -"및 린팅 검사/테스트를 실행합니다." +msgstr "이렇게 하면 ``git commit``의 기본 동작을 수정하지 않고 모든 파일에 대해 포맷 및 린팅 검사/테스트를 실행합니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156 msgid "Run Github Actions (CI) locally" msgstr "로컬에서 Github Action(CI) 실행하기" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158 +#, fuzzy msgid "" -"Developers could run the full set of Github Actions workflows under their " -"local environment by using `Act `_. Please " -"refer to the installation instructions under the linked repository and run " -"the next command under Flower main cloned repository folder::" +"Developers could run the full set of Github Actions workflows under their" +" local environment by using `Act `_. 
" +"Please refer to the installation instructions under the linked repository" +" and run the next command under Flower main cloned repository folder:" msgstr "" -"개발자는 `Act `_를 사용하여 로컬 환경에서 전" -"체 Github Actions 워크플로우 세트를 실행할 수 있습니다. 링크된 레포지토리 아" -"래의 설치 지침을 참조하여 Flower 메인 클론 레포지토리 폴더 아래에서 다음 명령" -"을 실행하세요::" +"개발자는 `Act `_를 사용하여 로컬 환경에서 전체 Github " +"Actions 워크플로우 세트를 실행할 수 있습니다. 링크된 레포지토리 아래의 설치 지침을 참조하여 Flower 메인 클론 " +"레포지토리 폴더 아래에서 다음 명령을 실행하세요::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." -msgstr "" -"Flower 기본 워크플로우는 아래에 필요한 Docker 머신을 설정하여 실행합니다." +msgstr "Flower 기본 워크플로우는 아래에 필요한 Docker 머신을 설정하여 실행합니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171 msgid "Build Release" msgstr "릴리즈 빌드" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173 +#, fuzzy msgid "" -"Flower uses Poetry to build releases. The necessary command is wrapped in a " -"simple script::" -msgstr "" -"Flower는 Poetry를 사용하여 릴리즈를 빌드합니다. 필요한 명령은 간단한 스크립트" -"로 래핑됩니다::" +"Flower uses Poetry to build releases. The necessary command is wrapped in" +" a simple script:" +msgstr "Flower는 Poetry를 사용하여 릴리즈를 빌드합니다. 필요한 명령은 간단한 스크립트로 래핑됩니다::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180 +#, fuzzy msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in " -"the :code:`/dist` subdirectory." -msgstr "" -"결과물인 :code:`.whl` 및 :code:`.tar.gz` 릴리즈는 :code:`/dist` 하위 디렉터리" -"에 저장됩니다." 
+"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." +msgstr "결과물인 :code:`.whl` 및 :code:`.tar.gz` 릴리즈는 :code:`/dist` 하위 디렉터리에 저장됩니다." -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184 msgid "Build Documentation" msgstr "문서 빌드" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186 +#, fuzzy msgid "" -"Flower's documentation uses `Sphinx `_. There's " -"no convenience script to re-build the documentation yet, but it's pretty " -"easy::" +"Flower's documentation uses `Sphinx `_. " +"There's no convenience script to re-build the documentation yet, but it's" +" pretty easy:" msgstr "" -"Flower의 문서는 `Sphinx `_를 사용합니다. 아직 문" -"서를 다시 작성할 수 있는 편리한 스크립트는 없지만 다음과 같이 쉽게 작성할 수 " -"있습니다:" +"Flower의 문서는 `Sphinx `_를 사용합니다. 아직 문서를 다시 작성할" +" 수 있는 편리한 스크립트는 없지만 다음과 같이 쉽게 작성할 수 있습니다:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194 msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "그러면 ``doc/build/html``에 HTML 문서가 생성됩니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "예시: PyTorch에서 FedBN - 중앙 집중식에서 연합식으로" +#: ../../source/docker/enable-tls.rst:2 +#, fuzzy +msgid "Enable TLS for Secure Connections" +msgstr "보안 연결을 위한 SSL 사용 설정" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated version " -"of an existing machine learning workload with `FedBN `_, a federated training strategy designed for non-iid data. 
We " -"are using PyTorch to train a Convolutional Neural Network(with Batch " -"Normalization layers) on the CIFAR-10 dataset. When applying FedBN, only few " -"changes needed compared to :doc:`Example: PyTorch - From Centralized To " -"Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." msgstr "" -"이 튜토리얼에서는 non-iid data를 위해 설계된 federated 훈련 전략인 `FedBN " -"`_으로 기존 머신러닝 워크로드의 federated " -"버전을 구축하기 위해 Flower를 사용하는 방법을 보여드립니다. 우리는 PyTorch를 " -"사용하여 CIFAR-10 데이터 세트에서 컨볼루션 신경망(일괄 정규화 레이어 포함)을 " -"훈련하고 있습니다. FedBN을 적용할 때, :doc:`예제: 파이토치 -중앙 집중식에서 " -"연합식으로 ` 와 비교했을 때 " -"몇 가지 사항만 변경 하면 됩니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" -msgstr "중앙 집중식 훈련" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/docker/enable-tls.rst:7 +#, fuzzy msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized To " -"Federated `. The only thing " -"to do is modifying the file called :code:`cifar.py`, revised part is shown " -"below:" -msgstr "" -"모든 파일은 :doc:`예제: 파이토치 -중앙 집중식에서 연합식으로 `를 기반으로 수정합니다. :code:`cifar." -"py`라는 파일을 수정하기만 하면 되며, 수정된 부분은 아래와 같습니다:" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." +msgstr "SSL을 사용하려면 PEM으로 인코딩된 루트 인증서, PEM으로 인코딩된 개인 키 및 PEM으로 인코딩된 인증서 체인이 필요합니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:12 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"For testing purposes, you can generate your own self-signed certificates." 
+" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -"Net() 클래스에 정의된 모델 아키텍처는 그에 따라 배치 정규화 레이어가 추가됩니" -"다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" -msgstr "이제 머신 러닝 워크로드를 실행할 수 있습니다:" +"테스트 목적으로 자체 서명된 인증서를 생성할 수 있습니다. 'SSL 연결 사용 " +"`__ 페이지에 프로세스를 안내하는 섹션이 있습니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/docker/enable-tls.rst:17 +#, fuzzy msgid "" -"So far this should all look fairly familiar if you've used PyTorch before. " -"Let's take the next step and use what we've built to create a federated " -"learning system within FedBN, the system consists of one server and two " -"clients." +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." msgstr "" -"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. " -"다음 단계로 넘어가서 우리가 구축한 것을 사용하여 FedBN 내에서 하나의 서버와 " -"두 개의 클라이언트로 구성된 연합학습 시스템을 만들어 보겠습니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" -msgstr "연합 훈련" +"기본적으로 Flower 컨테이너는 루트가 아닌 사용자 ``app``로 실행되므로 마운트된 파일과 디렉터리에 사용자 ID " +"``49999``에 대한 적절한 권한이 있어야 합니다. 예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 " +"사용자 ID를 변경하려면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:20 +#, fuzzy msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are " -"easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. 
If not, please read the :doc:" -"`Example: PyTorch - From Centralized To Federated `. first." +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" -":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 읽었다면, 다음 부분은 쉽게 따라할 수 있으며 " -":code:`client.py`의 :code:`get_parameters`와 :code:`set_parameters` 함수만 " -"수정해야 합니다. 그렇지 않은 경우 :doc:`예제: 파이토치 - 중앙 집중식에서 " -"연합식으로 `를 먼저 " -"읽어보세요." +"기본적으로 Flower 컨테이너는 루트가 아닌 사용자 ``app``로 실행되므로 마운트된 파일과 디렉터리에 사용자 ID " +"``49999``에 대한 적절한 권한이 있어야 합니다. 예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 " +"사용자 ID를 변경하려면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:15 msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, :code:" -"`server.py` keeps unchanged, we can start the server directly." +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." msgstr "" -"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. FedBN에서 :code:" -"`server.py`는 변경되지 않고 그대로 유지되므로 서버를 바로 시작할 수 있습니다." -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/docker/enable-tls.rst:27 +#, fuzzy +msgid "SuperLink" +msgstr "flower 초연결" + +#: ../../source/docker/enable-tls.rst:29 msgid "" -"Finally, we will revise our *client* logic by changing :code:" -"`get_parameters` and :code:`set_parameters` in :code:`client.py`, we will " -"exclude batch normalization parameters from model parameter list when " -"sending to or receiving from the server." 
+"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" msgstr "" -"마지막으로, :code:`client.py`에서 :code:`get_parameters` 및 :code:" -"`set_parameters`를 변경하여 *client* 로직을 수정할 것입니다. 서버로 보내거나 " -"서버에서 받을 때 모델 파라미터 목록에서 배치 정규화 파라미터를 제외할 수 있습" -"니다." - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 -msgid "" -"in each window (make sure that the server is still running before you do so) " -"and see your (previously centralized) PyTorch project run federated learning " -"with FedBN strategy across two clients. Congratulations!" +#: ../../source/docker/enable-tls.rst +msgid "Understanding the command" msgstr "" -"를 입력하고(클릭하기 전에 서버가 계속 실행 중인지 확인하세요), (이전에 중앙 " -"집중된) PyTorch 프로젝트가 두 클라이언트에서 FedBN으로 연합 학습을 실행하는 " -"것을 확인합니다. 축하합니다!" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" -msgstr "다음 단계" +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +#, fuzzy +msgid "``docker run``: This tells Docker to run a container from an image." +msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." 
-#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 -msgid "" -"The full source code for this example can be found `here `_. Our " -"example is of course somewhat over-simplified because both clients load the " -"exact same dataset, which isn't realistic. You're now prepared to explore " -"this topic further. How about using different subsets of CIFAR-10 on each " -"client? How about adding more clients?" -msgstr "" -"이 예제의 전체 소스 코드는 '여기 `_'에서 확인할 수 있습니다. 물" -"론 이 예제는 두 클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 다" -"소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 주제를 더 자" -"세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서 서로 다른 CIFAR-10의 하위 집" -"합을 사용해 보는 것은 어떨까요? 클라이언트를 더 추가하는 것은 어떨까요?" - -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" -msgstr "예시: JAX - JAX Federated 실행" - -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 -msgid "" -"This tutorial will show you how to use Flower to build a federated version " -"of an existing JAX workload. We are using JAX to train a linear regression " -"model on a scikit-learn dataset. We will structure the example similar to " -"our `PyTorch - From Centralized To Federated `_ walkthrough. " -"First, we build a centralized training approach based on the `Linear " -"Regression with JAX `_ tutorial`. Then, we build upon the centralized " -"training code to run the training in a federated fashion." -msgstr "" -"이 튜토리얼에서는 Flower를 사용하여 기존 JAX 워크로드의 연합 버전을 구축하는 " -"방법을 보여드립니다. JAX를 사용해 scikit-learn 데이터 세트에서 선형 회귀 " -"모델을 훈련하고 있습니다. 예제는 '파이토치 - Centralized에서 Federated으로 " -"`_ 워크스루와 유사하게 구성하겠습니다. 먼저, `JAX를 사용한 선형 " -"회귀 `_ 튜토리얼`을 기반으로 centralized 학습 접근 방식을 구축합니다. 그런 " -"다음 centralized 트레이닝 코드를 기반으로 federated 방식으로 트레이닝을 " -"실행합니다." 
- -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 -msgid "" -"Before we start building our JAX example, we need install the packages :code:" -"`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" -msgstr "" -"JAX 예제 빌드를 시작하기 전에 :code:`jax`, :code:`jaxlib`, :code:`scikit-" -"learn`, :code:`flwr` 패키지를 설치해야 합니다:" - -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" -msgstr "JAX를 사용한 선형 회귀" +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 +msgid "``--rm``: Remove the container once it is stopped or the command exits." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based on " -"a :code:`Linear Regression` model. If you want a more in-depth explanation " -"of what's going on then have a look at the official `JAX documentation " -"`_." +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -"먼저 :code:`선형 회귀` 모델을 기반으로 하는 중앙 집중식 훈련 코드에 대한 " -"간략한 설명부터 시작하겠습니다. 더 자세한 설명을 원하시면 공식 `JAX 문서 " -"`_를 참조하세요." -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 +#: ../../source/docker/enable-tls.rst msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. 
First, the JAX packages :code:`jax` and :code:`jaxlib` need to be " -"imported. In addition, we need to import :code:`sklearn` since we use :code:" -"`make_regression` for the dataset and :code:`train_test_split` to split the " -"dataset into a training and test set. You can see that we do not yet import " -"the :code:`flwr` package for federated learning. This will be done later." +"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -"전통적인(중앙 집중식) 선형 회귀 훈련에 필요한 모든 구성 요소가 포함된 " -":code:`jax_training.py`라는 새 파일을 생성해 보겠습니다. 먼저, JAX 패키지인 " -":code:`jax`와 :code:`jaxlib`를 가져와야 합니다. 또한 데이터 세트에 " -":code:`make_regression`을 사용하고 데이터 세트를 학습 및 테스트 세트로 " -"분할하기 위해 :code:`train_test_split`을 사용하므로 :code:`sklearn`을 " -"가져와야 합니다. 연합 학습을 위해 아직 :code:`flwr` 패키지를 가져오지 않은 " -"것을 볼 수 있습니다. 이 작업은 나중에 수행됩니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 -msgid "" -"The :code:`load_data()` function loads the mentioned training and test sets." +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." msgstr "" -"code:`load_data()` 함수는 앞서 언급한 트레이닝 및 테스트 세트를 로드합니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/docker/enable-tls.rst msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is " -"defined in :code:`load_model()`." +"This allows the container to access the TLS certificates that are stored " +"in the certificates" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "directory." msgstr "" -"모델 아키텍처(매우 간단한 :code:`선형 회귀` 모델)는 :code:`load_model()`에 정" -"의되어 있습니다." 
-#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"We now need to define the training (function :code:`train()`), which loops " -"over the training set and measures the loss (function :code:`loss_fn()`) for " -"each batch of training examples. The loss function is separate since JAX " -"takes derivatives with a :code:`grad()` function (defined in the :code:" -"`main()` function and called in :code:`train()`)." +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"이제 훈련 집합을 반복하고 각 훈련 예제 배치에 대해 손실을 측정하는(함수 :" -"code:`loss_fn()`) 훈련(함수 :code:`train()`)을 정의해야 합니다. JAX는 :code:" -"`grad()` 함수(:code:`main()` 함수에 정의되고 :code:`train()`에서 호출됨)로 파" -"생물을 취하므로 손실 함수는 분리되어 있습니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst msgid "" -"The evaluation of the model is defined in the function :code:`evaluation()`. " -"The function takes all test examples and measures the loss of the linear " -"regression model." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" -"모델의 평가는 :code:`evaluation()` 함수에 정의되어 있습니다. 이 함수는 모든 " -"테스트 예제를 가져와 선형 회귀 모델의 손실을 측정합니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. As " -"already mentioned, the :code:`jax.grad()` function is defined in :code:" -"`main()` and passed to :code:`train()`." 
+"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -"데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으므로 이제 모든 것을 종합하" -"여 JAX를 사용 모델을 훈련할 수 있습니다. 이미 언급했듯이 :code:`jax.grad()` " -"함수는 :code:`main()`에 정의되어 :code:`train()`에 전달됩니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" -msgstr "이제 (중앙 집중식) JAX 선형 회귀 워크로드를 실행할 수 있습니다:" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "inside the container." +msgstr "VSCode Dev Container에서 개발" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst msgid "" -"So far this should all look fairly familiar if you've used JAX before. Let's " -"take the next step and use what we've built to create a simple federated " -"learning system consisting of one server and two clients." +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -"지금까지는 JAX를 사용해 본 적이 있다면 이 모든 것이 상당히 익숙해 보일 것입니" -"다. 다음 단계로 넘어가서 우리가 구축한 것을 사용하여 하나의 서버와 두 개의 클" -"라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" -msgstr "JAX와 Flower의 만남" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "SuperLink." +msgstr "flower 초연결" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 -msgid "" -"The concept of federating an existing workload is always the same and easy " -"to understand. We have to start a *server* and then use the code in :code:" -"`jax_training.py` for the *clients* that are connected to the *server*. The " -"*server* sends model parameters to the clients. The *clients* run the " -"training and update the parameters. 
The updated parameters are sent back to " -"the *server*, which averages all received parameter updates. This describes " -"one round of the federated learning process, and we repeat this for multiple " -"rounds." -msgstr "" -"기존 워크로드를 연합하는 개념은 항상 동일하고 이해하기 쉽습니다. 서버*를 " -"시작한 다음 *서버*에 연결된 *클라이언트*에 대해 :code:`jax_training.py`의 " -"코드를 사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. " -"클라이언트는 학습을 실행하고 파라미터를 업데이트합니다. 업데이트된 " -"파라미터는 *서버*로 다시 전송되며, 수신된 모든 파라미터 업데이트의 평균을 " -"구합니다. 이는 연합 학습 프로세스의 한 라운드를 설명하며, 이 과정을 여러 " -"라운드에 걸쳐 반복합니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 -msgid "" -"Our example consists of one *server* and two *clients*. Let's set up :code:" -"`server.py` first. The *server* needs to import the Flower package :code:" -"`flwr`. Next, we use the :code:`start_server` function to start a server and " -"tell it to perform three rounds of federated learning." -msgstr "" -"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. 먼저 " -":code:`server.py`를 설정해 보겠습니다. *server*는 Flower 패키지 :code:`flwr`" -"를 가져와야 합니다. 다음으로, :code:`start_server` 함수를 사용하여 서버를 " -"시작하고 세 차례의 연합 학습을 수행하도록 지시합니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" -msgstr "이미 *서버*를 시작할 수 있습니다:" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build " -"upon the previously defined JAX training in :code:`jax_training.py`. 
Our " -"*client* needs to import :code:`flwr`, but also :code:`jax` and :code:" -"`jaxlib` to update the parameters on our JAX model:" -msgstr "" -"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:" -"`jax_training.py`에서 이전에 정의한 JAX 교육을 기반으로 빌드합니다. *클라이언" -"트*는 :code:`flwr`을 가져와야 하며, JAX 모델의 파라미터를 업데이트하기 위해 :" -"code:`jax` 및 :code:`jaxlib`도 가져와야 합니다:" - -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of " -"either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. Our " -"implementation will be based on :code:`flwr.client.NumPyClient` and we'll " -"call it :code:`FlowerClient`. :code:`NumPyClient` is slightly easier to " -"implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like JAX) because it avoids some of the boilerplate that " -"would otherwise be necessary. :code:`FlowerClient` needs to implement four " -"methods, two methods for getting/setting model parameters, one method for " -"training the model, and one method for testing the model:" -msgstr "" -"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` " -"또는 :code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니" -"다. 구현은 :code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:" -"`FlowerClient`라고 부를 것입니다. :code:`NumPyClient`는 필요한 일부 보일러플" -"레이를 피할 수 있기 때문에 NumPy 상호 운용성이 좋은 프레임워크(예: JAX)를 사" -"용하는 경우 :code:`Client`보다 구현하기가 약간 더 쉽습니다. code:" -"`FlowerClient`는 모델 매개변수를 가져오거나 설정하는 메서드 2개, 모델 학습을 " -"위한 메서드 1개, 모델 테스트를 위한 메서드 1개 등 총 4개의 메서드를 구현해야 " -"합니다:" - -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" -msgstr ":code:`set_parameters (선택사항)`" +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/enable-tls.rst msgid "" -"set the model parameters on the local model that are received from the server" -msgstr "서버에서 수신한 로컬 모델의 모델 파라미터를 설정합니다" +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" -msgstr "매개 변수를 NumPy :code:`ndarray`로 변환" +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy :code:`ndarray`'s " -"(think list of neural network layers)" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" msgstr "" -"(신경망 레이어 목록으로 생각하면 됩니다) NumPy :code:`ndarray`로 받은 모델 파" -"라미터 목록에 대해 반복합니다" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" -msgstr ":code:`get_parameters`" +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 -msgid "" -"get the model parameters and return them as a list of NumPy :code:" -"`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" -msgstr "" -"모델 매개변수를 가져와서 NumPy :code:`ndarray`의 목록으로 반환합니다(이는 :" -"code:`flwr.client.NumPyClient`가 기대하는 바와 같습니다)" - -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" - -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 -msgid "" -"update the parameters of the local model with the parameters received from " -"the server" -msgstr "서버에서 받은 파라미터로 로컬 모델의 파라미터를 업데이트합니다" +#: ../../source/docker/enable-tls.rst +msgid "" +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" -msgstr "로컬 훈련 세트에서 모델을 훈련합니다" - -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" -msgstr "업데이트된 로컬 모델 파라미터를 가져와 서버로 반환합니다" - 
-#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" -msgstr ":code:`evaluate`" - -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" -msgstr "로컬 테스트 세트에서 업데이트된 모델을 평가합니다" +#: ../../source/docker/enable-tls.rst +msgid "the network." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" -msgstr "로컬 손실을 서버로 반환합니다" +#: ../../source/docker/enable-tls.rst:72 +#, fuzzy +msgid "SuperNode" +msgstr "run\\_supernode" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/enable-tls.rst:74 +#, fuzzy msgid "" -"The challenging part is to transform the JAX model parameters from :code:" -"`DeviceArray` to :code:`NumPy ndarray` to make them compatible with " -"`NumPyClient`." +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." msgstr "" -"어려운 부분은 JAX 모델 매개변수를 :code:`DeviceArray`에서 :code:`NumPy " -"ndarray`로 변환하여 `NumPyClient`와 호환되도록 하는 것입니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 -msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make " -"use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. 
So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type annotations " -"to give you a better understanding of the data types that get passed around." -msgstr "" -"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전" -"에 :code:`jax_training.py`에 정의된 함수 :code:`train()`과 :code:`evaluate()`" -"를 사용합니다. 따라서 여기서 우리가 실제로 하는 일은 이미 정의된 함수 중 훈련" -"과 평가를 위해 호출할 함수를 :code:`NumPyClient` 서브클래스를 통해 Flower에" -"게 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 있도록 유형 " -"type annotation을 포함했습니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." -msgstr "연합 프로세스를 정의했으면 이제 실행할 수 있습니다." - -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" -msgstr "여기까지입니다. 이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 " -"있습니다" +"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " +"디렉터리에 마운트할 수 있습니다. 이렇게 하면 SuperNode가 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작할 " +"때 ``--root-certificates`` 플래그를 사용하세요." -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/enable-tls.rst:79 msgid "" -"in each window (make sure that the server is still running before you do so) " -"and see your JAX project run federated learning across two clients. " -"Congratulations!" +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." msgstr "" -"를 입력하고(그 전에 서버가 계속 실행 중인지 확인하세요) 두 클라이언트에서 " -"연합 학습을 실행하는 JAX 프로젝트를 확인합니다. 축하합니다!" 
-#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 -msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" msgstr "" -"이 예제의 소스 코드는 시간이 지남에 따라 개선되었으며 여기에서 확인할 수 있습" -"니다: 'Quickstart JAX `_. 두 클라이언트가 동일한 데이터 세트를 로드하기 때문에 이 예" -"제는 다소 단순화되어 있습니다." -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/enable-tls.rst msgid "" -"You're now prepared to explore this topic further. How about using a more " -"sophisticated model or using a different dataset? How about adding more " -"clients?" +"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -"이제 이 주제를 더 자세히 살펴볼 준비가 되었습니다. 더 정교한 모델을 사용하거" -"나 다른 데이터 집합을 사용해 보는 것은 어떨까요? 클라이언트를 더 추가하는 것" -"은 어떨까요?" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "예제: 파이토치 - 중앙 집중식에서 연합식으로" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "VSCode Dev Container에서 개발" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated version " -"of an existing machine learning workload. We are using PyTorch to train a " -"Convolutional Neural Network on the CIFAR-10 dataset. First, we introduce " -"this machine learning task with a centralized training approach based on the " -"`Deep Learning with PyTorch `_ tutorial. Then, we build upon the centralized " -"training code to run the training in a federated fashion." 
-msgstr "" -"이 튜토리얼에서는 Flower를 사용해 기존 머신 러닝 워크로드의 연합 버전을 " -"구축하는 방법을 보여드립니다. 여기서는 PyTorch를 사용해 CIFAR-10 데이터 " -"세트에서 컨볼루션 신경망을 훈련합니다. 먼저, 'PyTorch로 딥 러닝 " -"`_ " -"튜토리얼을 기반으로 centralized 학습 접근 방식을 사용하여 이 머신 러닝 " -"작업을 소개합니다. 그런 다음 centralized 훈련 코드를 기반으로 연합 방식 " -"훈련을 실행합니다." +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized CNN training code. If " -"you want a more in-depth explanation of what's going on then have a look at " -"the official `PyTorch tutorial `_." +"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -"중앙 집중식 CNN 트레이닝 코드에 대한 간략한 설명부터 시작하겠습니다. 무슨 " -"일이 일어나고 있는지 더 자세히 설명하려면 공식 `PyTorch 튜토리얼 " -"`_을 " -"참조하세요." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 -msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all " -"required packages (such as :code:`torch` and :code:`torchvision`) need to be " -"imported. You can see that we do not import any package for federated " -"learning. You can keep all these imports as they are even when we add the " -"federated learning components at a later point." +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." msgstr "" -"CIFAR-10에 대한 기존 (중앙 집중식) 교육에 필요한 모든 구성 요소가 포함된 " -":code:`cifar.py`라는 새 파일을 생성해 보겠습니다. 먼저, 필요한 모든 " -"패키지(예: :code:`torch` 및 :code:`torchvision`)를 가져와야 합니다. 연합 " -"학습을 위한 패키지를 가져오지 않는 것을 확인 할 수 있습니. 나중에 연합 학습 " -"구성 요소를 추가할 때에도 이러한 모든 가져오기를 그대로 유지할 수 있습니다." 
-#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 -msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" msgstr "" -"이미 언급했듯이 이 머신 러닝 워크로드에는 CIFAR-10 데이터 세트를 사용합니다. " -"모델 아키텍처(매우 간단한 컨볼루션 신경망)는 :code:`class Net()`에 정의되어 " -"있습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst:107 msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test sets. " -"The :code:`transform` normalized the data after loading." +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -":code:`load_data()` 함수는 CIFAR-10 훈련 및 테스트 세트를 로드합니다. :code:" -"`transform`은 로드 후 데이터를 정규화합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/docker/enable-tls.rst msgid "" -"We now need to define the training (function :code:`train()`) which loops " -"over the training set, measures the loss, backpropagates it, and then takes " -"one optimizer step for each batch of training examples." +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"이제 학습 집합을 반복하고, 손실을 측정하고, 이를 역전파한 다음 각 학습 예제 " -"배치에 대해 하나의 최적화 단계를 수행하는 학습(함수 :code:`train()`)을 정의해" -"야 합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 -msgid "" -"The evaluation of the model is defined in the function :code:`test()`. The " -"function loops over all test samples and measures the loss of the model " -"based on the test dataset." +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." 
msgstr "" -"모델 평가는 :code:`test()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 샘" -"플을 반복하고 테스트 데이터 세트에 따라 모델의 손실을 측정합니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/docker/enable-tls.rst msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" msgstr "" -"데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으면 모든 것을 종합하여 " -"CIFAR-10에서 CNN을 훈련할 수 있습니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/docker/enable-tls.rst msgid "" -"So far, this should all look fairly familiar if you've used PyTorch before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." -msgstr "" -"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다" -"음 단계로 넘어가서 구축한 것을 사용하여 하나의 서버와 두 개의 클라이언트로 구" -"성된 간단한 연합 학습 시스템을 만들어 보겠습니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 -msgid "" -"The simple machine learning project discussed in the previous section trains " -"the model on a single dataset (CIFAR-10), we call this centralized learning. " -"This concept of centralized learning, as shown in the previous section, is " -"probably known to most of you, and many of you have used it previously. " -"Normally, if you'd want to run machine learning workloads in a federated " -"fashion, then you'd have to change most of your code and set everything up " -"from scratch. This can be a considerable effort." -msgstr "" -"이전 섹션에서 설명한 간단한 머신 러닝 프로젝트는 단일 데이터 세트(CIFAR-10)" -"로 모델을 학습시키는데, 이를 중앙 집중식 학습이라고 부릅니다. 이전 섹션에서 " -"설명한 중앙 집중식 학습의 개념은 대부분 알고 계실 것이며, 많은 분들이 이전에 " -"사용해 보셨을 것입니다. 일반적으로 머신 러닝 워크로드를 연합 방식으로 " -"실행하려면 대부분의 코드를 변경하고 모든 것을 처음부터 다시 설정해야 합니다. " -"이는 상당한 노력이 필요할 수 있습니다." 
- -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 -msgid "" -"However, with Flower you can evolve your pre-existing code into a federated " -"learning setup without the need for a major rewrite." -msgstr "" -"하지만 Flower를 사용하면 대대적인 재작성 없이도 기존 코드를 연합 학습 설정으" -"로 발전시킬 수 있습니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 -msgid "" -"The concept is easy to understand. We have to start a *server* and then use " -"the code in :code:`cifar.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The *clients* " -"run the training and update the parameters. The updated parameters are sent " -"back to the *server* which averages all received parameter updates. This " -"describes one round of the federated learning process and we repeat this for " -"multiple rounds." -msgstr "" -"개념은 이해하기 쉽습니다. *서버*를 시작한 다음 *서버*에 연결된 *클라이언트*" -"에 대해 :code:`cifar.py`의 코드를 사용해야 합니다. *서버*는 모델 파라미터를 " -"클라이언트로 전송합니다. *클라이언트*는 학습을 실행하고 파라미터를 " -"업데이트합니다. 업데이트된 파라미터는 *서버*로 다시 전송되며, *서버*는 " -"수신된 모든 파라미터 업데이트의 평균을 구합니다. 이것은 연합 학습 프로세스의 " -"한 라운드를 설명하며 여러 라운드에 걸쳐 이 과정을 반복합니다." - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build " -"upon the previously defined centralized training in :code:`cifar.py`. Our " -"*client* needs to import :code:`flwr`, but also :code:`torch` to update the " -"parameters on our PyTorch model:" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" msgstr "" -"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`cifar.py`" -"에서 이전에 정의한 중앙 집중식 학습을 기반으로 구축합니다. 
*클라이언트*는 " -":code:`flwr`을 가져와야 하며, PyTorch 모델의 파라미터를 업데이트하기 위해 " -":code:`torch`도 가져와야 합니다:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 -msgid "" -"Implementing a Flower *client* basically means implementing a subclass of " -"either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. Our " -"implementation will be based on :code:`flwr.client.NumPyClient` and we'll " -"call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier to " -"implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids some " -"of the boilerplate that would otherwise be necessary. :code:`CifarClient` " -"needs to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing " -"the model:" -msgstr "" -"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` " -"또는 :code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니" -"다. 우리의 구현은 :code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :" -"code:`CifarClient`라고 부를 것입니다. :code:`NumPyClient`는 파이토치나 텐서플" -"로우/Keras처럼 NumPy 상호운용성이 좋은 프레임워크를 사용하는 경우 필요한 일" -"부 보일러플레이트를 피하기 때문에 :code:`Client`보다 구현하기가 조금 더 쉽습" -"니다. 
code:`CifarClient`는 모델 파라미터를 가져오거나 설정하는 메서드 2개, 모" -"델 학습을 위한 메서드 1개, 모델 테스트를 위한 메서드 1개 등 네 가지 메서드를 " -"구현해야 합니다:" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr ":code:`set_parameters`" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" -msgstr "업데이트된 로컬 모델 가중치를 가져와 서버로 반환합니다" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" -msgstr "로컬 손실 및 정확도를 서버에 반환합니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/docker/enable-tls.rst msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make " -"use of the functions :code:`train()` and :code:`test()` previously defined " -"in :code:`cifar.py`. So what we really do here is we tell Flower through " -"our :code:`NumPyClient` subclass which of our already defined functions to " -"call for training and evaluation. We included type annotations to give you a " -"better understanding of the data types that get passed around." +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" msgstr "" -"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전" -"에 :code:`cifar.py`에 정의된 함수인 :code:`train()`과 :code:`test()`를 활용합" -"니다. 따라서 여기서 실제로 하는 일은 :code:`NumPyClient` 서브클래스를 통해 이" -"미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 Flower에 알려주는 것입니" -"다. 전달되는 데이터 유형을 더 잘 이해할 수 있도록 type annotations을 포함했습" -"니다." -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/docker/enable-tls.rst msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load your " -"data and model by using :code:`cifar.py`. 
Start :code:`CifarClient` with the " -"function :code:`fl.client.start_client()` by pointing it at the same IP " -"address we used in :code:`server.py`:" +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" msgstr "" -"이제 모델과 데이터를 모두 로드하는 함수를 정의하고, :code:`CifarClient`를 생" -"성하고, 이 클라이언트를 시작하는 작업만 남았습니다. 코드:`cifar.py`를 사용하" -"여 데이터와 모델을 로드합니다. :code:`server.py`에서 사용한 것과 동일한 IP 주" -"소를 지정하여 :code:`fl.client.start_client()` 함수로 :code:`CifarClient`를 " -"시작합니다:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/docker/enable-tls.rst msgid "" -"in each window (make sure that the server is running before you do so) and " -"see your (previously centralized) PyTorch project run federated learning " -"across two clients. Congratulations!" +"location of the CA certificate file inside the container that the " +"SuperExec executor" msgstr "" -"를 입력하고(그 전에 서버가 실행 중인지 확인하세요) (이전에는 중앙 집중식) " -"PyTorch 프로젝트가 두 클라이언트에서 연합 학습을 실행하는 것을 확인합니다. " -"축하합니다!" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 -msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more " -"clients?" +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." msgstr "" -"이 예제의 전체 소스 코드: `파이토치: 중앙 Centralized에서 Federated으로 (코" -"드) `_. 물론 이 예제는 두 클라이언트가 완전히 동일한 데" -"이터 세트를 로드하기 때문에 다소 지나치게 단순화되어 있으며, 이는 현실적이지 " -"않습니다. 이제 이 주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에" -"서 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요? 클라이언트를 " -"더 추가하는 것은 어떨까요?" 
-#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "차분 프라이버시" +#: ../../source/docker/index.rst:2 +msgid "Run Flower using Docker" +msgstr "Docker를 사용하여 Flower 실행" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/index.rst:4 msgid "" -"The information in datasets like healthcare, financial transactions, user " -"preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such data " -"is also sensitive and there is a risk of compromising individual privacy." +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." msgstr "" -"의료, 금융 거래, 사용자 선호도 등과 같은 데이터 세트의 정보는 가치 있고 과학" -"적 혁신의 잠재력을 지니고 있으며 중요한 비즈니스 인사이트를 제공합니다. 그러" -"나 이러한 데이터는 또한 민감한 정보이며 개인의 프라이버시를 침해할 위험이 있" -"습니다." -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/index.rst:7 msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where differential " -"privacy comes in. It provides the possibility of analyzing data while " -"ensuring the privacy of individuals." +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." msgstr "" -"익명화와 같은 기존 방법만으로는 재식별 및 데이터 연결과 같은 공격으로 인해 효" -"과가 없습니다. 그래서 차분 프라이버시가 등장했습니다. 차등 개인정보 보호는 개" -"인의 프라이버시를 보장하면서 데이터를 분석할 수 있는 가능성을 제공합니다." -#: ../../source/explanation-differential-privacy.rst:12 -msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). 
Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the " -"individual's information remains hidden in the crowd." +#: ../../source/docker/index.rst:11 +#, fuzzy +msgid "Getting Started" +msgstr "시작하기" + +#: ../../source/docker/index.rst:19 +msgid "Running in Production" msgstr "" -"하나의 레코드(예: 앨리스의 데이터)를 제외하고는 동일한 두 개의 데이터 세트가 " -"있다고 상상해 보세요. 차분 프라이버(DP)는 평균 소득 계산과 같은 모든 분석(M)" -"이 두 데이터 세트에 대해 거의 동일한 결과를 산출하도록 보장합니다(O와 O' 는 " -"비슷할 것입니다). 이렇게 하면 그룹 패턴은 보존하면서 개별 세부 정보는 가려져 " -"개인의 정보가 군중 속에 숨겨집니다." -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" -msgstr "DP 소개" +#: ../../source/docker/index.rst:28 +#, fuzzy +msgid "Advanced Options" +msgstr "고급 Docker 옵션" -#: ../../source/explanation-differential-privacy.rst:22 -msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the analysis." +#: ../../source/docker/index.rst:40 +#, fuzzy +msgid "Run Flower using Docker Compose" +msgstr "Docker를 사용하여 Flower 실행" + +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" msgstr "" -"DP를 달성하기 위해 가장 일반적으로 사용되는 메커니즘 중 하나는 분석의 전반적" -"인 정확도를 유지하면서 데이터에서 각 개인의 기여도를 가릴 수 있도록 분석 결과" -"에 충분한 노이즈를 추가하는 것입니다." -#: ../../source/explanation-differential-privacy.rst:25 -msgid "Formal Definition" -msgstr "공식 정의" +#: ../../source/docker/persist-superlink-state.rst:4 +#, fuzzy +msgid "" +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." +msgstr "" +"기본적으로 Flower SuperLink는 상태를 in-memory에 유지합니다. 
Docker 플래그 `--rm``을 사용하는 경우" +" 컨테이너 시작 사이에 상태가 유지되지 않습니다. 아래에서 호스트 시스템의 파일에 상태를 저장하는 방법을 보여드리겠습니다." -#: ../../source/explanation-differential-privacy.rst:26 +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a " -"single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, :math:`\\delta`)-" -"differential privacy if for any two neighboring databases, D :sub:`1` and D :" -"sub:`2`, that differ in only a single record, and for all possible outputs S " -"⊆ Range(A):" -msgstr "" -"차분 프라이버시(DP)는 공격자가 무작위 알고리즘의 출력을 통해 유추할 수 있는 " -"정보에 대해 통계적 보장을 제공합니다. 이는 노이즈를 추가하여 알고리즘의 출력" -"에 대한 한 개인의 영향력에 대한 무조건적인 상한선을 제공합니다[1]. 무작위 메" -"커니즘 M은 하나의 레코드만 다른 두 개의 인접 데이터베이스인 D:sub:`1`과 D:" -"sub:`2`의 경우, 가능한 모든 출력 S ⊆ Range(A)에 대해 (:math:`\\epsilon`, :" -"math:`\\delta`)-차분 프라이버시를 제공합니다:" +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." 
msgstr "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/docker/persist-superlink-state.rst:21 +#, fuzzy msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts for " -"a small probability on which the upper bound :math:`\\epsilon` does not " -"hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum " -"change in the output due to the inclusion or removal of a single record." -msgstr "" -"프라이버시 예산이라고도 하는 :math:`\\epsilon` 매개변수는 프라이버시 손실을 " -"측정하는 지표입니다. 이 매개변수는 프라이버시와 효용의 균형을 제어하며, :" -"math:`\\epsilon` 값이 낮을수록 프라이버시 수준이 높지만 효용도 감소할 가능성" -"이 높습니다. math:`\\delta` 매개변수는 상한값인 :math:`\\epsilon`이 적용되지 " -"않는 작은 확률을 설명합니다. 차분 프라이버시를 달성하는 데 필요한 노이즈의 양" -"은 출력의 감도에 비례하며, 이는 단일 레코드의 포함 또는 제거로 인한 출력의 최" -"대 변화를 측정합니다." - -#: ../../source/explanation-differential-privacy.rst:45 -msgid "Differential Privacy in Machine Learning" -msgstr "머신 러닝의 차분 프라이버시" +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." +msgstr "" +"아래 예에서는 새 디렉터리를 생성하고, 사용자 ID를 변경하고, 플래그 ``--volume``을 통해 Docker에게 로컬 " +"``state`` 디렉터리를 컨테이너의 ``/app/state`` 디렉터리에 마운트하도록 지시합니다. 또한 " +"``--database`` 플래그를 사용하여 데이터베이스 파일의 이름을 지정합니다." 
-#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/docker/persist-superlink-state.rst:36 +#, fuzzy msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific information " -"about any individual data points and subsequently prevent the model from " -"revealing sensitive information. Depending on the stage at which noise is " -"introduced, various methods exist for applying DP to machine learning " -"algorithms. One approach involves adding noise to the training data (either " -"to the features or labels), while another method entails injecting noise " -"into the gradients of the loss function during model training. Additionally, " -"such noise can be incorporated into the model's output." -msgstr "" -"머신 러닝에서 DP를 활용하여 학습 데이터의 개인정보를 보호할 수 있습니다. 차" -"분 비공개 머신 러닝 알고리즘은 알고리즘이 개별 데이터 포인트에 대한 특정 정보" -"를 학습하지 못하도록 하여 모델이 민감한 정보를 노출하지 않도록 하는 방식으로 " -"설계되었습니다. 노이즈가 도입되는 단계에 따라 머신 러닝 알고리즘에 DP를 적용" -"하는 다양한 방법이 존재합니다. 한 가지 방법은 학습 데이터(특징 또는 레이블)" -"에 노이즈를 추가하는 것이고, 다른 방법은 모델 학습 중에 손실 함수의 기울기에 " -"노이즈를 주입하는 것입니다. 또한 이러한 노이즈를 모델의 출력에 통합할 수도 있" -"습니다." - -#: ../../source/explanation-differential-privacy.rst:53 -msgid "Differential Privacy in Federated Learning" -msgstr "연합 학습의 차분 프라이버시" +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." +msgstr "" +"SuperLink가 시작되자마자 호스트 시스템의 ``state`` 디렉터리에 ``state.db`` 파일이 생성됩니다. 파일이 이미" +" 존재하는 경우 SuperLink는 파일에서 상태를 복원하려고 시도합니다. 빈 데이터베이스로 SuperLink를 시작하려면 " +"``state.db`` 파일을 제거하면 됩니다." 
+ +#: ../../source/docker/pin-version.rst:2 +#, fuzzy +msgid "Pin a Docker Image to a Specific Version" +msgstr "특정 버전에 Docker 이미지 고정하기" -#: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/docker/pin-version.rst:4 +#, fuzzy msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information " -"about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." msgstr "" -"연합 학습은 여러 당사자가 원시 데이터를 공유하지 않고도 공동으로 모델을 " -"학습할 수 있는 데이터 최소화 접근 방식입니다. 그러나 연합 학습은 새로운 " -"개인정보 보호 문제를 야기하기도 합니다. 당사자와 중앙 서버 간의 모델 " -"업데이트는 로컬 데이터에 대한 정보를 유출할 수 있습니다. 이러한 유출은 " -"멤버십 추론 및 속성 추론 공격이나 모델 반전 공격과 같은 공격에 악용될 수 " -"있습니다." +"태그 뒤에 있는 이미지가 업데이트될 수 있습니다. 이러한 업데이트에는 일반적으로 Flower의 기능을 변경해서는 안 되는 시스템 " +"의존성에 대한 보안 업데이트가 포함됩니다. 그러나 항상 동일한 이미지를 사용하려면 태그 대신 이미지의 해시를 지정할 수 있습니다." -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/pin-version.rst:14 +#, fuzzy msgid "" -"DP can play a crucial role in federated learning to provide privacy for the " -"clients' data." -msgstr "DP는 연합 학습에서 클라이언트의 데이터에 대한 개인 정보 보호를 제공하는 데 " -"중요한 역할을 할 수 있습니다." 
+"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" +msgstr "다음 명령은 ``superlink:1.8.0`` 태그가 참조하는 현재 이미지 해시를 반환합니다:" -#: ../../source/explanation-differential-privacy.rst:60 -msgid "" -"Depending on the granularity of privacy provision or the location of noise " -"addition, different forms of DP exist in federated learning. In this " -"explainer, we focus on two approaches of DP utilization in federated " -"learning based on where the noise is added: at the server (also known as the " -"center) or at the client (also known as the local)." +#: ../../source/docker/pin-version.rst:23 +msgid "This will output" msgstr "" -"개인 정보 제공의 세분성 또는 노이즈 추가 위치에 따라 연합 학습에는 다양한 " -"형태의 DP가 존재합니다. 이 설명에서는 노이즈가 추가되는 위치에 따라 서버(" -"중앙이라고도 함) 또는 클라이언트(로컬이라고도 함)에서의 연합 학습에서 DP를 " -"활용하는 두 가지 접근 방식에 중점을 둡니다." -#: ../../source/explanation-differential-privacy.rst:63 +#: ../../source/docker/pin-version.rst:30 +#, fuzzy +msgid "Next, we can pin the digest when running a new SuperLink container:" +msgstr "다음으로, 새 SuperLink 컨테이너를 실행할 때 해시를 고정할 수 있습니다:" + +#: ../../source/docker/run-as-root-user.rst:2 +#, fuzzy +msgid "Run with Root User Privileges" +msgstr "루트 사용자 권한으로 실행" + +#: ../../source/docker/run-as-root-user.rst:4 +#, fuzzy msgid "" -"**Central Differential Privacy**: DP is applied by the server and the goal " -"is to prevent the aggregated model from leaking information about each " -"client's data." +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" -"**중앙 차분 프라이버시**: DP는 서버에서 적용되며 집계된 모델이 각 클라이언트" -"의 데이터에 대한 정보를 유출하는 것을 방지하는 것이 목표입니다." +"기본적으로 Flower Docker 이미지는 루트 사용자가 아닌 사용자(사용자명/그룹명:``app``, UID/GID: " +"``49999``)로 실행됩니다. 빌드 프로세스 중 특정 작업에 필요한 경우가 아니라면 루트 사용자를 사용하지 않는 것이 좋습니다." 
+" 보안 모범 사례를 유지하려면 항상 프로덕션 환경에서 루트 사용자가 아닌 사용자로 컨테이너를 실행해야 합니다." -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the updates " -"that are sent to the server from leaking any information about the client's " -"data." +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." msgstr "" -"**로컬 차분 개인정보 보호**: DP는 정보를 서버로 보내기 전에 클라이언트 측에" -"서 적용되며, 서버로 전송되는 업데이트가 클라이언트 데이터에 대한 정보를 유출" -"하는 것을 방지하는 것이 목표입니다." -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 -msgid "Central Differential Privacy" -msgstr "중앙 차분 프라이버시" +#: ../../source/docker/run-as-root-user.rst:12 +#, fuzzy +msgid "Run a Container with Root User Privileges" +msgstr "**루트 사용자 권한으로 컨테이너 실행하기**" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"In this approach, which is also known as user-level DP, the central server " -"is responsible for adding noise to the globally aggregated parameters. It " -"should be noted that trust in the server is required." -msgstr "" -"사용자 수준 DP라고도 하는 이 접근 방식에서는 중앙 서버가 전역적으로 집계된 매" -"개변수에 노이즈를 추가하는 역할을 담당합니다. 서버에 대한 신뢰가 필요하다는 " -"점에 유의해야 합니다." - -#: ../../source/explanation-differential-privacy.rst:76 -msgid "" -"While there are various ways to implement central DP in federated learning, " -"we concentrate on the algorithms proposed by [2] and [3]. The overall " -"approach is to clip the model updates sent by the clients and add some " -"amount of noise to the aggregated model. In each iteration, a random set of " -"clients is chosen with a specific probability for training. Each client " -"performs local training on its own data. 
The update of each client is then " -"clipped by some value `S` (sensitivity `S`). This would limit the impact of " -"any individual client which is crucial for privacy and often beneficial for " -"robustness. A common approach to achieve this is by restricting the `L2` " -"norm of the clients' model updates, ensuring that larger updates are scaled " -"down to fit within the norm `S`." -msgstr "" -"연합 학습에서 중앙 DP를 구현하는 방법은 여러 가지가 있지만, 여기서는 [2]와 " -"[3]에서 제안한 알고리즘에 집중합니다. 전반적인 접근 방식은 클라이언트가 " -"전송한 모델 업데이트를 잘라내고 집계된 모델에 약간의 노이즈를 추가하는 " -"것입니다. 각 반복에서 특정 확률로 훈련할 무작위 클라이언트 세트가 " -"선택됩니다. 각 클라이언트는 자체 데이터에 대해 로컬 학습을 수행합니다. 그런 " -"다음 각 클라이언트의 업데이트는 특정 값 `S`(민감도 `S`)에 의해 잘립니다. " -"이렇게 하면 개별 클라이언트의 영향을 제한할 수 있어 개인정보 보호에 중요하고 " -"견고성에 도움이 되는 경우가 많습니다. 이를 달성하기 위한 일반적인 접근 " -"방식은 클라이언트 모델 업데이트의 `L2` 규범을 제한하여 더 큰 업데이트가 규범 " -"`S`에 맞도록 축소되도록 하는 것입니다." +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" +msgstr "``-u`` 플래그를 사용하여 Docker 이미지를 실행하고 사용자 이름으로 ``root``를 지정합니다:" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "clipping" -msgstr "클리핑" +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." +msgstr "이 명령은 루트 사용자 권한으로 Docker 컨테이너를 실행합니다." -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/docker/run-as-root-user.rst:24 +#, fuzzy +msgid "Run the Build Process with Root User Privileges" +msgstr "**루트 사용자 권한으로 빌드 프로세스를 실행합니다**" + +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to distort " -"the sum of all clients' updates. The amount of noise is scaled to the " -"sensitivity value to obtain a privacy guarantee. The Gaussian mechanism is " -"used with a noise sampled from `N (0, σ²)` where `σ = ( noise_scale * S ) / " -"(number of sampled clients)`." 
+"If you want to switch to the root user during the build process of the "
+"Docker image to install missing system dependencies, you can use the "
+"``USER root`` directive within your Dockerfile."
 msgstr ""
-"그 후 가우시안 메커니즘을 사용하여 모든 클라이언트의 업데이트 합계를 왜곡하"
-"기 위해 노이즈를 추가합니다. 노이즈의 양은 감도 값에 따라 조정되어 프라이버"
-"시 보장을 얻습니다. 가우시안 메커니즘은 `N (0, σ²)`에서 샘플링된 노이즈와 함"
-"께 사용됩니다. 여기서 `σ = (noise_scale * S) / (샘플링된 클라이언트 수)`입니"
-"다."
+"Docker 이미지 빌드 과정에서 루트 사용자로 전환하여 누락된 시스템 의존성을 설치하려면 Dockerfile 내에서 ``USER "
+"root`` 지시어를 사용할 수 있습니다."

-#: ../../source/explanation-differential-privacy.rst:94
-msgid "Clipping"
-msgstr "클리핑"
+#: ../../source/docker/run-as-root-user.rst:30
+#, fuzzy
+msgid "SuperNode Dockerfile"
+msgstr "SuperNode Dockerfile"

-#: ../../source/explanation-differential-privacy.rst:96
-msgid ""
-"There are two forms of clipping commonly used in Central DP: Fixed Clipping "
-"and Adaptive Clipping."
-msgstr "중앙 DP에서 일반적으로 사용되는 클리핑에는 고정 클리핑과 조정 클리핑의 두 "
-"가지 형태가 있습니다."
+#: ../../source/docker/run-as-subprocess.rst:2
+#, fuzzy
+msgid "Run ClientApp as a Subprocess"
+msgstr "ClientApp을 하위 프로세스로 실행"

-#: ../../source/explanation-differential-privacy.rst:98
+#: ../../source/docker/run-as-subprocess.rst:4
 msgid ""
-"**Fixed Clipping** : A predefined fix threshold is set for the magnitude of "
-"clients' updates. Any update exceeding this threshold is clipped back to the "
-"threshold value."
+"In this mode, the ClientApp is executed as a subprocess within the "
+"SuperNode Docker container, rather than running in a separate container. "
+"This approach reduces the number of running containers, which can be "
+"beneficial for environments with limited resources. However, it also "
+"means that the ClientApp is no longer isolated from the SuperNode, which "
+"may introduce additional security concerns."
 msgstr ""
-"**고정 클리핑** : 클라이언트의 업데이트 크기에 대해 미리 정의된 고정 임계값"
-"이 설정됩니다. 이 임계값을 초과하는 모든 업데이트는 임계값으로 다시 클리핑됩"
-"니다."
-#: ../../source/explanation-differential-privacy.rst:100 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based on " -"the observed update distribution [4]. It means that the clipping value is " -"tuned during the rounds with respect to the quantile of the update norm " -"distribution." +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" msgstr "" -"**조정 클리핑** : 클리핑 임계값은 관찰된 업데이트 분포에 따라 동적으로 " -"조정됩니다[4]. 즉, 클리핑 값은 업데이트 표준 분포의 사분위수에 따라 라운드가 " -"진행되는 동안 조정됩니다." -#: ../../source/explanation-differential-privacy.rst:102 +#: ../../source/docker/run-as-subprocess.rst:17 +#, fuzzy +msgid "Dockerfile.supernode" +msgstr "Flower SuperNode" + +#: ../../source/docker/run-as-subprocess.rst:31 +#, fuzzy msgid "" -"The choice between fixed and adaptive clipping depends on various factors " -"such as privacy requirements, data distribution, model complexity, and " -"others." +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" msgstr "" -"고정 클리핑과 조정 클리핑 중 선택은 개인정보 보호 요구 사항, 데이터 배포, 모" -"델 복잡성 등 다양한 요인에 따라 달라집니다." +"다음으로, Dockerfile 및 ClientApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 SuperNode Docker " +"이미지를 빌드합니다." -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 -msgid "Local Differential Privacy" -msgstr "로컬 차분 프라이버시" +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:107 +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"In this approach, each client is responsible for performing DP. 
Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted that " -"local DP leads to a decrease in accuracy but better privacy in comparison to " -"central DP." +"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -"이 접근 방식에서는 각 클라이언트가 DP를 수행할 책임이 있습니다. 로컬 DP는 완" -"전히 신뢰할 수 있는 애그리게이터가 필요하지 않지만, 로컬 DP는 중앙 DP에 비해 " -"정확도는 떨어져도 개인 정보 보호는 더 우수하다는 점에 유의해야 합니다." -#: ../../source/explanation-differential-privacy.rst:116 -msgid "In this explainer, we focus on two forms of achieving Local DP:" -msgstr "이 설명에서는 로컬 DP를 달성하는 두 가지 형태에 중점을 둡니다:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "빠른 시작 튜토리얼" -#: ../../source/explanation-differential-privacy.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering the " -"sensitivity of the local model to be ∆, Gaussian noise is applied with a " -"noise scale of σ where:" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -"각 클라이언트는 로컬 업데이트를 서버로 보내기 전에 로컬 업데이트에 노이즈를 " -"추가합니다. 
로컬 모델의 감도를 ∆로 간주하여 가우시안 노이즈가 σ의 노이즈 스케" -"일로 적용되어 (:math:`\\epsilon`, :math:`\\delta`)-DP를 달성하기 위해, 여기" -"서 σ는 노이즈 스케일입니다:" -#: ../../source/explanation-differential-privacy.rst:120 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times \\log\\left(\\frac{1.25}{\\delta}\\right)}}" -"{\\epsilon}\n" -"\n" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see Limitations_." msgstr "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times \\log\\left(\\frac{1.25}{\\delta}\\right)}}" -"{\\epsilon}\n" -"\n" -#: ../../source/explanation-differential-privacy.rst:125 -msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." -msgstr "" -"각 클라이언트는 로컬 트레이닝(DP-SGD) 중에 모델의 gradient에 노이즈를 추가합" -"니다. 보다 구체적으로, 이 접근 방식에서는 gradient이 클리핑되고 보정된 노이즈" -"가 gradient에 주입됩니다." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +#, fuzzy +msgid "Before you start, make sure that:" +msgstr "시작하기 전에 Docker daemon이 실행 중인지 확인하세요:" -#: ../../source/explanation-differential-privacy.rst:128 -msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." msgstr "" -"이 두 가지 접근 방식은 서로 다른 수준의 개인정보 보호 기능을 제공한다는 점에 " -"유의하세요." 
-#: ../../source/explanation-differential-privacy.rst:131 -msgid "**References:**" -msgstr "**참고:**" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "Docker 데몬이 실행 중인지 확인하십시오." -#: ../../source/explanation-differential-privacy.rst:133 -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." -msgstr "[1] Dwork 외. 차분 프라이버시의 알고리즘적 기초." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 +msgid "Docker Compose is `installed `_." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language Models." -msgstr "[2] McMahan 외. 차분적 개인 반복 언어 모델 학습." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 +#, fuzzy +msgid "Run the Quickstart Example" +msgstr "예시 요청" -#: ../../source/explanation-differential-privacy.rst:137 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client Level " -"Perspective." -msgstr "[3] Geyer 외. 차분적 개인 연합 학습: 고객 수준의 관점." +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" -"[4] Galen et al. Differentially Private Learning with Adaptive Clipping." -msgstr "[4] Galen 외. 조정형 클리핑을 통한 차분적 개인 학습." 
+"Download the `compose.yml " +"`_" +" file into the example directory:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" -msgstr "연합 평가" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 +#, fuzzy +msgid "Build and start the services using the following command:" +msgstr "다음 명령을 실행하여 가상 환경을 활성화합니다:" -#: ../../source/explanation-federated-evaluation.rst:4 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 +#, fuzzy msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or client-" -"side) evaluation." -msgstr "" -"연합 학습 시스템에서 모델을 평가하는 데는 중앙 집중식(또는 서버 측) 평가와 " -"연합(또는 클라이언트 측) 평가라는 두 가지 주요 접근 방식이 있습니다." - -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" -msgstr "중앙 집중식 평가" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" -msgstr "기본 제공 전략" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 +#, fuzzy +msgid "pyproject.toml" +msgstr "또는 ``pyproject.toml``:" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." 
msgstr "" -"모든 기본 제공 전략은 초기화 중에 평가 함수를 제공하여 중앙 집중식 평가를 " -"지원합니다. 평가 함수는 현재 글로벌 모델 파라미터를 입력으로 받아 평가 " -"결과를 반환할 수 있는 모든 함수입니다:" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" -msgstr "사용자 정의 전략" - -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" -"The :code:`Strategy` abstraction provides a method called :code:`evaluate` " -"that can directly be used to evaluate the current global model parameters. " -"The current server implementation calls :code:`evaluate` after parameter " -"aggregation and before federated evaluation (see next paragraph)." +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -"코드:`전략` 추상화는 현재 전역 모델 파라미터를 평가하는 데 직접 사용할 수 있" -"는 :코드:`평가`라는 메서드를 제공합니다. 현재 서버 구현에서는 매개변수 집계 " -"후와 연합 평가 전에 :code:`evaluate`를 호출합니다(다음 단락 참조)." -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" -msgstr "연합 평가" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 +#, fuzzy +msgid "Run the example:" +msgstr "전체 코드 예제" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" -msgstr "연합 평가 구현" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 +msgid "Follow the logs of the SuperExec service:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and can " -"be configured from the server side." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." msgstr "" -"클라이언트 측 평가는 :code:`Client.evaluate` 메서드에서 이루어지며 서버 측에" -"서 구성할 수 있습니다." 
-#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" -msgstr "연합 평가 구성" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +msgid "Run a Different Quickstart Example" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" -"Federated evaluation can be configured from the server side. Built-in " -"strategies support the following arguments:" -msgstr "연합 평가는 서버 측에서 구성할 수 있습니다. 기본 제공 전략은 다음 인수를 " -"지원합니다:" - -#: ../../source/explanation-federated-evaluation.rst:105 -msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of clients " -"that will be selected for evaluation. If :code:`fraction_evaluate` is set " -"to :code:`0.1` and :code:`100` clients are connected to the server, then :" -"code:`10` will be randomly selected for evaluation. If :code:" -"`fraction_evaluate` is set to :code:`0.0`, federated evaluation will be " -"disabled." -msgstr "" -":code:`fraction_evaluate`: 평가를 위해 선택될 클라이언트의 비율을 정의하는 " -":code:`float`입니다. 코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 " -":code:`100` 클라이언트가 서버에 연결되어 있는 경우 :code:`10`이 평가를 위해 " -"무작위로 선택됩니다. code:`fraction_evaluate`가 :code:`0.0`으로 설정된 경우 " -"연합 평가가 비활성화됩니다." - -#: ../../source/explanation-federated-evaluation.rst:106 -msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of clients " -"to be selected for evaluation. If :code:`fraction_evaluate` is set to :code:" -"`0.1`, :code:`min_evaluate_clients` is set to 20, and :code:`100` clients " -"are connected to the server, then :code:`20` clients will be selected for " -"evaluation." +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -":code:`min_evaluate_clients`: 평가를 위해 선택할 최소 클라이언트 수. :code:" -"`int`. 
코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 :code:" -"`fraction_evaluate`가 20으로 설정되어 있으며 :code:`100` 클라이언트가 서버에 " -"연결되어 있는 경우 :code:`20` 클라이언트가 평가를 위해 선택됩니다." -#: ../../source/explanation-federated-evaluation.rst:107 -msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round of " -"federated evaluation can start. If fewer than :code:`min_available_clients` " -"are connected to the server, the server will wait until more clients are " -"connected before it continues to sample clients for evaluation." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +msgid "After that, you can repeat the steps above." msgstr "" -":code:`min_available_clients`: federated 평가 단계를 시작하기 전에 서버에 연" -"결해야 하는 최소 클라이언트 수를 정의하는 :code:`int`입니다. 서버에 연결된 클" -"라이언트가 :code:`min_available_clients`보다 적으면 서버는 더 많은 클라이언트" -"가 연결될 때까지 기다렸다가 평가를 위한 클라이언트 샘플링을 계속합니다." -#: ../../source/explanation-federated-evaluation.rst:108 -msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will be " -"called during each round and provides a convenient way to customize client-" -"side evaluation from the server side, for example, to configure the number " -"of validation steps performed." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#, fuzzy +msgid "Limitations" +msgstr "동기" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#, fuzzy +msgid "Quickstart Example" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#, fuzzy +msgid "quickstart-fastai" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" msgstr "" -"code:`on_evaluate_config_fn`: 선택한 클라이언트로 전송할 구성 사전을 반환하" -"는 함수입니다. 이 함수는 각 단계 중에 호출되며, 서버 측에서 클라이언트 측 평" -"가를 사용자 지정하는 편리한 방법을 제공합니다(예: 수행되는 유효성 검사 단계 " -"수 구성)." 
-#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" -msgstr "훈련 중 로컬 모델 업데이트 평가" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#, fuzzy +msgid "quickstart-huggingface" +msgstr "빠른 시작 튜토리얼" -#: ../../source/explanation-federated-evaluation.rst:137 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-jax" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 msgid "" -"Model parameters can also be evaluated during training. :code:`Client.fit` " -"can return arbitrary evaluation results as a dictionary:" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." msgstr "" -"모델 파라미터는 훈련 중에도 평가할 수 있습니다. :code:`Client.fit`은 임의의 " -"평가 결과를 dictionary로 반환할 수 있습니다:" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" -msgstr "전체 코드 예제" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#, fuzzy +msgid "quickstart-mlx" +msgstr "빠른 시작" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" -"For a full code example that uses both centralized and federated evaluation, " -"see the *Advanced TensorFlow Example* (the same approach can be applied to " -"workloads implemented in any other framework): https://github.com/adap/" -"flower/tree/main/examples/advanced-tensorflow" +"`Requires to run on macOS with Apple Silicon `_." 
msgstr "" -"연합 평가와 중앙 집중식 평가를 모두 사용하는 전체 코드 예제는 *고급 " -"텐서플로우 예제*(다른 프레임워크에서 구현된 워크로드에도 동일한 접근 방식을 " -"적용할 수 있음)를 참조하세요: https://github.com/adap/flower/tree/main/" -"examples/advanced-tensorflow" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" -msgstr "FED 템플릿" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +#, fuzzy +msgid "quickstart-monai" +msgstr "빠른 시작" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" -msgstr "목차" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#, fuzzy +msgid "quickstart-pandas" +msgstr "빠른 시작 튜토리얼" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" -msgstr "[목차](#목차)" - -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" -msgstr "[요약](#요약)" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +msgid "quickstart-pytorch-lightning" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" -msgstr "[동기](#동기)" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +msgid "" +"Requires an older pip version that is not supported by the Flower Docker " +"images." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[목표](#목표)" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#, fuzzy +msgid "quickstart-pytorch" +msgstr "빠른 시작 튜토리얼" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" -msgstr "[비목표](#비목표)" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "quickstart-sklearn-tabular" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[제안](#제안)" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +#, fuzzy +msgid "quickstart-tabnet" +msgstr "빠른 시작 튜토리얼" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[단점](#단점)" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +#, fuzzy +msgid "quickstart-tensorflow" +msgstr "빠른 시작 튜토리얼" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[고려되는 대안](#고려되는 대안)" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 +msgid "Only runs on AMD64." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[부록](#부록)" +#: ../../source/docker/set-environment-variables.rst:2 +#, fuzzy +msgid "Set Environment Variables" +msgstr "환경 변수 설정" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "요약" +#: ../../source/docker/set-environment-variables.rst:4 +#, fuzzy +msgid "" +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." +msgstr "Docker 컨테이너 내에서 변수를 설정하려면 ``-e =`` 플래그를 사용하면 됩니다." -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "\\[TODO - 문장 1: 문제 요약\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 +#, fuzzy +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "빠른 시작 튜토리얼" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "\\[TODO - 문장 2: 솔루션 요약\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 +msgid "" +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "동기" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 +msgid "" +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. 
For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" -msgstr "\\[TODO\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 +msgid "" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "목표" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "비목표" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "제안" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 +msgid "" +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "단점" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "고려되는 대안" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" -msgstr "\\[대안 1\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 +msgid "" +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" -msgstr "\\[대안 2\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 +msgid "" +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Flower Enhancement Doc" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[Enhancement Doc 템플릿](#enhancement-doc-템플릿)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[Metadata](#metadata)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[워크플로우](#워크플로우)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +msgid "" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub Issues](#github-issues)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[Google Docs](#google-docs)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 +msgid "" +"For production environments, you may have to use dedicated services to " +"obtain your certificates." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" -msgstr "Flower Enhancement는 다음과 같은 표준화된 개발 프로세스입니다" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 +msgid "" +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. For " +"example, if the IP is ``192.168.2.33``, execute:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" -msgstr "더 큰 변경 사항을 제안하기 위한 공통 구조를 제공합니다" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "변화의 동기가 분명한지 확인합니다" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" -msgstr "버전 관리 시스템에서 프로젝트 정보를 유지합니다" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 +msgid "" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" -msgstr "사용자에게 영향력 있는 변화에 대한 동기를 문서화합니다" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "운행 중 작업 추적을 위한 깃허브 이슈를 예약합니다" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +msgid "Step 3: Start the Flower Server Components" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 msgid "" -"ensure community participants can successfully drive changes to completion " -"across one or more releases while stakeholders are adequately represented " -"throughout the process" +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" msgstr "" -"커뮤니티 참여자가 하나 이상의 릴리즈에서 변경 사항을 성공적으로 완료할 수 있" -"도록 하는 동시에 이해 관계자가 프로세스 전반에 걸쳐 적절히 대표되도록 보장합" -"니다" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "따라서 Enhancement 문서에는 다음과 같은 측면이 결합되어 있습니다" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "기능 및 effort-tracking 문서" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "제품 요구 사항 문서" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "디자인 문서" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#, fuzzy +msgid "Step 4: Start the Flower Client Components" +msgstr "서버(SuperLink)" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." -msgstr "를 하나의 파일로 통합하여 커뮤니티와 협력해 점진적으로 생성합니다." +"On your local machine, run the following command to start the client " +"components:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand and " -"communicate upcoming changes to the project." +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." msgstr "" -"Flower에 제안된 변경 사항이나 기능을 멀리 가져오는 경우, 프로젝트의 향후 변" -"경 사항을 이해하고 전달하기 위해 단일 GitHub 이슈 또는 pull request를 넘어서" -"는 abstraction이 필요합니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#, fuzzy +msgid "Step 5: Run Your Flower Project" +msgstr "Flower SuperNode를 실행합니다." + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 msgid "" -"The purpose of this process is to reduce the amount of \"tribal knowledge\" " -"in our community. By moving decisions from Slack threads, video calls, and " -"hallway conversations into a well-tracked artifact, this process aims to " -"enhance communication and discoverability." 
+"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" msgstr "" -"이 프로세스의 목적은 커뮤니티 내 '부족한 지식'의 양을 줄이는 것입니다. 이 프" -"로세스는 Slack 스레드, 영상 통화, 복도 대화에서 나온 의사 결정을 잘 추적된 아" -"티팩트로 옮김으로써 커뮤니케이션과 검색 가능성을 향상시키는 것을 목표로 합니" -"다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement " -"process. If an enhancement would be described in either written or verbal " -"communication to anyone besides the author or developer, then consider " -"creating an Enhancement Doc." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" msgstr "" -"대략적으로 사용자를 대상으로 하는 대규모 개선 사항은 개선 프로세스를 따라야 " -"합니다. 개선 사항을 작성자나 개발자 이외의 다른 사람에게 서면 또는 구두로 설" -"명해야 하는 경우에는 개선 문서 작성을 고려하세요." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -"Similarly, any technical effort (refactoring, major architectural change) " -"that will impact a large section of the development community should also be " -"communicated widely. The Enhancement process is suited for this even if it " -"will have zero impact on the typical user or operator." +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." msgstr "" -"마찬가지로 개발 커뮤니티의 많은 부분에 영향을 미치는 기술적 노력(리팩토링, 주" -"요 아키텍처 변경)도 널리 알려야 합니다. 개선 프로세스는 일반 사용자나 운영자" -"에게 전혀 영향을 미치지 않더라도 이를 위해 적합합니다." 
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 msgid "" -"For small changes and additions, going through the Enhancement process would " -"be time-consuming and unnecessary. This includes, for example, adding new " -"Federated Learning algorithms, as these only add features without changing " -"how Flower works or is used." +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." msgstr "" -"작은 변경 및 추가의 경우, 개선 프로세스를 거치는 것은 시간이 많이 걸리고 " -"불필요합니다. 예를 들어, 새로운 연합 학습 알고리즘을 추가하는 것은 Flower의 " -"작동 방식이나 사용 방식을 변경하지 않고 기능만 추가하는 것이기 때문입니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +#, fuzzy +msgid "Shut down the Flower client components:" +msgstr "Flower 클라이언트 앱을 실행합니다." + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:2 +#, fuzzy +msgid "Quickstart with Docker" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/tutorial-quickstart-docker.rst:4 msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by members " -"of the community." +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." msgstr "" -"기능 개선은 이미 구현할 수 있는 경로가 마련되어 있고 커뮤니티 구성원들이 지지" -"하는 것이므로 기능 요청과는 다릅니다." 
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:7 msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for reference " -"— the Enhancement Doc." +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." msgstr "" -"개선 사항은 정의된 템플릿과 참조용으로 Enhancement Doc.를 검토하고 저장하는 " -"워크플로우를 따르는 Markdown 파일에 캡처됩니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "Enhancement Doc 템플릿" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:45 msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" -msgstr "각 개선 사항 문서는 다음과 같은 구조의 Markdown 파일로 제공됩니다" +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" -msgstr "Metadata ([아래 설명](#metadata) YAML preamble 형식)" +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#, fuzzy +msgid "Step 2: Start the SuperLink" +msgstr "서버(SuperLink)" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "Title (metadata와 같게)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +msgid "Open your terminal and run:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "Table of Contents (필요시)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "Notes/Constraints/Caveats (선택 사항)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "Design Details (선택 사항)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "졸업 기준" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "업그레이드/다운그레이드 전략(해당되는 경우)" +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 +msgid "" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "참고로 이 문서는 위의 구조를 따릅니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "Metadata" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement " -"Doc + 1. With this number, it becomes easy to reference other proposals." +"``--detach``: Run the container in the background, freeing up the " +"terminal." msgstr "" -"**피드 번호** (필수) 마지막 Flower Enhancement 문서의 `피드 번호` + 1. 이 번" -"호를 사용하면 다른 제안을 쉽게 참조할 수 있습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." -msgstr "**제목** (필수) 제안서의 제목을 평이한 언어로 입력합니다." 
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**status** (Required) The current status of the proposal. See [workflow]" -"(#workflow) for the possible states." +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." msgstr "" -"**상태** (필수) 제안의 현재 상태입니다. 가능한 상태는 [워크플로](#워크플로)" -"를 참조하세요." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply the " -"GitHub ID." -msgstr "**저자** (필수) 제안서의 작성자 목록입니다. 간단히 GitHub ID입니다." +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first submitted " -"in a PR." -msgstr "**생성 날짜** (필수) PR에서 제안서를 처음 제출한 날짜입니다." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 -msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." +#: ../../source/docker/tutorial-quickstart-docker.rst:80 +msgid "Step 3: Start the SuperNode" msgstr "" -"**마지막 업데이트** (선택 사항) 제안서가 마지막으로 크게 변경된 날짜입니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 -msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to this " -"one." -msgstr "**함께 보기** (선택 사항) 이 제안과 관련된 다른 제안 목록입니다." +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**대체** (선택 사항) 이 제안이 대체하는 제안 목록입니다." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "" -"**superseded-by** (Optional) A list of proposals that this one supersedes." -msgstr "**대체됨** (선택 사항) 이 제안이 대체하는 제안의 목록입니다." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "워크플로우" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 -msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the author, " -"who shepherds the enhancement. This person also has to find committers to " -"Flower willing to review the proposal." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." msgstr "" -"개선 사항을 구성하는 아이디어는 이미 커뮤니티에서 논의되었거나 제안된 적이 있" -"어야 합니다. 따라서 개선 사항을 주도하는 사(보통 작성자)이 필요합니다. 이 사" -"람은 또한 제안을 검토할 의향이 있는 Flower 커미터를 찾아야 합니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement Doc " -"number, to `enhancements`. All enhancements start in `provisional` state as " -"part of a pull request. Discussions are done as part of the pull request " -"review." +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." msgstr "" -"새 개선 사항은 `NNNN-YYYYMMDD-enhancement-title.md` 형식의 파일 이름으로 체크" -"인되며, `NNNN`은 Flower 개선 문서 번호이고 `enhancements`에 해당합니다. 
모든 " -"개선 사항은 pull request의 일부로 `잠정` 상태에서 시작됩니다. 토론은 pull " -"request 검토의 일부로 이루어집니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Once an enhancement has been reviewed and approved, its status is changed to " -"`implementable`. The actual implementation is then done in separate pull " -"requests. These pull requests should mention the respective enhancement as " -"part of their description. After the implementation is done, the proposal " -"status is changed to `implemented`." +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" msgstr "" -"개선 사항이 검토 및 승인되면 상태가 '구현 가능'으로 변경됩니다. 그런 다음 실" -"제 구현은 별도의 pull requests를 통해 이루어집니다. 이러한 pull requests는 설" -"명의 일부로 해당 개선 사항을 언급해야 합니다. 구현이 완료되면 제안 상태는 '구" -"현됨'으로 변경됩니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. An Enhancement has the " -"following states:" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." msgstr "" -"특정 조건에서는 다른 상태도 가능합니다. 개선에는 다음과 같은 상태가 있습니다:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed out " -"and actively defined and discussed." +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" msgstr "" -"'잠정적': 개선 사항이 제안되어 활발히 정의되고 있습니다. 제안이 구체화되고 활" -"발하게 정의 및 논의되는 동안의 시작 단계입니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "`구현 가능`: 개선 사항이 검토 및 승인되었습니다." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." -msgstr "`구현됨`: 개선 사항이 구현되었으며 더 이상 활발히 변경되지 않습니다." +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "" -"`deferred`: The enhancement is proposed but not actively being worked on." -msgstr "'지연됨': 개선 사항이 제안되었지만 아직 활발히 작업 중이 아닙니다." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement is " -"not moving forward." +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" msgstr "" -"`거부됨`: 작성자와 검토자는 이 개선 사항을 더 이상 진행하지 않기로 결정했습니" -"다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." -msgstr "`철회`: 작성자가 개선 사항을 철회했습니다." - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "'대체됨': 개선 사항이 새로운 개선 사항으로 대체되었습니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 -msgid "" -"Adding an additional process to the ones already provided by GitHub (Issues " -"and Pull Requests) adds more complexity and can be a barrier for potential " -"first-time contributors." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." msgstr "" -"GitHub에서 이미 제공하는 프로세스(이슈 및 Pull Requests)에 추가 프로세스를 추" -"가하면 더 복잡해지고 잠재적인 처음인 기여자에게는 장벽이 될 수 있습니다." 
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden for " -"non-native English speakers." +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" -"현재 기능 이슈 템플릿에서 요구되는 한 문장 설명 이상으로 제안서 템플릿을 확장" -"하는 것은 영어가 모국어가 아닌 사용자에게는 큰 부담이 될 수 있습니다." -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "GitHub 이슈" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 -msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other issues. " -"The main issue is in discussing and reviewing an enhancement: GitHub issues " -"only have a single thread for comments. Enhancements usually have multiple " -"threads of discussion at the same time for various parts of the doc. " -"Managing these multiple discussions can be confusing when using GitHub " -"Issues." -msgstr "" -"이러한 종류의 개선을 위해 GitHub 이슈를 사용하면 가능합니다. 예를 들어 태그" -"를 사용하여 다른 이슈와 구별하고 필터링할 수 있습니다. 주요 이슈는 개선 사항" -"에 대해 토론하고 검토하는 것입니다: GitHub 이슈에는 댓글 스레드가 하나만 있습" -"니다. 개선 사항에는 일반적으로 문서의 여러 부분에 대해 동시에 여러 개의 토론 " -"스레드가 있습니다. GitHub 이슈를 사용할 때 이러한 여러 토론을 관리하면 혼란스" -"러울 수 있습니다." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:124 +msgid "Start the second container:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "Google 문서 도구" +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +#: ../../source/docker/tutorial-quickstart-docker.rst:144 msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs " -"are hosted outside the project, their discoverability by the community needs " -"to be taken care of. A list of links to all proposals has to be managed and " -"made available for the community. Compared to shipping proposals as part of " -"Flower's repository, the potential for missing links is much higher." +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." msgstr "" -"Google 문서는 여러 스레드의 토론을 허용합니다. 하지만 Google 문서는 프로젝트 " -"외부에서 호스팅되므로 커뮤니티에서 검색할 수 있도록 관리해야 합니다. 모든 제" -"안에 대한 링크 목록을 관리하고 커뮤니티에 제공해야 합니다. Flower 저장소의 일" -"부로 제안서를 보낼 때와 비교하면 링크가 누락될 가능성이 훨씬 더 높습니다." - -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Flower 개선 문서" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" -msgstr "종합 평가 결과" - -#: ../../source/how-to-aggregate-evaluation-results.rst:4 +#: ../../source/docker/tutorial-quickstart-docker.rst:149 msgid "" -"The Flower server does not prescribe a way to aggregate evaluation results, " -"but it enables the user to fully customize result aggregation." 
+"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" msgstr "" -"Flower 서버는 평가 결과를 집계하는 방법을 규정하고 있지 않지만 사용자가 결과 " -"집계를 완전히 사용자 지정할 수 있습니다." -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "사용자 지정 평가 결과 집계" +#: ../../source/docker/tutorial-quickstart-docker.rst:152 +#, fuzzy +msgid "Dockerfile.clientapp" +msgstr "flower 클라이언트 앱" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "Understand the Dockerfile" +msgstr "SuperNode Dockerfile 만들기" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate " -"custom evaluation results coming from individual clients. Clients can return " -"custom metrics to the server by returning a dictionary:" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -"동일한 :code:`Strategy`-사용자 지정 방식을 사용하여 개별 클라이언트로부터 오" -"는 사용자 지정 평가 결과를 집계할 수 있습니다. 클라이언트는 dictionary를 반환" -"하여 사용자 지정 지표를 서버에 반환할 수 있습니다:" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -"그런 다음 서버는 사용자 지정 전략을 사용하여 이러한 dictionaries에서 제공하" -"는 메트릭을 집계할 수 있습니다:" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" -msgstr "SuperNodes 인증하기" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." 
+msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use to " -"verify the identities of each SuperNode connecting to a SuperLink. Flower " -"node authentication works similar to how GitHub SSH authentication works:" +"Any subsequent commands that reference a directory will be relative to " +"this directory." msgstr "" -"Flower는 SuperLink에 연결하는 각 SuperNodes의 신원을 확인하는 데 사용할 수 있" -"는 인증된 SuperNodes에 대한 기본 지원을 제공합니다. Flower 노드 인증은 " -"GitHub SSH 인증 방식과 유사하게 작동합니다:" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" -msgstr "SuperLink(서버)는 알려진 (클라이언트) 노드 공개키 목록을 저장합니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared secret" +"from the current working directory into the container's ``/app`` " +"directory." msgstr "" -"SuperNode와 SuperLink는 ECDH를 사용하여 독립적으로 공유된 비밀을 도출합니다" -#: ../../source/how-to-authenticate-supernodes.rst:9 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" msgstr "" -"비밀 공유는 SuperNode에서 SuperLink로 토큰으로 전송된 메시지의 HMAC 값을 계산" -"하는 데 사용됩니다" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" -msgstr "SuperLink가 토큰을 확인합니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "from the ``pyproject.toml``." 
+msgstr "또는 ``pyproject.toml``:" -#: ../../source/how-to-authenticate-supernodes.rst:12 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"We recommend you to check out the complete `code example `_ demonstrating " -"federated learning with Flower in an authenticated setting." +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" msgstr "" -"인증된 환경에서 Flower로 연합 학습을 시연하는 전체 '코드 예제 `_를 확인하는 것이 " -"좋습니다." -#: ../../source/how-to-authenticate-supernodes.rst:15 -msgid "" -"This guide covers a preview feature that might change in future versions of " -"Flower." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" msgstr "" -"이 가이드에서는 향후 버전의 Flower에서 변경될 수 있는 미리보기 기능에 대해 설" -"명합니다." -#: ../../source/how-to-authenticate-supernodes.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For increased security, node authentication can only be used when encrypted " -"connections (SSL/TLS) are enabled." +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" msgstr "" -"보안을 강화하기 위해 노드 인증은 암호화된 연결(SSL/TLS)을 사용하도록 설정한 " -"경우에만 사용할 수 있습니다." -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" -msgstr ":code:`SuperLink`에서 노드 인증 활성화" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can find " -"the complete guide `here `_. After configuring secure connections, you can enable " -"client authentication in a long-running Flower :code:`SuperLink`. 
Use the " -"following terminal command to start a Flower :code:`SuperNode` that has both " -"secure connections and node authentication enabled:" -msgstr "" -"노드 인증을 활성화하려면 먼저 SuperLink<>SuperNode 통신을 보호하기 위해 SSL/" -"TLS 연결을 구성해야 합니다. 전체 가이드는 `여기 `_에서 확인할 수 있습니다. 보안 " -"연결을 구성한 후, 장기 실행하는 Flower :code:`SuperLink`에서 클라이언트 인증" -"을 활성화할 수 있습니다. 다음 터미널 명령을 사용하여 보안 연결과 노드 인증이 " -"모두 활성화된 Flower :code:`SuperNode`를 시작하세요:" +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 -msgid "Let's break down the authentication flags:" -msgstr "인증 플래그를 세분화해 보겠습니다:" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 +#: ../../source/docker/tutorial-quickstart-docker.rst:186 msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV file " -"storing all known node public keys. You need to store all known node public " -"keys that are allowed to participate in a federation in one CSV file (:code:" -"`.csv`)." +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." msgstr "" -"첫 번째 플래그 :code:`--auth-list-public-keys`는 알려진 모든 노드 공개키를 저" -"장하는 CSV 파일의 경로를 기대합니다. federation에 참여하도록 허용된 모든 알려" -"진 노드 공개 키를 하나의 CSV 파일(:code:`.csv`)에 저장해야 합니다." -#: ../../source/how-to-authenticate-supernodes.rst:42 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 +#, fuzzy msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. 
For an " -"example, refer to our code sample, which contains a CSV file with two known " -"node public keys." +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" -"알려진 노드 공개키를 저장하는 유효한 CSV 파일은 쉼표로 구분하고 주석 없이 " -"OpenSSH 형식으로 키를 나열해야 합니다. 예를 들어, 두 개의 알려진 노드 공개키" -"가 포함된 CSV 파일이 포함된 코드 샘플을 참조하세요." +"다음으로, Docker파일과 ServerApp 코드가 있는 디렉터리에서 다음 명령을 실행하여 ServerApp Docker 이미지를" +" 빌드합니다." -#: ../../source/how-to-authenticate-supernodes.rst:44 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 +#, fuzzy msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code:`--" -"auth-superlink-public-key` expect paths to the server's private and public " -"keys. For development purposes, you can generate a private and public key " -"pair using :code:`ssh-keygen -t ecdsa -b 384`." +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." msgstr "" -"두 번째 및 세 번째 플래그 :code:`--auth-superlink-private-key` 및 :code:`--" -"auth-superlink-public-key`는 서버의 개인 및 공개 키의 경로를 예상합니다. 개" -"발 목적으로 :code:`ssh-keygen -t ecdsa -b 384`를 사용하여 개인 및 공개 키 쌍" -"을 생성할 수 있습니다." +"이미지에``flwr_serverapp``이라는 이름을 붙이고 ``0.0.1``이라는 태그를 붙였습니다. 여기서 선택한 값은 예시일 " +"뿐이라는 점을 기억하세요. 필요에 따라 변경할 수 있습니다." -#: ../../source/how-to-authenticate-supernodes.rst:47 -msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of known " -"nodes, you need to shut the server down, edit the CSV file, and start the " -"server again. Support for dynamically changing the set of known nodes is on " -"the roadmap to be released in Flower 1.10 (ETA: June)." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:205 +msgid "Start the first ClientApp container:" msgstr "" -"Flower 1.9에서는 알려진 노드 공개키를 SuperLink에 동적으로 제거, 편집 또는 추" -"가하는 기능이 지원되지 않습니다. 알려진 노드 집합을 변경하려면 서버를 종료하" -"고 CSV 파일을 편집한 다음 서버를 다시 시작해야 합니다. 알려진 노드 집합을 동" -"적으로 변경하는 기능은 Flower 1.10(출시 예정일: 6월)에서 로드맵에 포함되어 있" -"습니다." -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "Enable node authentication in :code:`SuperNode`" -msgstr ":code:`SuperNode`에서 노드 인증을 활성화합니다" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "" +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "``flwr_serverapp:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." -#: ../../source/how-to-authenticate-supernodes.rst:55 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client (:code:" -"`SuperNode`). Use the following terminal command to start an authenticated :" -"code:`SuperNode`:" +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" msgstr "" -"장기 실행 중인 Flower 서버(:code:`SuperLink`)와 마찬가지로, 장기 실행 중인 " -"Flower 클라이언트(:code:`SuperNode`)에서도 노드 인증을 쉽게 활성화할 수 있습" -"니다. 다음 터미널 명령을 사용하여 인증된 :code:`SuperNode`를 시작하세요:" -#: ../../source/how-to-authenticate-supernodes.rst:66 -msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the node's " -"private key file and the :code:`--auth-supernode-public-key` flag expects a " -"path to the node's public key file. For development purposes, you can " -"generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b " -"384`." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." msgstr "" -":code:`--auth-supernode-private-key` 플래그는 노드의 개인 키 파일 경로를, :" -"code:`--auth-supernode-public-key` 플래그는 노드의 공개 키 파일 경로를 예상합" -"니다. 
개발 목적으로 :code:`ssh-keygen -t ecdsa -b 384`를 사용하여 개인 및 공" -"개 키 쌍을 생성할 수 있습니다." -#: ../../source/how-to-authenticate-supernodes.rst:70 -msgid "Security notice" -msgstr "보안 공지" +#: ../../source/docker/tutorial-quickstart-docker.rst:226 +msgid "Start the second ClientApp container:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:72 -msgid "" -"The system's security relies on the credentials of the SuperLink and each " -"SuperNode. Therefore, it is imperative to safeguard and safely store the " -"credentials to avoid security risks such as Public Key Infrastructure (PKI) " -"impersonation attacks. The node authentication mechanism also involves human " -"interaction, so please ensure that all of the communication is done in a " -"secure manner, using trusted communication methods." +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +msgid "Step 5: Start the SuperExec" msgstr "" -"시스템의 보안은 SuperLink와 각SuperNode의 자격 증명에 의존합니다. 따라서 공개" -"키 기반구조(PKI) 사칭 공격과 같은 보안 위험을 피하기 위해 자격 증명을 보호하" -"고 안전하게 보관하는 것이 필수적입니다. 노드 인증 메커니즘에는 사람의 상호 작" -"용도 포함되므로 모든 통신이 신뢰할 수 있는 통신 방법을 사용하여 안전한 방식으" -"로 이루어지도록 하세요." -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" -msgstr "결론" +#: ../../source/docker/tutorial-quickstart-docker.rst:239 +#, fuzzy +msgid "" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." +msgstr "ServerApp 이미지를 빌드하고 실행하는 절차는 SuperNode 이미지와 거의 동일합니다." -#: ../../source/how-to-authenticate-supernodes.rst:79 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"You should now have learned how to start a long-running Flower server (:code:" -"`SuperLink`) and client (:code:`SuperNode`) with node authentication " -"enabled. 
You should also know the significance of the private key and store " -"it safely to minimize security risks." +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." msgstr "" -"이제 노드 인증이 활성화된 상태에서 장기간 실행되는 Flower 서버(:code:" -"`SuperLink`)와 클라이언트(:code:`SuperNode`)를 시작하는 방법을 배웠을 것입니" -"다. 또한 보안 위험을 최소화하기 위해 개인키의 중요성을 알고 안전하게 보관해" -"야 합니다." - -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" -msgstr "클라이언트 구성" -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are, " -"for example, a popular way to control client-side hyperparameters from the " -"server." +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -"모델 파라미터와 함께 Flower는 설정 값을 클라이언트에 전송할 수 있습니다. 구" -"성 값은 다양한 용도로 사용할 수 있습니다. 예를 들어 서버에서 클라이언트 측 하" -"이퍼파라미터를 제어하는 데 널리 사용되는 방법입니다." -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" -msgstr "구성 값" +#: ../../source/docker/tutorial-quickstart-docker.rst:248 +msgid "Dockerfile.superexec" +msgstr "" -#: ../../source/how-to-configure-clients.rst:9 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Configuration values are represented as a dictionary with ``str`` keys and " -"values of type ``bool``, ``bytes``, ``double`` (64-bit precision float), " -"``int``, or ``str`` (or equivalent types in different languages). Here is an " -"example of a configuration dictionary in Python:" +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -"구성 값은 ``str`` 키와 ``bool``, ``bytes``, ``double``(64비트 정밀도 정수), " -"``int`` 또는 ``str``(또는 다른 언어의 동등한 유형) 유형의 값으로 구성된 사전" -"으로 표현됩니다. 
다음은 Python의 구성 사전 예제입니다:" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client using " -"gRPC, and then deserializes them back to Python dictionaries." +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -"Flower는 이러한 구성 dictionaries(또는 줄여서 *config dict*)를 ProtoBuf 표현" -"으로 직렬화하고, gRPC를 사용하여 클라이언트로 전송한 다음 다시 Python " -"dictionaries로 역직렬화합니다." -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Currently, there is no support for directly sending collection types (e.g., " -"``Set``, ``List``, ``Map``) as values in configuration dictionaries. There " -"are several workarounds to send collections as values by converting them to " -"one of the supported value types (and converting them back on the client-" -"side)." +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" msgstr "" -"현재 구성 사전에서 컬렉션 유형(예: ``Set``, ``List``, ``Map``)을 값으로 직접 " -"전송하는 기능은 지원되지 않습니다. 컬렉션을 지원되는 값 유형 중 하나로 변환" -"한 다음 클라이언트 측에서 다시 변환하여 값으로 보내는 몇 가지 해결 방법이 있" -"습니다." -#: ../../source/how-to-configure-clients.rst:26 -msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and " -"then convert the JSON string back to a list of floating-point numbers on the " -"client." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" msgstr "" -"예를 들어 부동 소수점 숫자 목록을 JSON 문자열로 변환한 다음 구성 dictionary" -"을 사용하여 JSON 문자열을 전송한 다음 클라이언트에서 다시 부동 소수점 숫자 목" -"록으로 변환할 수 있습니다." 
-#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" -msgstr "기본 제공 전략을 통한 구성" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." +msgstr "" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" -"The easiest way to send configuration values to clients is to use a built-in " -"strategy like :code:`FedAvg`. Built-in strategies support so-called " -"configuration functions. A configuration function is a function that the " -"built-in strategy calls to get the configuration dictionary for the current " -"round. It then forwards the configuration dictionary to all the clients " -"selected during that round." +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" msgstr "" -"클라이언트에 구성 값을 보내는 가장 쉬운 방법은 :code:`FedAvg`와 같은 기본 제" -"공 전략을 사용하는 것입니다. 기본 제공 전략은 소위 구성 함수를 지원합니다. 구" -"성 함수는 내장 전략이 현재 단계의 구성 사전을 가져오기 위해 호출하는 함수입니" -"다. 그런 다음 해당 단계 동안 선택된 모든 클라이언트에 구성 사전을 전달합니다." -#: ../../source/how-to-configure-clients.rst:34 -msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of federated " -"learning, and (c) the number of epochs to train on the client-side. Our " -"configuration function could look like this:" +#: ../../source/docker/tutorial-quickstart-docker.rst:290 +msgid "Start the SuperExec container:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" msgstr "" -"간단한 예부터 시작하겠습니다. (a) 클라이언트가 사용해야 하는 배치 크기, (b) " -"현재 글로벌 연합 라운드, (c) 클라이언트 측에서 학습할 에포크 수를 전송하고 " -"싶다고 가정해 보겠습니다. 
구성 함수는 다음과 같습니다:" -#: ../../source/how-to-configure-clients.rst:47 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter :code:" -"`on_fit_config_fn`:" +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." msgstr "" -"기본 제공 전략이 이 함수를 사용하도록 하려면 초기화 중에 매개 변수 :code:" -"`on_fit_config_fn`을 사용하여 ``FedAvg``에 이 함수를 전달하면 됩니다:" -#: ../../source/how-to-configure-clients.rst:56 +#: ../../source/docker/tutorial-quickstart-docker.rst:310 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy msgid "" -"One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "클라이언트 측에서는 ``fit``으로 구성 dictionary을 받습니다:" +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "``flwr_supernode:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." -#: ../../source/how-to-configure-clients.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to " -"send different configuration values to `evaluate` (for example, to use a " -"different batch size)." -msgstr "" -"평가를 구성하는 `on_evaluate_config_fn`도 있으며, 같은 방식으로 작동합니다. " -"다른 배치 크기를 사용하기 위해 다른 구성 값을 `evaluate`로 보내려고 할 수 있" -"기 때문에 이 함수는 별도의 함수입니다." - -#: ../../source/how-to-configure-clients.rst:69 -msgid "" -"The built-in strategies call this function every round (that is, every time " -"`Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling " -"`on_evaluate_config_fn` every round allows us to vary/change the config dict " -"over consecutive rounds. 
If we wanted to implement a hyperparameter " -"schedule, for example, to increase the number of local epochs during later " -"rounds, we could do the following:" -msgstr "" -"기본 제공 전략은 매 라운드마다 이 함수를 호출합니다(즉, `Strategy." -"configure_fit` 또는 `Strategy.configure_evaluate`가 실행될 때마다). 매 라운드" -"마다 `on_evaluate_config_fn`을 호출하면 연속된 라운드에서 config dict를 변경/" -"변경할 수 있습니다. 예를 들어 이후 라운드에서 로컬 에포크 수를 늘리기 위해 하" -"이퍼파라미터 일정을 구현하려면 다음과 같이 할 수 있습니다:" - -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." -msgstr ":code:`FedAvg` 전략은 이 함수를 *매 라운드마다* 호출합니다." +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" +msgstr "" -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" -msgstr "개별 클라이언트 구성" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." +msgstr "" -#: ../../source/how-to-configure-clients.rst:87 -msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +#: ../../source/docker/tutorial-quickstart-docker.rst:320 +msgid "Step 6: Run the Quickstart Project" msgstr "" -"경우에 따라 다른 구성 값을 다른 클라이언트에 보내야 하는 경우도 있습니다." -#: ../../source/how-to-configure-clients.rst:89 -msgid "" -"This can be achieved by customizing an existing strategy or by :doc:" -"`implementing a custom strategy from scratch `. 
" -"Here's a nonsensical example that customizes :code:`FedAvg` by adding a " -"custom ``\"hello\": \"world\"`` configuration key/value pair to the config " -"dict of a *single client* (only the first client in the list, the other " -"clients in this round to not receive this \"special\" config value):" +#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#, fuzzy +msgid "Add the following lines to the ``pyproject.toml``:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" + +#: ../../source/docker/tutorial-quickstart-docker.rst:331 +msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -"이는 기존 전략을 사용자 지정하거나 :doc:`implementing a custom strategy from " -"scratch `를 통해 수행할 수 있습니다. 다음은 사용" -"자 지정 ``\"hello\"'를 추가하여 :code:`FedAvg`를 사용자 지정하는 무의미한 예" -"입니다: \"world\"`` 구성 키/값 쌍을 *단일 클라이언트*의 config dict에 추가합" -"니다(목록의 첫 번째 클라이언트만, 이 라운드의 다른 클라이언트는 이 \"특별한" -"\" 구성 값을 수신하지 않음):" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" -msgstr "로깅 구성" +#: ../../source/docker/tutorial-quickstart-docker.rst:337 +msgid "Follow the SuperExec logs to track the execution of the run:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:4 -msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default following a " -"standard message format:" +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +msgid "Step 7: Update the Application" msgstr "" -"Flower 로거는 federated 학습 워크로드에서 발생하는 모든 핵심 이벤트를 추적합" -"니다. 기본적으로 표준 메시지 형식에 따라 정보를 표시합니다:" -#: ../../source/how-to-configure-logging.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"containing relevant information including: log message level (e.g. :code:" -"`INFO`, :code:`DEBUG`), a timestamp, the line where the logging took place " -"from, as well as the log message itself. 
In this way, the logger would " -"typically display information on your terminal as follows:" +"Change the application code. For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -"로그 메시지 수준(예: :code:`INFO`, :code:`DEBUG`), 타임스탬프, 로깅이 발생한 " -"줄, 로그 메시지 자체 등 관련 정보를 포함합니다. 이러한 방식으로 로거는 일반적" -"으로 다음과 같은 정보를 터미널에 표시합니다:" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" -msgstr "파일에 로그 저장" +#: ../../source/docker/tutorial-quickstart-docker.rst:349 +msgid "quickstart_docker/task.py" +msgstr "" -#: ../../source/how-to-configure-logging.rst:36 -msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when using " -"the :code:`VirtualClientEngine` (i.e. when you do :code:`fl.simulation." -"start_simulation`). In some situations you might want to save this log to " -"disk. You can do so by calling the `fl.common.logger.configure() `_ function. " -"For example:" -msgstr "" -"기본적으로 Flower 로그는 Federated 학습 워크로드를 실행하는 터미널에 출력됩니" -"다. 이는 gRPC 기반 페더레이션(즉,:code:`fl.simulation.start_simulation`를 실" -"행하는 경우)과 :code:`VirtualClientEngine`을 사용하는 경우(즉, :코드:`fl." -"simulation.start_simulation`을 실행하는 경우) 모두에 적용됩니다. 경우에 따라 " -"이 로그를 디스크에 저장하고 싶을 수도 있습니다. 이 경우 `fl.common.logger." -"configure() `_ 함수를 호출하여 저장할 수 있습니다. 예를 들어:" - -#: ../../source/how-to-configure-logging.rst:53 -msgid "" -"With the above, Flower will record the log you see on your terminal to :code:" -"`log.txt`. This file will be created in the same directory as were you are " -"running the code from. If we inspect we see the log above is also recorded " -"but prefixing with :code:`identifier` each line:" -msgstr "" -"위와 같이 하면 Flower는 터미널에 표시되는 로그를 :code:`log.txt`에 기록합니" -"다. 이 파일은 코드를 실행한 디렉터리와 동일한 디렉터리에 생성됩니다. 
검사해보" -"면 위의 로그도 기록되지만 각 줄 앞에 :code:`identifier` 접두사가 붙는 것을 확" -"인할 수 있습니다:" - -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" -msgstr "나만의 메시지 기록" +#: ../../source/docker/tutorial-quickstart-docker.rst:356 +#, fuzzy +msgid "Stop the current ClientApp containers:" +msgstr "현재 클라이언트 속성입니다." -#: ../../source/how-to-configure-logging.rst:76 -msgid "" -"You might expand the information shown by default with the Flower logger by " -"adding more messages relevant to your application. You can achieve this " -"easily as follows." +#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "기본 이미지 빌드" + +#: ../../source/docker/tutorial-quickstart-docker.rst:368 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -"애플리케이션과 관련된 메시지를 더 추가하여 Flower 로거에 기본적으로 표시되는 " -"정보를 확장할 수 있습니다. 다음과 같이 쉽게 추가할 수 있습니다." -#: ../../source/how-to-configure-logging.rst:102 -msgid "" -"In this way your logger will show, in addition to the default messages, the " -"ones introduced by the clients as specified above." +#: ../../source/docker/tutorial-quickstart-docker.rst:383 +msgid "Run the updated project:" msgstr "" -"이렇게 하면 로거에 기본 메시지 외에 위에서 지정한 대로 클라이언트가 소개한 메" -"시지가 표시됩니다." -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" -msgstr "원격 서비스에 로그인" +#: ../../source/docker/tutorial-quickstart-docker.rst:390 +msgid "Step 8: Clean Up" +msgstr "" -#: ../../source/how-to-configure-logging.rst:130 -msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a " -"host to which logs can be pushed (via :code:`POST`) through a native Python :" -"code:`logging.handler.HTTPHandler`. This is a particularly useful feature " -"in :code:`gRPC`-based Federated Learning workloads where otherwise gathering " -"logs from all entities (i.e. the server and the clients) might be " -"cumbersome. 
Note that in Flower simulation, the server automatically " -"displays all logs. You can still specify a :code:`HTTPHandler` should you " -"wish to backup or analyze the logs somewhere else." +#: ../../source/docker/tutorial-quickstart-docker.rst:392 +msgid "Remove the containers and the bridge network:" msgstr "" -"또한 :code:`fl.common.logger.configure` 함수를 사용하면 네이티브 Python :" -"code:`logging.handler.HTTPHandler`를 통해 로그를 푸시할 수 있는 호스트를 지정" -"할 수 있습니다(:code:`POST`를 통해). 이는 모든 엔티티(예: 서버 및 클라이언트)" -"에서 로그를 수집하는 것이 번거로울 수 있는 :code:`gRPC` 기반 Federated 학습 " -"워크로드에서 특히 유용한 기능입니다. Flower 시뮬레이션에서는 서버가 모든 로그" -"를 자동으로 표시합니다. 로그를 다른 곳에 백업하거나 분석하려는 경우 :code:" -"`HTTPHandler`를 지정할 수 있습니다." -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" -msgstr "SSL 연결 사용" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 +#, fuzzy +msgid "Where to Go Next" +msgstr "시작 위치" -#: ../../source/how-to-enable-ssl-connections.rst:4 -msgid "" -"This guide describes how to a SSL-enabled secure Flower server (:code:" -"`SuperLink`) can be started and how a Flower client (:code:`SuperNode`) can " -"establish a secure connections to it." +#: ../../source/docker/tutorial-quickstart-docker.rst:406 +msgid ":doc:`enable-tls`" msgstr "" -"이 가이드에서는 SSL을 지원하는 보안 Flower 서버(:코드:`SuperLink`)를 시작하" -"는 방법과 Flower 클라이언트(:코드:`SuperNode`)가 이 서버에 보안 연결을 설정하" -"는 방법을 설명합니다." -#: ../../source/how-to-enable-ssl-connections.rst:7 -msgid "" -"A complete code example demonstrating a secure connection can be found `here " -"`_." +#: ../../source/docker/tutorial-quickstart-docker.rst:407 +msgid ":doc:`persist-superlink-state`" msgstr "" -"보안 연결을 보여주는 전체 코드 예제는 '여기 `_'에서 확인할 수 있습니다." -#: ../../source/how-to-enable-ssl-connections.rst:10 -msgid "" -"The code example comes with a :code:`README.md` file which explains how to " -"start it. 
Although it is already SSL-enabled, it might be less descriptive " -"on how it does so. Stick to this guide for a deeper introduction to the " -"topic." +#: ../../source/docker/tutorial-quickstart-docker.rst:408 +msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" -"코드 예제에는 시작 방법을 설명하는 :code:`README.md` 파일이 함께 제공됩니다. " -"이미 SSL을 사용하도록 설정되어 있지만 그 방법에 대한 설명이 부족할 수 있습니" -"다. 이 가이드를 참고하여 이 주제에 대해 자세히 알아보세요." -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" -msgstr "인증서" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 +#, fuzzy +msgid "Quickstart with Docker Compose" +msgstr "빠른 시작 튜토리얼" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate " -"self-signed certificates. As this can become quite complex we are going to " -"ask you to run the script in :code:`examples/advanced-tensorflow/" -"certificates/generate.sh` with the following command sequence:" +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -"SSL 사용 연결을 사용하려면 서버와 클라이언트에 인증서를 전달해야 합니다. 이 " -"가이드에서는 자체 서명된 인증서를 생성하겠습니다. 이 과정은 상당히 복잡할 수 " -"있으므로 다음 명령 시퀀스를 사용하여 :code:`examples/advanced-tensorflow/" -"certificates/generate.sh`에서 스크립트를 실행하도록 요청하겠습니다:" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"This will generate the certificates in :code:`examples/advanced-tensorflow/." -"cache/certificates`." +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." 
msgstr "" -"이렇게 하면 :code:`examples/advanced-tensorflow/.cache/certificates`에 인증서" -"가 생성됩니다." -#: ../../source/how-to-enable-ssl-connections.rst:31 -msgid "" -"The approach for generating SSL certificates in the context of this example " -"can serve as an inspiration and starting point, but it should not be used as " -"a reference for production environments. Please refer to other sources " -"regarding the issue of correctly generating certificates for production " -"environments. For non-critical prototyping or research projects, it might be " -"sufficient to use the self-signed certificates generated using the scripts " -"mentioned in this guide." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 +msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -"이 예의 맥락에서 SSL 인증서를 생성하는 접근 방식은 영감과 출발점이 될 수 있지" -"만 프로덕션 환경에 대한 참조로 사용해서는 안 됩니다. 프로덕션 환경용 인증서" -"를 올바르게 생성하는 문제에 대해서는 다른 출처를 참조하세요. 중요하지 않은 프" -"로토타이핑 또는 연구 프로젝트의 경우, 이 가이드에 언급된 스크립트를 사용하여 " -"생성한 자체 서명 인증서를 사용하는 것으로 충분할 수 있습니다." - -#: ../../source/how-to-enable-ssl-connections.rst:39 -msgid "Server (SuperLink)" -msgstr "서버(SuperLink)" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses " -"the previously generated certificates:" +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -"다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 서버(SuperLink)" -"를 시작합니다:" -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private " -"key." 
+"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -"인증서를 제공할 때 서버는 세 가지 인증서 경로의 튜플을 기대합니다: CA 인증" -"서, 서버 인증서 및 서버 개인 키입니다." -#: ../../source/how-to-enable-ssl-connections.rst:54 -msgid "Client (SuperNode)" -msgstr "클라이언트(SuperNode)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +#, fuzzy +msgid "Step 2: Run Flower in Insecure Mode" +msgstr "Flower SuperNode를 실행합니다." -#: ../../source/how-to-enable-ssl-connections.rst:56 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" -"Use the following terminal command to start a client (SuperNode) that uses " -"the previously generated certificates:" +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -"다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 클라이언트" -"(SuperNode)를 시작합니다:" -#: ../../source/how-to-enable-ssl-connections.rst:64 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 msgid "" -"When setting :code:`root_certificates`, the client expects a file path to " -"PEM-encoded root certificates." +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -"코드:`root_certificates`를 설정하면 클라이언트는 PEM 인코딩된 루트 인증서의 " -"파일 경로를 예상합니다." -#: ../../source/how-to-enable-ssl-connections.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" -"You should now have learned how to generate self-signed certificates using " -"the given script, start an SSL-enabled server and have a client establish a " -"secure connection to it." +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -"이제 주어진 스크립트를 사용하여 자체 서명 인증서를 생성하고, SSL 사용 서버를 " -"시작하고, 클라이언트가 보안 연결을 설정하는 방법을 배웠을 것입니다." 
-#: ../../source/how-to-enable-ssl-connections.rst:75 -msgid "Additional resources" -msgstr "추가 리소스" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#, fuzzy +msgid "``docker compose``: The Docker command to run the Docker Compose tool." +msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" -"These additional sources might be relevant if you would like to dive deeper " -"into the topic of certificates:" -msgstr "인증서에 대해 더 자세히 알아보고 싶다면 이러한 추가 자료를 참고하세요:" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:79 -msgid "`Let's Encrypt `_" -msgstr "'암호화하세요 `_'" - -#: ../../source/how-to-enable-ssl-connections.rst:80 -msgid "`certbot `_" -msgstr "`인증봇 `_" - -#: ../../source/how-to-implement-strategies.rst:2 -msgid "Implement strategies" -msgstr "전략 구현" - -#: ../../source/how-to-implement-strategies.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" -"The strategy abstraction enables implementation of fully custom strategies. " -"A strategy is basically the federated learning algorithm that runs on the " -"server. Strategies decide how to sample clients, how to configure clients " -"for training, how to aggregate updates, and how to evaluate models. Flower " -"provides a few built-in strategies which are based on the same API described " -"below." +"``--build``: Rebuild the images for each service if they don't already " +"exist." msgstr "" -"전략 추상화를 통해 완전한 맞춤형 전략을 구현할 수 있습니다. 전략은 " -"기본적으로 서버에서 실행되는 연합 학습 알고리즘입니다. 
전략은 클라이언트를 " -"샘플링하는 방법, 학습을 위해 클라이언트를 구성하는 방법, 업데이트를 집계하는 " -"방법, 모델을 평가하는 방법을 결정합니다. Flower는 아래에 설명된 것과 동일한 " -"API를 기반으로 하는 몇 가지 기본 제공 전략을 제공합니다." - -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" -msgstr ":code:`Strategy` 추상화" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" -"All strategy implementation are derived from the abstract base class :code:" -"`flwr.server.strategy.Strategy`, both built-in implementations and third " -"party implementations. This means that custom strategy implementations have " -"the exact same capabilities at their disposal as built-in ones." +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -"모든 전략 구현은 기본 제공 구현과 타사 구현 모두 추상 기본 클래스인 :code:" -"`flwr.server.strategy.Strategy`에서 파생됩니다. 즉, 사용자 정의 전략 구현은 " -"기본 제공 구현과 완전히 동일한 기능을 사용할 수 있습니다." 
- -#: ../../source/how-to-implement-strategies.rst:18 -msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" -msgstr "전략 추상화에서는 구현해야 하는 몇 가지 추상적인 메서드를 정의합니다:" -#: ../../source/how-to-implement-strategies.rst:75 -msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived from " -"the abstract base class :code:`Strategy`) that implements for the previously " -"shown abstract methods:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "Step 3: Run the Quickstart Project" msgstr "" -"새 전략을 생성한다는 것은 이전에 표시된 추상 메서드에 대해 구현하는 새로운 :" -"code:`class`(추상 기본 클래스 :code:`Strategy`에서 파생됨)를 구현하는 것을 의" -"미합니다:" - -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" -msgstr "Flower 서버는 다음 순서로 이러한 메서드를 호출합니다:" - -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." -msgstr "다음 섹션에서는 이러한 각 방법에 대해 자세히 설명합니다." -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" -msgstr ":code:`initialize_parameters` 메서드" - -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" -":code:`initialize_parameters` is called only once, at the very beginning of " -"an execution. It is responsible for providing the initial global model " -"parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." msgstr "" -"code:`initialize_parameters`는 실행을 처음 시작할 때 한 번만 호출됩니다. 이 " -"함수는 초기 전역 모델 파라미터를 직렬화된 형식(즉, :code:`Parameters` 객체)으" -"로 제공하는 역할을 합니다." 
-#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" -"Built-in strategies return user-provided initial parameters. The following " -"example shows how initial parameters can be passed to :code:`FedAvg`:" +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -"기본 제공 전략은 사용자가 제공한 초기 매개 변수를 반환합니다. 다음 예는 초기 " -"매개 변수를 :code:`FedAvg`에 전달하는 방법을 보여줍니다:" -#: ../../source/how-to-implement-strategies.rst:209 -msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or :" -"code:`None`. If no parameters are returned from :code:" -"`initialize_parameters` (i.e., :code:`None`), the server will randomly " -"select one client and ask it to provide its parameters. This is a " -"convenience feature and not recommended in practice, but it can be useful " -"for prototyping. In practice, it is recommended to always use server-side " -"parameter initialization." -msgstr "" -"Flower 서버는 :code:`initialize_parameters`를 호출하여 :code:" -"`initial_parameters`에 전달된 파라미터를 반환하거나 :code:`None`을 반환합니" -"다. :code:`initial_parameters`에서 반환되는 매개변수가 없는 경우(즉, :code:" -"`None`) 서버는 무작위로 클라이언트 하나를 선택하여 해당 클라이언트에 매개변수" -"를 제공하도록 요청합니다. 이는 편의 기능이며 실제로는 권장하지 않지만 프로토" -"타이핑에는 유용할 수 있습니다. 실제로는 항상 서버 측 매개변수 초기화를 사용하" -"는 것이 좋습니다." - -#: ../../source/how-to-implement-strategies.rst:213 -msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint. It " -"is also the fundamental capability needed to implement hybrid approaches, " -"for example, to fine-tune a pre-trained model using federated learning." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -"서버 측 파라미터 초기화는 강력한 메커니즘입니다. 예를 들어 이전에 저장한 " -"체크포인트에서 학습을 재개하는 데 사용할 수 있습니다. 또한 연합 학습을 " -"사용하여 사전 학습된 모델을 미세 조정하는 등 하이브리드 접근 방식을 구현하는 " -"데 필요한 기본 기능입니다." -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" -msgstr ":code:`configure_fit` 메서드" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +msgid "quickstart-compose/pyproject.toml" +msgstr "" -#: ../../source/how-to-implement-strategies.rst:218 -msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round of " -"training. What does *configure* mean in this context? Configuring a round " -"means selecting clients and deciding what instructions to send to these " -"clients. The signature of :code:`configure_fit` makes this clear:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 +msgid "Execute the command to run the quickstart example:" msgstr "" -":code:`configure_fit`은 다가오는 학 라운드를 구성하는 역할을 합니다. 이 문맥" -"에서 *구성*은 무엇을 의미하나요? 라운드를 구성한다는 것은 클라이언트를 선택하" -"고 이 클라이언트에게 어떤 지침을 보낼지 결정하는 것을 의미합니다. code:" -"`configure_fit`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/how-to-implement-strategies.rst:231 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations usually " -"perform the following steps in :code:`configure_fit`:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 " -"나타냅니다. 
전략 구현은 일반적으로 :code:`configure_fit`에서 다음 단계를 " -"수행합니다:" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 -msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +msgid "Step 4: Update the Application" msgstr "" -":code:`client_manager`를 사용하여 사용 가능한 모든 클라이언트(또는 그 하위 집" -"합)를 무작위로 샘플링합니다(각각 :code:`ClientProxy` 개체로 표시됨)" -#: ../../source/how-to-implement-strategies.rst:234 -msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +msgid "In the next step, change the application code." msgstr "" -"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:" -"`config` dict를 보유한 동일한 :code:`FitIns`와 쌍을 이룹니다" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate in a " -"round if the corresponding :code:`ClientProxy` is included in the list " -"returned from :code:`configure_fit`." +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" msgstr "" -"보다 정교한 구현은 :code:`configure_fit`을 사용하여 사용자 지정 클라이언트 선" -"택 로직을 구현할 수 있습니다. 클라이언트는 :code:`configure_fit`에서 반환된 " -"목록에 해당 :code:`ClientProxy`가 포함된 경우에만 라운드에 참여합니다." -#: ../../source/how-to-implement-strategies.rst:240 -msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. 
This enables custom strategies to " -"train, for example, different models on different clients, or use different " -"hyperparameters on different clients (via the :code:`config` dict)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. instructions은 " -"클라이언트별로 정의되므로 각 클라이언트에 서로 다른 명령어를 전송할 수 " -"있습니다. 이를 통해 예를 들어 클라이언트마다 다른 모델을 학습시키거나 " -"클라이언트마다 다른 하이퍼파라미터를 사용하는 사용자 지정 전략을 사용할 수 " -"있습니다(:code:`config` dict를 통해)." -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" -msgstr ":code:`aggregate_fit` 메서드" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +#, fuzzy +msgid "Rebuild and restart the services." +msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned by " -"the clients that were selected and asked to train in :code:`configure_fit`." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -"code:`aggregate_fit`은 :code:`configure_fit`에서 훈련하도록 선택되고 요청된 " -"클라이언트가 반환한 결과를 집계하는 역할을 담당합니다." -#: ../../source/how-to-implement-strategies.rst:258 -msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via :code:" -"`configure_fit`). :code:`aggregate_fit` therefore receives a list of :code:" -"`results`, but also a list of :code:`failures`." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +msgid "If you haven't made any changes, you can skip this step." msgstr "" -"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과" -"를 얻을 수 있다는 보장은 없습니다(:code:`configure_fit`을 통해). 
따라서 :" -"code:`aggregate_fit`은 :code:`results` 목록뿐만 아니라 :code:`failures` 목록" -"도 받습니다." -#: ../../source/how-to-implement-strategies.rst:260 -msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a " -"dictionary of aggregated metrics. The :code:`Parameters` return value is " -"optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -"code:`aggregate_fit`은 선택적 :code:`Parameters` 개체와 집계된 메트릭의 " -"dictionary를 반환합니다. :code:`Parameters` 반환 값은 :code:`aggregate_fit`" -"이 제공된 결과가 집계에 충분하지 않다고 판단할 수 있으므로(예: 실패 수가 너" -"무 많음) 선택 사항입니다." - -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" -msgstr ":code:`configure_evaluate` 메서드" -#: ../../source/how-to-implement-strategies.rst:265 -msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming round " -"of evaluation. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_evaluate` makes this clear:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +msgid "Run the updated quickstart example:" msgstr "" -":code:`configure_evaluate`는 다가오는 평가 라운드를 구성하는 역할을 합니다. " -"이 문맥에서 *구성*은 무엇을 의미하나요? 라운드를 구성한다는 것은 클라이언트" -"를 선택하고 이러한 클라이언트에 전송할 지침을 결정하는 것을 의미합니다. :" -"code:`configure_evaluate`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/how-to-implement-strategies.rst:278 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. 
Strategy implementations usually " -"perform the following steps in :code:`configure_evaluate`:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 " -"나타냅니다. 전략 구현은 일반적으로 :code:`configure_evaluate`에서 다음 " -"단계를 수행합니다:" -#: ../../source/how-to-implement-strategies.rst:281 -msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +msgid "Step 5: Persisting the SuperLink State" msgstr "" -"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:" -"`config` dict를 보유한 동일한 :code:`EvaluateIns`와 쌍을 이룹니다" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate in a " -"round if the corresponding :code:`ClientProxy` is included in the list " -"returned from :code:`configure_evaluate`." +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -"보다 정교한 구현은 :code:`configure_evaluate`를 사용하여 사용자 지정 클라이언" -"트 선택 로직을 구현할 수 있습니다. 클라이언트는 :code:`configure_evaluate`에" -"서 반환된 목록에 해당 :code:`ClientProxy`가 포함된 경우에만 라운드에 참여합니" -"다." -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. 
This enables custom strategies to " -"evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` dict)." -msgstr "" -"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. 명령어는 " -"클라이언트별로 정의되므로 각 클라이언트에 서로 다른 명령어를 전송할 수 " -"있습니다. 이를 통해 사용자 지정 전략을 통해 예를 들어 클라이언트마다 다른 " -"모델을 평가하거나 클라이언트마다 다른 하이퍼파라미터를 사용할 수 " -"있습니다(:code:`config` dict를 통해)." - -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" -msgstr ":code:`aggregate_evaluate` 메서드" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." +msgstr "" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in :code:" -"`configure_evaluate`." +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -"code:`aggregate_evaluate`는 :code:`configure_evaluate`에서 선택되어 평가를 요" -"청한 클라이언트가 반환한 결과를 집계하는 역할을 담당합니다." -#: ../../source/how-to-implement-strategies.rst:306 -msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via :code:" -"`configure_evaluate`). :code:`aggregate_evaluate` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Run the command:" msgstr "" -"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과" -"를 얻을 수 있다는 보장은 없습니다(:code:`configure_evaluate`를 통해). 따라" -"서 :code:`aggregate_evaluate`는 :code:`results` 목록뿐만 아니라 :code:" -"`failures` 목록도 받습니다." 
-#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a " -"dictionary of aggregated metrics. The :code:`float` return value is optional " -"because :code:`aggregate_evaluate` might decide that the results provided " -"are not sufficient for aggregation (e.g., too many failures)." +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -"code:`aggregate_evaluate`는 선택적 :code:`float`(손실)와 집계된 메트릭의 " -"dictionary를 반환합니다. code:`float` 반환 값은 :code:`aggregate_evaluate`가 " -"제공된 결과가 집계에 충분하지 않다고 판단할 수 있으므로(예: 실패 수가 너무 많" -"음) 선택 사항입니다." - -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" -msgstr ":code:`evaluate` 메서드" -#: ../../source/how-to-implement-strategies.rst:313 -msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. Having :code:`evaluate` in addition to :code:" -"`configure_evaluate`/:code:`aggregate_evaluate` enables strategies to " -"perform both servers-side and client-side (federated) evaluation." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." msgstr "" -":code:`evaluate`는 서버 측에서 모델 매개변수를 평가하는 역할을 담당합니다. " -"code:`configure_evaluate`/:code:`aggregate_evaluate`와 함께 :code:`evaluate`" -"를 사용하면 서버 측과 클라이언트 측(federated) 평가를 모두 수행할 수 있는 전" -"략을 사용할 수 있습니다." -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"The return value is again optional because the strategy might not need to " -"implement server-side evaluation or because the user-defined :code:" -"`evaluate` method might not complete successfully (e.g., it might fail to " -"load the server-side evaluation data)." +"Docker merges Compose files according to `merging rules " +"`_." 
msgstr "" -"반환 값은 전략에서 서버 측 평가를 구현할 필요가 없거나 사용자 정의 :code:" -"`evaluate` 메서드가 성공적으로 완료되지 않을 수 있기 때문에(예: 서버 측 평가 " -"데이터를 로드하지 못할 수 있음) 다시 선택 사항으로 설정할 수 있습니다." -#: ../../source/how-to-install-flower.rst:2 -msgid "Install Flower" -msgstr "Flower 설치" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +msgid "Rerun the ``quickstart-compose`` project:" +msgstr "" -#: ../../source/how-to-install-flower.rst:6 -msgid "Python version" -msgstr "Python 버전" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +msgid "Check the content of the ``state`` directory:" +msgstr "" -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" -msgstr "안정적인 릴리즈 설치" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 +msgid "" +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." +msgstr "" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -msgid "Using pip" -msgstr "pip 사용" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +msgid "Step 6: Run Flower with TLS" +msgstr "" -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" -"Stable releases are available on `PyPI `_::" +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." 
msgstr "" -"안정적인 릴리즈는 `PyPI `_:: 에서 확인할 수 " -"있습니다::" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." msgstr "" -"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr``을 " -"``simulation``extra와 함께 설치해야 합니다:" -#: ../../source/how-to-install-flower.rst:27 -msgid "Using conda (or mamba)" -msgstr "conda(또는 mamba) 사용" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +msgid "Restart the services with TLS enabled:" +msgstr "" -#: ../../source/how-to-install-flower.rst:29 -msgid "Flower can also be installed from the ``conda-forge`` channel." -msgstr "Flower은 'conda-forge' 채널에서도 설치할 수 있습니다." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 +msgid "Step 7: Add another SuperNode" +msgstr "" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" -"If you have not added ``conda-forge`` to your channels, you will first need " -"to run the following::" -msgstr "채널에 'conda-forge'를 추가하지 않은 경우 먼저 다음을 실행해야 합니다:" +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." +msgstr "" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed " -"with ``conda``::" +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." 
msgstr "" -"conda-forge`` 채널이 활성화되면 ``flwr``을 ``conda``로 설치할 수 있습니다::" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" -msgstr "또는 ``mamba``::" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 +msgid "In ``compose.yml``, add the following:" +msgstr "" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" -msgstr "설치 확인" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +msgid "compose.yml" +msgstr "" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to " -"the command line::" +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -"다음 명령을 사용하여 Flower가 성공적으로 설치되었는지 확인할 수 있습니다. 모" -"든 것이 정상적으로 작동하면 명령줄에 Flower의 버전이 출력됩니다:" - -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" -msgstr "고급 설치 옵션" -#: ../../source/how-to-install-flower.rst:58 -msgid "Install via Docker" -msgstr "Docker를 통해 설치" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 +msgid "" +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." 
+msgstr "" -#: ../../source/how-to-install-flower.rst:60 -msgid ":doc:`How to run Flower using Docker `" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 +msgid "In ``with-tls.yml``, add the following:" msgstr "" -":doc:`Docker를 사용하여 Flower를 실행하는 방법 `" -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" -msgstr "사전 릴리즈 설치" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +msgid "with-tls.yml" +msgstr "" -#: ../../source/how-to-install-flower.rst:65 -msgid "" -"New (possibly unstable) versions of Flower are sometimes available as pre-" -"release versions (alpha, beta, release candidate) before the stable release " -"happens::" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -"새(불안정할 수 있는) 버전의 Flower는 안정 버전이 출시되기 전에 사전 릴리즈 버" -"전(알파, 베타, 릴리즈 후보)으로 제공되는 경우가 있습니다:" -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases " -"should be installed with the ``simulation`` extra::" +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr`` 사전 릴리즈를 " -"``simulation`` extra와 함께 설치해야 합니다:" - -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "야간 릴리즈 설치" -#: ../../source/how-to-install-flower.rst:76 -msgid "" -"The latest (potentially unstable) changes in Flower are available as nightly " -"releases::" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -"Flower의 최신 (불안정할 수 있는) 변경 사항은 다음과 같이 야간 릴리즈로 제공됩" -"니다:" -#: ../../source/how-to-install-flower.rst:80 -msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` should " -"be 
installed with the ``simulation`` extra::" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +msgid "with-state.yml" msgstr "" -"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우, ``flwr-nightly``를 " -"``simulation`` extr와 함께 설치해야 합니다::" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" -msgstr "모니터 시뮬레이션" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#, fuzzy +msgid "Restart the services:" +msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/how-to-monitor-simulation.rst:4 -msgid "" -"Flower allows you to monitor system resources while running your simulation. " -"Moreover, the Flower simulation engine is powerful and enables you to decide " -"how to allocate resources per client manner and constrain the total usage. " -"Insights from resource consumption can help you make smarter decisions and " -"speed up the execution time." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -"Flower를 사용하면 시뮬레이션을 실행하는 동안 시스템 리소스를 모니터링할 수 있" -"습니다. 또한 Flower 시뮬레이션 엔진은 강력하며 클라이언트별 리소스 할당 방법" -"을 결정하고 총 사용량을 제한할 수 있습니다. 리소스 소비에 대한 인사이트를 통" -"해 더 현명한 결정을 내리고 실행 시간을 단축할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" -"The specific instructions assume you are using macOS and have the `Homebrew " -"`_ package manager installed." +"You can merge multiple Compose files into a single file. For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -"구체적인 지침은 macOS를 사용 중이고 'Homebrew `_ 패키지 관" -"리자가 설치되어 있다고 가정합니다." 
- -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" -msgstr "다운로드" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" -"`Prometheus `_ is used for data collection, while " -"`Grafana `_ will enable you to visualize the collected " -"data. They are both well integrated with `Ray `_ which " -"Flower uses under the hood." +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." msgstr "" -"`Prometheus `_는 데이터 수집에 사용되며, `Grafana " -"`_는 수집된 데이터를 시각화할 수 있게 해줍니다. 이 두 " -"도구는 모두 Flower가 내부적으로 사용하는 `Ray `_와 잘 통" -"합되어 있습니다." -#: ../../source/how-to-monitor-simulation.rst:18 -msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." -msgstr "구성 파일을 덮어씁니다(장치에 따라 다른 경로에 설치되어 있을 수 있음)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +msgid "Step 10: Clean Up" +msgstr "" -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" -msgstr "M1 Mac을 사용 중이라면:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#, fuzzy +msgid "Remove all services and volumes:" +msgstr "R에서 모든 항목을 제거합니다." -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "이전 세대 Intel Mac 장치에서는:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 +#, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "빠른 시작 튜토리얼" -#: ../../source/how-to-monitor-simulation.rst:34 -msgid "" -"Open the respective configuration files and change them. Depending on your " -"device, use one of the two following commands:" -msgstr "" -"각 구성 파일을 열고 변경합니다. 
장치에 따라 다음 두 명령 중 하나를 사용합니" -"다:" +#: ../../source/docker/use-a-different-version.rst:2 +#, fuzzy +msgid "Use a Different Flower Version" +msgstr "다른 Flower 버전 사용" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/docker/use-a-different-version.rst:4 msgid "" -"and then delete all the text in the file and paste a new Prometheus config " -"you see below. You may adjust the time intervals to your requirements:" +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." msgstr "" -"를 입력한 다음 파일의 모든 텍스트를 삭제하고 아래에 표시된 새 Prometheus 설정" -"을 붙여넣습니다. 요구 사항에 따라 시간 간격을 조정할 수 있습니다:" +"다른 버전의 Flower를 사용하려면 태그를 변경하여 사용할 수 있습니다(예: Flower nightly). 사용 가능한 모든 " +"버전은 `Docker Hub `__에 있습니다." -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/docker/use-a-different-version.rst:10 +#, fuzzy msgid "" -"Now after you have edited the Prometheus configuration, do the same with the " -"Grafana configuration files. Open those using one of the following commands " -"as before:" +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." msgstr "" -"이제 Prometheus 구성을 편집한 후 Grafana 구성 파일에 대해서도 동일한 작업을 " -"수행합니다. 이전과 마찬가지로 다음 명령 중 하나를 사용하여 파일을 엽니다:" +"SuperNode Docker 이미지는 현재 1.9.0 야간 릴리스에서만 작동합니다. 안정 버전은 Flower 1.9.0(안정)이 " +"출시되면 사용할 수 있습니다(예상 출시일: 5월). SuperNode 야간 이미지는 같은 날 릴리스된 해당 SuperLink 및 " +"서버앱 야간 이미지와 페어링되어야 합니다. 버전이 동기화되도록 하려면 ``nightly`` 대신 " +"``1.9.0.dev20240501``과 같은 구체적인 태그를 사용하는 것이 좋습니다." -#: ../../source/how-to-monitor-simulation.rst:69 -msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." 
-msgstr "" -"터미널 편집기가 열리면 이전과 마찬가지로 다음 구성을 적용할 수 있습니다." +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "예시: PyTorch에서 FedBN - 중앙 집중식에서 연합식으로" -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 msgid "" -"Congratulations, you just downloaded all the necessary software needed for " -"metrics tracking. Now, let’s start it." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." msgstr "" -"축하합니다. 매트릭 트레킹에 필요한 모든 소프트웨어를 다운로드하셨습니다. 이" -"제 시작해 보겠습니다." +"이 튜토리얼에서는 non-iid data를 위해 설계된 federated 훈련 전략인 `FedBN " +"`_으로 기존 머신러닝 워크로드의 federated 버전을 구축하기 " +"위해 Flower를 사용하는 방법을 보여드립니다. 우리는 PyTorch를 사용하여 CIFAR-10 데이터 세트에서 컨볼루션 " +"신경망(일괄 정규화 레이어 포함)을 훈련하고 있습니다. FedBN을 적용할 때, :doc:`예제: 파이토치 -중앙 집중식에서 " +"연합식으로 ` 와 비교했을 때 몇 가지 사항만 " +"변경 하면 됩니다." -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" -msgstr "매트릭 트래킹" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "Centralized Training" +msgstr "중앙 집중식 훈련" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 +#, fuzzy msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. 
The only " +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" -"Flower 시뮬레이션을 실행하기 전에 방금 설치 및 구성한 모니터링 도구를 시작해" -"야 합니다." +"모든 파일은 :doc:`예제: 파이토치 -중앙 집중식에서 연합식으로 `를 기반으로 수정합니다. :code:`cifar.py`라는 파일을 수정하기만 하면 되며, 수정된 부분은 " +"아래와 같습니다:" -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" -"Please include the following argument in your Python code when starting a " -"simulation." -msgstr "시뮬레이션을 시작할 때 Python 코드에 다음 전달인자를 포함하세요." +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." +msgstr "Net() 클래스에 정의된 모델 아키텍처는 그에 따라 배치 정규화 레이어가 추가됩니다." -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." -msgstr "이제 워크로드를 시작할 준비가 되었습니다." +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 +msgid "You can now run your machine learning workload:" +msgstr "이제 머신 러닝 워크로드를 실행할 수 있습니다:" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" -"Shortly after the simulation starts, you should see the following logs in " -"your terminal:" +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" -"시뮬레이션이 시작되고 얼마 지나지 않아 터미널에 다음 로그가 표시됩니다:" +"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 사용하여 " +"FedBN 내에서 하나의 서버와 두 개의 클라이언트로 구성된 연합학습 시스템을 만들어 보겠습니다." -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." -msgstr "``_ 에서 모든 것을 볼 수 있습니다." 
+#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 +msgid "Federated Training" +msgstr "연합 훈련" -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 +#, fuzzy msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the " -"lowest option)." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" -"Ray 대시보드입니다. 메트릭(왼쪽 패널의 가장 아래 옵션)으로 이동할 수 있습니" -"다." +":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 읽었다면, 다음 부분은 쉽게 따라할 수 있으며 :code:`client.py`의 " +":code:`get_parameters`와 :code:`set_parameters` 함수만 수정해야 합니다. 그렇지 않은 경우 " +":doc:`예제: 파이토치 - 중앙 집중식에서 연합식으로 `를 먼저 읽어보세요." -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 +#, fuzzy msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the right-" -"up corner, “View in Grafana”. Please note that the Ray dashboard is only " -"accessible during the simulation. After the simulation ends, you can only " -"use Grafana to explore the metrics. You can start Grafana by going to " -"``http://localhost:3000/``." +"Our example consists of one *server* and two *clients*. In FedBN, " +"``server.py`` keeps unchanged, we can start the server directly." msgstr "" -"또는 오른쪽 위 모서리인 \"Grafana에서 보기\"를 클릭하여 Grafana에서 바로 확인" -"할 수도 있습니다. Ray 대시보드는 시뮬레이션 중에만 액세스할 수 있다는 점에 유" -"의하세요. 시뮬레이션이 종료된 후에는 Grafana를 사용하여 메트릭을 탐색할 수만 " -"있습니다. ``http://localhost:3000/``로 이동하여 Grafana를 시작할 수 있습니다." +"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. FedBN에서 :code:`server.py`는 변경되지 않고 " +"그대로 유지되므로 서버를 바로 시작할 수 있습니다." 
-#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 +#, fuzzy msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." msgstr "" -"시각화를 완료한 후에는 Prometheus와 Grafana를 중지합니다. 그렇지 않으면 실행 " -"중인 동안 컴퓨터에서 포트 :code:`3000` 등을 차단하므로 이 작업이 중요합니다." +"마지막으로, :code:`client.py`에서 :code:`get_parameters` 및 " +":code:`set_parameters`를 변경하여 *client* 로직을 수정할 것입니다. 서버로 보내거나 서버에서 받을 때 모델" +" 파라미터 목록에서 배치 정규화 파라미터를 제외할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" -msgstr "리소스 할당" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" -"You must understand how the Ray library works to efficiently allocate system " -"resources to simulation clients on your own." +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" msgstr "" -"Ray 라이브러리가 어떻게 작동하는지 이해해야 시뮬레이션 클라이언트에 시스템 리" -"소스를 효율적으로 할당할 수 있습니다." +"를 입력하고(클릭하기 전에 서버가 계속 실행 중인지 확인하세요), (이전에 중앙 집중된) PyTorch 프로젝트가 두 클라이언트에서" +" FedBN으로 연합 학습을 실행하는 것을 확인합니다. 축하합니다!" 
-#: ../../source/how-to-monitor-simulation.rst:136 -msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of them, " -"nor that the model training happens at all of them simultaneously. You will " -"learn more about that in the later part of this blog. You can check the " -"system resources by running the following:" -msgstr "" -"처음에 시뮬레이션(Ray가 내부에서 처리하는)은 기본적으로 시스템에서 사용 가능" -"한 모든 리소스를 사용하여 시작되며, 이 리소스는 클라이언트 간에 공유됩니다. " -"그렇다고 해서 모든 클라이언트에게 균등하게 분배하거나 모든 클라이언트에서 동" -"시에 모델 학습이 이루어지는 것은 아닙니다. 이에 대한 자세한 내용은 이 블로그" -"의 뒷부분에서 설명합니다. 다음을 실행하여 시스템 리소스를 확인할 수 있습니다:" - -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "Google Colab에서는 이와 유사한 결과가 표시될 수 있습니다:" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 +msgid "Next Steps" +msgstr "다음 단계" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do the " -"following (you don't need to overwrite all of them):" +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" msgstr "" -"그러나 기본값을 덮어쓸 수 있습니다. 시뮬레이션을 시작할 때 다음을 수행합니다" -"(모두 덮어쓸 필요는 없음):" +"이 예제의 전체 소스 코드는 '여기 `_'에서 확인할 수 있습니다. 물론 이 예제는 두 " +"클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 다소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 
이제 이 " +"주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요?" +" 클라이언트를 더 추가하는 것은 어떨까요?" -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." -msgstr "단일 클라이언트에 대한 리소스도 지정해 보겠습니다." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "예제: 파이토치 - 중앙 집중식에서 연합식으로" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"Now comes the crucial part. Ray will start a new client only when it has all " -"the required resources (such that they run in parallel) when the resources " -"allow." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" -"이제 중요한 부분이 나옵니다. Ray는 리소스가 허용하는 경우에만 필요한 모든 리" -"소스가 있을 때(병렬로 실행되는 등) 새 클라이언트를 시작합니다." +"이 튜토리얼에서는 Flower를 사용해 기존 머신 러닝 워크로드의 연합 버전을 구축하는 방법을 보여드립니다. 여기서는 " +"PyTorch를 사용해 CIFAR-10 데이터 세트에서 컨볼루션 신경망을 훈련합니다. 먼저, 'PyTorch로 딥 러닝 " +"`_ " +"튜토리얼을 기반으로 centralized 학습 접근 방식을 사용하여 이 머신 러닝 작업을 소개합니다. 그런 다음 " +"centralized 훈련 코드를 기반으로 연합 방식 훈련을 실행합니다." -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" -"In the example above, only one client will be run, so your clients won't run " -"concurrently. Setting :code:`client_num_gpus = 0.5` would allow running two " -"clients and therefore enable them to run concurrently. Be careful not to " -"require more resources than available. 
If you specified :code:" -"`client_num_gpus = 2`, the simulation wouldn't start (even if you had 2 GPUs " -"but decided to set 1 in :code:`ray_init_args`)." +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." msgstr "" -"위의 예에서는 하나의 클라이언트만 실행되므로 클라이언트가 동시에 실행되지 않" -"습니다. :code:`client_num_gpus = 0.5` 를 설정하면 두 개의 클라이언트를 실행" -"할 수 있으므로 동시에 실행할 수 있습니다. 사용 가능한 리소스보다 더 많은 리소" -"스를 요구하지 않도록 주의하세요. :code:`client_num_gpus = 2`를 지정하면 시뮬" -"레이션이 시작되지 않습니다(GPU가 2개이지만 :code:`ray_init_args`에서 1개를 설" -"정한 경우에도 마찬가지입니다)." - -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "자주 묻는 질문" - -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." -msgstr "질문: 기록된 메트릭이 보이지 않습니다." +"중앙 집중식 CNN 트레이닝 코드에 대한 간략한 설명부터 시작하겠습니다. 무슨 일이 일어나고 있는지 더 자세히 설명하려면 공식 " +"`PyTorch 튜토리얼 " +"`_을 " +"참조하세요." -#: ../../source/how-to-monitor-simulation.rst:216 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 +#, fuzzy msgid "" -"A: The timeframe might not be properly set. The setting is in the top right " -"corner (\"Last 30 minutes\" by default). Please change the timeframe to " -"reflect the period when the simulation was running." +"Let's create a new file called ``cifar.py`` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" -"A: 기간이 제대로 설정되지 않았을 수 있습니다. 설정은 오른쪽 상단에 있습니다" -"(기본값은 '지난 30분'). 시뮬레이션이 실행된 기간을 반영하도록 기간을 변경해 " -"주세요." +"CIFAR-10에 대한 기존 (중앙 집중식) 교육에 필요한 모든 구성 요소가 포함된 :code:`cifar.py`라는 새 파일을 " +"생성해 보겠습니다. 
먼저, 필요한 모든 패키지(예: :code:`torch` 및 :code:`torchvision`)를 가져와야 "
+"합니다. 연합 학습을 위한 패키지를 가져오지 않는 것을 확인할 수 있습니다. 나중에 연합 학습 구성 요소를 추가할 때에도 이러한 "
+"모든 가져오기를 그대로 유지할 수 있습니다."

-#: ../../source/how-to-monitor-simulation.rst:218
+#: ../../source/example-pytorch-from-centralized-to-federated.rst:36
+#, fuzzy
 msgid ""
-"Q: I see “Grafana server not detected. Please make sure the Grafana server "
-"is running and refresh this page” after going to the Metrics tab in Ray "
-"Dashboard."
+"As already mentioned we will use the CIFAR-10 dataset for this machine "
+"learning workload. The model architecture (a very simple Convolutional "
+"Neural Network) is defined in ``class Net()``."
 msgstr ""
-"질문: \"Grafana 서버가 감지되지 않았습니다. Ray 대시보드의 메트릭 탭으로 이동"
-"한 후 Grafana 서버가 실행 중인지 확인하고 이 페이지를 새로고침하세요.\"라는 "
-"메시지가 표시됩니다."
+"이미 언급했듯이 이 머신 러닝 워크로드에는 CIFAR-10 데이터 세트를 사용합니다. 모델 아키텍처(매우 간단한 컨볼루션 신경망)는"
+" :code:`class Net()`에 정의되어 있습니다."

-#: ../../source/how-to-monitor-simulation.rst:220
+#: ../../source/example-pytorch-from-centralized-to-federated.rst:62
+#, fuzzy
 msgid ""
-"A: You probably don't have Grafana running. Please check the running services"
+"The ``load_data()`` function loads the CIFAR-10 training and test sets. "
+"The ``transform`` normalized the data after loading."
 msgstr ""
-"A: Grafana가 실행되고 있지 않을 수 있습니다. 실행 중인 서비스를 확인하세요"
+":code:`load_data()` 함수는 CIFAR-10 훈련 및 테스트 세트를 로드합니다. :code:`transform`은 "
+"로드 후 데이터를 정규화합니다."

-#: ../../source/how-to-monitor-simulation.rst:226
+#: ../../source/example-pytorch-from-centralized-to-federated.rst:84
+#, fuzzy
 msgid ""
-"Q: I see \"This site can't be reached\" when going to "
-"``_."
+"We now need to define the training (function ``train()``) which loops "
+"over the training set, measures the loss, backpropagates it, and then "
+"takes one optimizer step for each batch of training examples."
 msgstr ""
-"Q: ``_로 이동할 때 \"이 사이트에 연결할 수 없습니다."
-"\"라는 메시지가 표시됩니다."
+"이제 학습 집합을 반복하고, 손실을 측정하고, 이를 역전파한 다음 각 학습 예제 배치에 대해 하나의 최적화 단계를 수행하는 " +"학습(함수 :code:`train()`)을 정의해야 합니다." -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 +#, fuzzy msgid "" -"A: Either the simulation has already finished, or you still need to start " -"Prometheus." -msgstr "A: 시뮬레이션이 이미 완료되었거나 아직 Prometheus를 시작해야 합니다." +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." +msgstr "" +"모델 평가는 :code:`test()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 샘플을 반복하고 테스트 데이터 세트에 따라" +" 모델의 손실을 측정합니다." -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" -msgstr "리소스" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." +msgstr "데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으면 모든 것을 종합하여 CIFAR-10에서 CNN을 훈련할 수 있습니다." -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" -"Ray Dashboard: ``_" +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" -"Ray 대시보드: ``_" - -#: ../../source/how-to-monitor-simulation.rst:236 -msgid "Ray Metrics: ``_" -msgstr "Ray 메트릭: ``_" - -#: ../../source/how-to-run-flower-using-docker.rst:2 -msgid "Run Flower using Docker" -msgstr "Docker를 사용하여 Flower 실행" +"지금까지는 파이토치를 사용해 본 적이 있다면 상당히 익숙하게 보일 것입니다. 다음 단계로 넘어가서 구축한 것을 사용하여 하나의 " +"서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." 
-#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" -"The simplest way to get started with Flower is by using the pre-made Docker " -"images, which you can find on `Docker Hub `__. Supported architectures include ``amd64`` and ``arm64v8``." +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." msgstr "" -"Flower를 시작하는 가장 간단한 방법은 `Docker Hub `__에서 찾을 수 있는 미리 만들어진 Docker 이미지를 사용하는 것입니다. 지" -"원되는 아키텍처는 ``amd64`` 및 ``arm64v8``입니다." - -#: ../../source/how-to-run-flower-using-docker.rst:8 -msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "시작하기 전에 Docker daemon이 실행 중인지 확인하세요:" +"이전 섹션에서 설명한 간단한 머신 러닝 프로젝트는 단일 데이터 세트(CIFAR-10)로 모델을 학습시키는데, 이를 중앙 집중식 " +"학습이라고 부릅니다. 이전 섹션에서 설명한 중앙 집중식 학습의 개념은 대부분 알고 계실 것이며, 많은 분들이 이전에 사용해 보셨을 " +"것입니다. 일반적으로 머신 러닝 워크로드를 연합 방식으로 실행하려면 대부분의 코드를 변경하고 모든 것을 처음부터 다시 설정해야 " +"합니다. 이는 상당한 노력이 필요할 수 있습니다." -#: ../../source/how-to-run-flower-using-docker.rst:15 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" -"If you do not see the version of Docker but instead get an error saying that " -"the command was not found, you will need to install Docker first. You can " -"find installation instruction `here `_." -msgstr "" -"전이 표시되지 않고 대신 명령을 찾을 수 없다는 오류가 표시되는 경우 먼저 " -"Docker를 설치해야 합니다. `여기 `_에서 " -"설치 지침을 찾을 수 있습니다." +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." 
+msgstr "하지만 Flower를 사용하면 대대적인 재작성 없이도 기존 코드를 연합 학습 설정으로 발전시킬 수 있습니다." + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 +#, fuzzy +msgid "" +"The concept is easy to understand. We have to start a *server* and then " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." +msgstr "" +"개념은 이해하기 쉽습니다. *서버*를 시작한 다음 *서버*에 연결된 *클라이언트*에 대해 :code:`cifar.py`의 코드를 " +"사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. *클라이언트*는 학습을 실행하고 파라미터를 업데이트합니다. " +"업데이트된 파라미터는 *서버*로 다시 전송되며, *서버*는 수신된 모든 파라미터 업데이트의 평균을 구합니다. 이것은 연합 학습 " +"프로세스의 한 라운드를 설명하며 여러 라운드에 걸쳐 이 과정을 반복합니다." + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 +#, fuzzy +msgid "" +"Our example consists of one *server* and two *clients*. Let's set up " +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." +msgstr "" +"이 예제는 하나의 *서버*와 두 개의 *클라이언트*로 구성됩니다. 먼저 :code:`server.py`를 설정해 보겠습니다. " +"*server*는 Flower 패키지 :code:`flwr`를 가져와야 합니다. 다음으로, :code:`start_server` " +"함수를 사용하여 서버를 시작하고 세 차례의 연합 학습을 수행하도록 지시합니다." + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 +msgid "We can already start the *server*:" +msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/how-to-run-flower-using-docker.rst:21 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#, fuzzy msgid "" -"On Linux, Docker commands require ``sudo`` privilege. 
If you want to avoid " -"using ``sudo``, you can follow the `Post-installation steps `_ on the official Docker " -"website." +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" -"Linux에서 Docker 명령을 실행하려면 ``sudo`` 권한이 필요합니다. ``sudo`` 를 " -"사용하지 않으려면 공식 Docker 웹사이트의 `Post-installation steps " -"`_를 따르세요." +"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`cifar.py`에서 이전에 정의한 " +"중앙 집중식 학습을 기반으로 구축합니다. *클라이언트*는 :code:`flwr`을 가져와야 하며, PyTorch 모델의 파라미터를 " +"업데이트하기 위해 :code:`torch`도 가져와야 합니다:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 +#, fuzzy +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. ``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" +msgstr "" +"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " +":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 우리의 구현은 " +":code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:`CifarClient`라고 부를 " +"것입니다. :code:`NumPyClient`는 파이토치나 텐서플로우/Keras처럼 NumPy 상호운용성이 좋은 프레임워크를 " +"사용하는 경우 필요한 일부 보일러플레이트를 피하기 때문에 :code:`Client`보다 구현하기가 조금 더 쉽습니다. 
" +"code:`CifarClient`는 모델 파라미터를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 테스트를" +" 위한 메서드 1개 등 네 가지 메서드를 구현해야 합니다:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" +msgstr ":code:`set_parameters`" -#: ../../source/how-to-run-flower-using-docker.rst:27 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode " -"and ServerApp image must have the same version when running together. This " -"guarantees seamless integration and avoids potential conflicts or issues " -"that may arise from using different versions." -msgstr "" -"최적의 성능과 호환성을 보장하려면 SuperLink, SuperNode 및 ServerApp 이미지를 " -"함께 실행할 때 버전이 동일해야 합니다. 이렇게 하면 원활한 통합을 보장하고 서" -"로 다른 버전을 사용할 때 발생할 수 있는 잠재적인 충돌이나 문제를 방지할 수 있" -"습니다." +"set the model parameters on the local model that are received from the " +"server" +msgstr "서버에서 수신한 로컬 모델의 모델 파라미터를 설정합니다" -#: ../../source/how-to-run-flower-using-docker.rst:32 -msgid "Flower SuperLink" -msgstr "Flower SuperLink" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" +msgstr "(신경망 레이어 목록으로 생각하면 됩니다) NumPy :code:`ndarray`로 받은 모델 파라미터 목록에 대해 반복합니다" -#: ../../source/how-to-run-flower-using-docker.rst:35 -msgid "Quickstart" -msgstr "빠른 시작" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +#, fuzzy +msgid "``get_parameters``" +msgstr ":code:`get_parameters`" -#: ../../source/how-to-run-flower-using-docker.rst:37 -msgid "If you're looking to try out Flower, you can use the following command:" -msgstr "Flower를 사용해보고 싶다면 다음 명령을 사용하면 됩니다:" - -#: 
../../source/how-to-run-flower-using-docker.rst:43 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#, fuzzy msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker Hub. " -"The tag specifies the Flower version. In this case, Flower 1.8.0. The ``--" -"rm`` flag tells Docker to remove the container after it exits." +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" -"이 명령은 Docker Hub에서 ``1.8.0`` 태그가 있는 Docker 이미지를 가져옵니다. " -"이 태그는 Flower 버전을 지정합니다. 이 경우, Flower 1.8.0입니다. '`--rm`` 플" -"래그는 컨테이너가 종료된 후 컨테이너를 제거하도록 Docker에 지시합니다." +"모델 매개변수를 가져와서 NumPy :code:`ndarray`의 목록으로 반환합니다(이는 " +":code:`flwr.client.NumPyClient`가 기대하는 바와 같습니다)" -#: ../../source/how-to-run-flower-using-docker.rst:49 -msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container starts. " -"We will show below how to save the state in a file on your host system." -msgstr "" -"기본적으로 Flower SuperLink는 상태를 in-memory에 유지합니다. Docker 플래그 " -"`--rm``을 사용하는 경우 컨테이너 시작 사이에 상태가 유지되지 않습니다. 아래에" -"서 호스트 시스템의 파일에 상태를 저장하는 방법을 보여드리겠습니다." 
+#: ../../source/example-pytorch-from-centralized-to-federated.rst:257
+#: ../../source/tutorial-quickstart-jax.rst:202
+#: ../../source/tutorial-quickstart-scikitlearn.rst:136
+#, fuzzy
+msgid "``fit``"
+msgstr "``fit``"

-#: ../../source/how-to-run-flower-using-docker.rst:53
+#: ../../source/example-pytorch-from-centralized-to-federated.rst:255
+#: ../../source/example-pytorch-from-centralized-to-federated.rst:260
+#: ../../source/tutorial-quickstart-jax.rst:200
+#: ../../source/tutorial-quickstart-jax.rst:205
 msgid ""
-"The ``-p :`` flag tells Docker to map the ports ``9091``/"
-"``9092`` of the host to ``9091``/``9092`` of the container, allowing you to "
-"access the Driver API on ``http://localhost:9091`` and the Fleet API on "
-"``http://localhost:9092``. Lastly, any flag that comes after the tag is "
-"passed to the Flower SuperLink. Here, we are passing the flag ``--insecure``."
+"update the parameters of the local model with the parameters received "
+"from the server"
 msgstr ""
-"``-p :`` 플래그는 호스트의 포트 ``9091``/``9092``를 컨테이너"
-"의 ``9091``/``9092``에 매핑하여 ``http://localhost:9091``의 드라이버 API와 "
-"``http://localhost:9092``의 Fleet API에 액세스할 수 있도록 Docker에 지시합니"
-"다. 마지막으로, 태그 뒤에 오는 모든 플래그는 Flower SuperLink에 전달됩니다. "
-"여기서는 ``--insecure``플래그를 전달합니다."
+"서버에서 받은 파라미터로 로컬 모델의 파라미터를 업데이트합니다"

-#: ../../source/how-to-run-flower-using-docker.rst:60
-#: ../../source/how-to-run-flower-using-docker.rst:259
-#: ../../source/how-to-run-flower-using-docker.rst:376
-msgid ""
-"The ``--insecure`` flag enables insecure communication (using HTTP, not "
-"HTTPS) and should only be used for testing purposes. We strongly recommend "
-"enabling `SSL `__ when deploying to a "
-"production environment."
-msgstr ""
-"``--insecure`` 플래그는 안전하지 않은 통신(HTTPS가 아닌 HTTP 사용)을 활성화하"
-"며 테스트 목적으로만 사용해야 합니다. 프로덕션 환경에 배포할 때는 `SSL "
-"`__을 활성화할 것을 강력히 권장합니"
-"다."
+#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +msgid "train the model on the local training set" +msgstr "로컬 훈련 세트에서 모델을 훈련합니다" -#: ../../source/how-to-run-flower-using-docker.rst:65 -msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" -msgstr "" -"'`--help``을 사용하면 SuperLink가 지원하는 모든 플래그를 볼 수 있습니다:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 +msgid "get the updated local model weights and return them to the server" +msgstr "업데이트된 로컬 모델 가중치를 가져와 서버로 반환합니다" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +#, fuzzy +msgid "``evaluate``" +msgstr ":code:`evaluate`" -#: ../../source/how-to-run-flower-using-docker.rst:72 -msgid "Mounting a volume to store the state on the host system" -msgstr "호스트 시스템에 상태를 저장할 볼륨 마운트하기" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 +msgid "evaluate the updated model on the local test set" +msgstr "로컬 테스트 세트에서 업데이트된 모델을 평가합니다" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +msgid "return the local loss and accuracy to the server" +msgstr "로컬 손실 및 정확도를 서버에 반환합니다" -#: ../../source/how-to-run-flower-using-docker.rst:74 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 +#, fuzzy msgid "" -"If you want to persist the state of the SuperLink on your host system, all " -"you need to do is specify a directory where you want to save the file on " -"your host system and a name for the database file. By default, the SuperLink " -"container runs with a non-root user called ``app`` with the user ID " -"``49999``. 
It is recommended to create new directory and change the user ID " -"of the directory to ``49999`` to ensure the mounted directory has the proper " -"permissions. If you later want to delete the directory, you can change the " -"user ID back to the current user ID by running ``sudo chown -R $USER:$(id -" -"gn) state``." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" -"호스트 시스템에서 SuperLink의 상태를 유지하려면 호스트 시스템에서 파일을 저장" -"할 디렉터리와 데이터베이스 파일의 이름을 지정하기만 하면 됩니다. 기본적으로 " -"SuperLink 컨테이너는 사용자 ID가 ``49999``인 ``app``이라는 루트가 아닌 사용자" -"로 실행됩니다. 마운트된 디렉터리에 적절한 권한이 있는지 확인하려면 새 디렉터" -"리를 생성하고 디렉터리의 사용자 ID를 ``49999``로 변경하는 것이 좋습니다. 나중" -"에 디렉터리를 삭제하려면 ``sudo chown -R $USER:$(id -gn) state``를 실행하여 " -"사용자 ID를 현재 사용자 ID로 다시 변경할 수 있습니다." +"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " +":code:`cifar.py`에 정의된 함수인 :code:`train()`과 :code:`test()`를 활용합니다. 따라서 여기서" +" 실제로 하는 일은 :code:`NumPyClient` 서브클래스를 통해 이미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 " +"Flower에 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 있도록 type annotations을 포함했습니다." + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 +#, fuzzy +msgid "" +"All that's left to do it to define a function that loads both model and " +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" +msgstr "" +"이제 모델과 데이터를 모두 로드하는 함수를 정의하고, :code:`CifarClient`를 생성하고, 이 클라이언트를 시작하는 " +"작업만 남았습니다. 코드:`cifar.py`를 사용하여 데이터와 모델을 로드합니다. 
:code:`server.py`에서 사용한 것과" +" 동일한 IP 주소를 지정하여 :code:`fl.client.start_client()` 함수로 " +":code:`CifarClient`를 시작합니다:" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 +msgid "And that's it. You can now open two additional terminal windows and run" +msgstr "여기까지입니다. 이제 두 개의 터미널 창을 추가로 열고 다음을 실행할 수 있습니다" -#: ../../source/how-to-run-flower-using-docker.rst:82 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" -"In the example below, we create a new directory, change the user ID and tell " -"Docker via the flag ``--volume`` to mount the local ``state`` directory into " -"the ``/app/state`` directory of the container. Furthermore, we use the flag " -"``--database`` to specify the name of the database file." +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" msgstr "" -"아래 예에서는 새 디렉터리를 생성하고, 사용자 ID를 변경하고, 플래그 ``--" -"volume``을 통해 Docker에게 로컬 ``state`` 디렉터리를 컨테이너의 ``/app/" -"state`` 디렉터리에 마운트하도록 지시합니다. 또한 ``--database`` 플래그를 사용" -"하여 데이터베이스 파일의 이름을 지정합니다." +"를 입력하고(그 전에 서버가 실행 중인지 확인하세요) (이전에는 중앙 집중식) PyTorch 프로젝트가 두 클라이언트에서 연합 " +"학습을 실행하는 것을 확인합니다. 축하합니다!" -#: ../../source/how-to-run-flower-using-docker.rst:95 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"``state`` directory on your host system. If the file already exists, the " -"SuperLink tries to restore the state from the file. To start the SuperLink " -"with an empty database, simply remove the ``state.db`` file." -msgstr "" -"SuperLink가 시작되자마자 호스트 시스템의 ``state`` 디렉터리에 ``state.db`` 파" -"일이 생성됩니다. 파일이 이미 존재하는 경우 SuperLink는 파일에서 상태를 복원하" -"려고 시도합니다. 빈 데이터베이스로 SuperLink를 시작하려면 ``state.db`` 파일" -"을 제거하면 됩니다." 
- -#: ../../source/how-to-run-flower-using-docker.rst:100 -#: ../../source/how-to-run-flower-using-docker.rst:281 -#: ../../source/how-to-run-flower-using-docker.rst:397 -msgid "Enabling SSL for secure connections" -msgstr "보안 연결을 위한 SSL 사용 설정" +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" +msgstr "" +"이 예제의 전체 소스 코드: `파이토치: 중앙 Centralized에서 Federated으로 (코드) " +"`_. 물론 이 예제는 두 클라이언트가 완전히 동일한 데이터 세트를 로드하기 때문에 " +"다소 지나치게 단순화되어 있으며, 이는 현실적이지 않습니다. 이제 이 주제를 더 자세히 살펴볼 준비가 되셨습니다. 각 클라이언트에서" +" 서로 다른 CIFAR-10의 하위 집합을 사용해 보는 것은 어떨까요? 클라이언트를 더 추가하는 것은 어떨까요?" + +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:14 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" +msgstr "차등 프라이버시" -#: ../../source/how-to-run-flower-using-docker.rst:102 +#: ../../source/explanation-differential-privacy.rst:4 msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-encoded " -"private key and a PEM-encoded certificate chain." +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -"SSL을 사용하려면 PEM으로 인코딩된 루트 인증서, PEM으로 인코딩된 개인 키 및 " -"PEM으로 인코딩된 인증서 체인이 필요합니다." +"의료, 금융 거래, 사용자 선호도 등과 같은 데이터 세트의 정보는 가치 있고 과학적 혁신의 잠재력을 지니고 있으며 중요한 비즈니스 " +"인사이트를 제공합니다. 그러나 이러한 데이터는 또한 민감한 정보이며 개인의 프라이버시를 침해할 위험이 있습니다." 
-#: ../../source/how-to-run-flower-using-docker.rst:106 +#: ../../source/explanation-differential-privacy.rst:9 msgid "" -"For testing purposes, you can generate your own self-signed certificates. " -"The `Enable SSL connections `__ page contains a section that will " -"guide you through the process." +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -"테스트 목적으로 자체 서명된 인증서를 생성할 수 있습니다. 'SSL 연결 사용 " -"`__ 페이지에 프로세스를 안내하는 섹션이 있습니다." - -#: ../../source/how-to-run-flower-using-docker.rst:110 -msgid "" -"Assuming all files we need are in the local ``certificates`` directory, we " -"can use the flag ``--volume`` to mount the local directory into the ``/app/" -"certificates/`` directory of the container. This allows the SuperLink to " -"access the files within the container. The ``ro`` stands for ``read-only``. " -"Docker volumes default to ``read-write``; that option tells Docker to make " -"the volume ``read-only`` instead. Finally, we pass the names of the " -"certificates and key file to the SuperLink with the ``--ssl-ca-certfile``, " -"``--ssl-certfile`` and ``--ssl-keyfile`` flag." -msgstr "" -"필요한 모든 파일이 로컬``certificates`` 디렉터리에 있다고 가정하면, ``--" -"volume``플래그를 사용하여 로컬 디렉터리를 컨테이너의 ``/app/certificates/`` " -"디렉터리에 마운트할 수 있습니다. 이렇게 하면 SuperLink 가 컨테이너 내의 파일" -"에 액세스할 수 있습니다. ``ro``는 ``read-only``을 의미합니다. Docker 볼륨은 " -"기본적으로 ``read-write``로 설정되어 있는데, 이 옵션을 사용하면 볼륨을 " -"``read-only``으로 만들 수 있습니다. 마지막으로 인증서 및 키 파일의 이름을 " -"``--ssl-ca-certfile``, ``--ssl-certfile`` 및 ``--ssl-keyfile`` 플래그와 함께 " -"SuperLink에 전달합니다." 
- -#: ../../source/how-to-run-flower-using-docker.rst:128 -msgid "" -"Because Flower containers, by default, run with a non-root user ``app``, the " -"mounted files and directories must have the proper permissions for the user " -"ID ``49999``. For example, to change the user ID of all files in the " -"``certificates/`` directory, you can run ``sudo chown -R 49999:49999 " -"certificates/*``." -msgstr "" -"기본적으로 Flower 컨테이너는 루트가 아닌 사용자 ``app``로 실행되므로 마운트" -"된 파일과 디렉터리에 사용자 ID ``49999``에 대한 적절한 권한이 있어야 합니다. " -"예를 들어, ``certificates/`` 디렉터리에 있는 모든 파일의 사용자 ID를 변경하려" -"면 ``sudo chown -R 49999:49999 certificates/*``를 실행하면 됩니다." - -#: ../../source/how-to-run-flower-using-docker.rst:134 -msgid "Flower SuperNode" -msgstr "Flower SuperNode" +"익명화와 같은 기존 방법만으로는 재식별 및 데이터 연결과 같은 공격으로 인해 효과가 없습니다. 그래서 차등 프라이버시가 " +"등장했습니다. 차등 프라이버시는 개인의 개인 정보 보호를 보장하면서 데이터를 분석할 수 있는 가능성을 제공합니다." -#: ../../source/how-to-run-flower-using-docker.rst:136 +#: ../../source/explanation-differential-privacy.rst:16 msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower and " -"serves as a base for building your own SuperNode image." +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -"SuperNode Docker 이미지는 Flower의 사전 설치된 버전과 함께 제공되며, 자체 " -"SuperNode 이미지를 구축하기 위한 기반 역할을 합니다." +"하나의 레코드(예: 앨리스의 데이터)를 제외하고는 동일한 두 개의 데이터 세트가 있다고 상상해 보세요. 차등 프라이버시(DP)는 " +"평균 소득 계산과 같은 모든 분석(M)이 두 데이터 세트에 대해 거의 동일한 결과를 산출하도록 보장합니다(O와 O' 는 비슷할 " +"것입니다). 이렇게 하면 그룹 패턴은 보존하면서 개별 세부 정보는 가려져 개인의 정보가 군중 속에 숨겨집니다." 
-#: ../../source/how-to-run-flower-using-docker.rst:141 +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" +msgstr "DP 소개" + +#: ../../source/explanation-differential-privacy.rst:27 msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) gets " -"released (ETA: May). A SuperNode nightly image must be paired with the " -"corresponding SuperLink and ServerApp nightly images released on the same " -"day. To ensure the versions are in sync, using the concrete tag, e.g., " -"``1.9.0.dev20240501`` instead of ``nightly`` is recommended." +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -"SuperNode Docker 이미지는 현재 1.9.0 야간 릴리스에서만 작동합니다. 안정 버전" -"은 Flower 1.9.0(안정)이 출시되면 사용할 수 있습니다(예상 출시일: 5월). " -"SuperNode 야간 이미지는 같은 날 릴리스된 해당 SuperLink 및 서버앱 야간 이미지" -"와 페어링되어야 합니다. 버전이 동기화되도록 하려면 ``nightly`` 대신 ``1.9.0." -"dev20240501``과 같은 구체적인 태그를 사용하는 것이 좋습니다." +"DP를 달성하기 위해 가장 일반적으로 사용되는 메커니즘 중 하나는 분석의 전반적인 정확도를 유지하면서 데이터에서 각 개인의 기여도를" +" 가릴 수 있도록 분석 결과에 충분한 노이즈를 추가하는 것입니다." + +#: ../../source/explanation-differential-privacy.rst:32 +msgid "Formal Definition" +msgstr "공식 정의" -#: ../../source/how-to-run-flower-using-docker.rst:147 +#: ../../source/explanation-differential-privacy.rst:34 msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the " -"Flower repository, to illustrate how you can dockerize your ClientApp." +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. 
A "
+"randomized mechanism M provides (:math:`\\epsilon`, "
+":math:`\\delta`)-differential privacy if for any two neighboring "
+"databases, D :sub:`1` and D :sub:`2`, that differ in only a single "
+"record, and for all possible outputs S ⊆ Range(A):"
 msgstr ""
-"Flower 레포지토리에서 찾을 수 있는 ``quickstart-pytorch`` 예제를 사용하여 "
-"ClientApp을 도커라이즈하는 방법을 설명하겠습니다."
+"차등 프라이버시(DP)는 공격자가 무작위 알고리즘의 출력을 통해 유추할 수 있는 정보에 대해 통계적 보장을 제공합니다. 이는 "
+"노이즈를 추가하여 알고리즘의 출력에 대한 한 개인의 영향력에 대한 무조건적인 상한선을 제공합니다[1]. 무작위 메커니즘 M은 하나의"
+" 레코드만 다른 두 개의 인접 데이터베이스인 D :sub:`1`과 D :sub:`2`의 경우, 가능한 모든 출력 S ⊆ "
+"Range(A)에 대해 (:math:`\\epsilon`, :math:`\\delta`)-차등 프라이버시를 제공합니다:"

-#: ../../source/how-to-run-flower-using-docker.rst:155
+#: ../../source/explanation-differential-privacy.rst:42
+#, fuzzy
 msgid ""
-"Before we can start, we need to meet a few prerequisites in our local "
-"development environment. You can skip the first part if you want to run your "
-"ClientApp instead of the ``quickstart-pytorch`` example."
+"\\small\n"
+"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta"
 msgstr ""
-"시작하기 전에 로컬 개발 환경에서 몇 가지 전제 조건을 충족해야 합니다. "
-"'quickstart-pytorch' 예제 대신 ClientApp을 실행하려는 경우 첫 번째 부분을 건"
-"너뛸 수 있습니다."
+"\\small\n"
+"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta"

-#: ../../source/how-to-run-flower-using-docker.rst:159
-msgid "Clone the Flower repository."
-msgstr "플라워 레포지토리를 클론합니다."

+#: ../../source/explanation-differential-privacy.rst:47
+msgid ""
+"The :math:`\\epsilon` parameter, also known as the privacy budget, is a "
+"metric of privacy loss. It also controls the privacy-utility trade-off; "
+"lower :math:`\\epsilon` values indicate higher levels of privacy but are "
+"likely to reduce utility as well. The :math:`\\delta` parameter accounts "
+"for a small probability on which the upper bound :math:`\\epsilon` does "
+"not hold. 
The amount of noise needed to achieve differential privacy is "
+"proportional to the sensitivity of the output, which measures the maximum"
+" change in the output due to the inclusion or removal of a single record."
+msgstr ""
+"프라이버시 예산이라고도 하는 :math:`\\epsilon` 매개변수는 프라이버시 손실을 측정하는 지표입니다. 이 매개변수는 "
+"프라이버시와 효용의 균형을 제어하며, :math:`\\epsilon` 값이 낮을수록 프라이버시 수준이 높지만 효용도 감소할 가능성이"
+" 높습니다. :math:`\\delta` 매개변수는 상한값인 :math:`\\epsilon`이 적용되지 않는 작은 확률을 설명합니다."
+" 차등 프라이버시를 달성하는 데 필요한 노이즈의 양은 출력의 감도에 비례하며, 이는 단일 레코드의 포함 또는 제거로 인한 출력의 "
+"최대 변화를 측정합니다."
+
+#: ../../source/explanation-differential-privacy.rst:56
+msgid "Differential Privacy in Machine Learning"
+msgstr "머신 러닝의 차등 프라이버시"

-#: ../../source/how-to-run-flower-using-docker.rst:173
-msgid "Creating a SuperNode Dockerfile"
-msgstr "SuperNode Dockerfile 만들기"

+#: ../../source/explanation-differential-privacy.rst:58
+msgid ""
+"DP can be utilized in machine learning to preserve the privacy of the "
+"training data. Differentially private machine learning algorithms are "
+"designed in a way to prevent the algorithm to learn any specific "
+"information about any individual data points and subsequently prevent the"
+" model from revealing sensitive information. Depending on the stage at "
+"which noise is introduced, various methods exist for applying DP to "
+"machine learning algorithms. One approach involves adding noise to the "
+"training data (either to the features or labels), while another method "
+"entails injecting noise into the gradients of the loss function during "
+"model training. Additionally, such noise can be incorporated into the "
+"model's output."
+msgstr ""
+"머신 러닝에서 DP를 활용하여 학습 데이터의 개인정보를 보호할 수 있습니다. 차등 비공개 머신 러닝 알고리즘은 알고리즘이 개별 "
+"데이터 포인트에 대한 특정 정보를 학습하지 못하도록 하여 모델이 민감한 정보를 노출하지 않도록 하는 방식으로 설계되었습니다. "
+"노이즈가 도입되는 단계에 따라 머신 러닝 알고리즘에 DP를 적용하는 다양한 방법이 존재합니다. 한 가지 방법은 학습 데이터(특징 "
+"또는 레이블)에 노이즈를 추가하는 것이고, 다른 방법은 모델 학습 중에 손실 함수의 기울기에 노이즈를 주입하는 것입니다. 또한 "
+"이러한 노이즈를 모델의 출력에 통합할 수도 있습니다."
-#: ../../source/how-to-run-flower-using-docker.rst:175 -#: ../../source/how-to-run-flower-using-docker.rst:311 -msgid "Let's assume the following project layout:" -msgstr "다음과 같은 프로젝트 레이아웃을 가정해 보겠습니다:" +#: ../../source/explanation-differential-privacy.rst:69 +msgid "Differential Privacy in Federated Learning" +msgstr "연합 학습의 차등 프라이버시" -#: ../../source/how-to-run-flower-using-docker.rst:184 +#: ../../source/explanation-differential-privacy.rst:71 msgid "" -"First, we need to create a ``requirements.txt`` file in the directory where " -"the ``ClientApp`` code is located. In the file, we list all the dependencies " -"that the ClientApp requires." +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." msgstr "" -"먼저 ``ClientApp`` 코드가 있는 디렉토리에 ``requirements.txt`` 파일을 " -"만들어야 합니다. 이 파일에는 클라이언트 앱에 필요한 모든 의존성을 나열합니다." +"연합 학습은 여러 당사자가 원시 데이터를 공유하지 않고도 공동으로 모델을 학습할 수 있는 데이터 최소화 접근 방식입니다. 그러나 " +"연합 학습은 새로운 개인정보 보호 문제를 야기하기도 합니다. 당사자와 중앙 서버 간의 모델 업데이트는 로컬 데이터에 대한 정보를 " +"유출할 수 있습니다. 이러한 유출은 멤버십 추론 및 속성 추론 공격이나 모델 반전 공격과 같은 공격에 악용될 수 있습니다." -#: ../../source/how-to-run-flower-using-docker.rst:196 +#: ../../source/explanation-differential-privacy.rst:78 msgid "" -"Note that `flwr `__ is already installed in " -"the ``flwr/supernode`` base image, so you only need to include other package " -"dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." -msgstr "" -"`flwr `__ 는 이미 ``flwr/supernode`` 기본 이" -"미지에 설치되어 있으므로, ``torch``, ``tensorflow`` 등과 같은 다른 패키지 " -"dependencies만 ``requirements.txt``에 포함시키면 됩니다." 
+"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." +msgstr "DP는 연합 학습에서 클라이언트의 데이터에 대한 개인 정보 보호를 제공하는 데 중요한 역할을 할 수 있습니다." -#: ../../source/how-to-run-flower-using-docker.rst:200 +#: ../../source/explanation-differential-privacy.rst:81 msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` example, " -"create a new file called ``Dockerfile.supernode`` in ``examples/quickstart-" -"pytorch``." +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" -"다음으로, Dockerfile을 생성합니다.``quickstart-pytorch`` 예제를 사용하는 경" -"우 ``examples/quickstart-pytorch``에 ``Dockerfile.supernode``라는 새 파일을 " -"생성합니다." +"개인 정보 제공의 세분성 또는 노이즈 추가 위치에 따라 연합 학습에는 다양한 형태의 DP가 존재합니다. 이 설명에서는 노이즈가 " +"추가되는 위치에 따라 서버(중앙이라고도 함) 또는 클라이언트(로컬이라고도 함)에서의 연합 학습에서 DP를 활용하는 두 가지 접근 " +"방식에 중점을 둡니다." -#: ../../source/how-to-run-flower-using-docker.rst:203 +#: ../../source/explanation-differential-privacy.rst:86 msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -"``Dockerfile.supernode``에는 SuperNode 이미지를 조립하는 지침이 포함되어 있습" -"니다." +"**중앙 차등 프라이버시**: DP는 서버에서 적용되며 집계된 모델이 각 클라이언트의 데이터에 대한 정보를 유출하는 것을 방지하는 " +"것이 목표입니다." -#: ../../source/how-to-run-flower-using-docker.rst:217 +#: ../../source/explanation-differential-privacy.rst:88 msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image tagged " -"``nightly`` as a base image and set our working directory to ``/app``. 
The " -"following instructions will now be executed in the ``/app`` directory. Next, " -"we install the ClientApp dependencies by copying the ``requirements.txt`` " -"file into the image and run ``pip install``. In the last two lines, we copy " -"the ``client.py`` module into the image and set the entry point to ``flower-" -"client-app`` with the argument ``client:app``. The argument is the object " -"reference of the ClientApp (``:``) that will be run " -"inside the ClientApp." +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." msgstr "" -"처음 두 줄에서는 ``nightly`` 태그가 붙은 SuperNode 이미지를 기본 이미지로 사" -"용하고 작업 디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제 ``/" -"app`` 디렉토리에서 다음 명령이 실행됩니다. 다음으로, ``requirements.txt`` 파" -"일을 이미지에 복사하여 ClientApp dependencies 요소를 설치하고 ``pip install``" -"을 실행합니다. 마지막 두 줄에서 ``client.py`` 모듈을 이미지에 복사하고 " -"``client:app`` 인수를 사용하여 진입점을 ``flower-client-app``로 설정합니다. " -"인수는 클라이언트앱 내부에서 실행될 클라이언트앱의 객체 참조 (``:" -"``) 입니다." +"**로컬 차등 프라이버시**: DP는 정보를 서버로 보내기 전에 클라이언트 측에서 적용되며, 서버로 전송되는 업데이트가 클라이언트 " +"데이터에 대한 정보를 유출하는 것을 방지하는 것이 목표입니다." -#: ../../source/how-to-run-flower-using-docker.rst:226 -msgid "Building the SuperNode Docker image" -msgstr "SuperNode Docker 이미지 빌드" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:93 +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "Central Differential Privacy" +msgstr "중앙 차등 프라이버시" + +#: ../../source/explanation-differential-privacy.rst:95 +msgid "" +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." +msgstr "" +"사용자 수준 DP라고도 하는 이 접근 방식에서는 중앙 서버가 전역적으로 집계된 매개변수에 노이즈를 추가하는 역할을 담당합니다. 
" +"서버에 대한 신뢰가 필요하다는 점에 유의해야 합니다." + +#: ../../source/explanation-differential-privacy.rst:104 +msgid "" +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." +msgstr "" +"연합 학습에서 중앙 DP를 구현하는 방법은 여러 가지가 있지만, 여기서는 [2]와 [3]에서 제안한 알고리즘에 집중합니다. 전반적인" +" 접근 방식은 클라이언트가 전송한 모델 업데이트를 잘라내고 집계된 모델에 약간의 노이즈를 추가하는 것입니다. 각 반복에서 특정 " +"확률로 훈련할 무작위 클라이언트 세트가 선택됩니다. 각 클라이언트는 자체 데이터에 대해 로컬 학습을 수행합니다. 그런 다음 각 " +"클라이언트의 업데이트는 특정 값 `S`(민감도 `S`)에 의해 잘립니다. 이렇게 하면 개별 클라이언트의 영향을 제한할 수 있어 " +"개인정보 보호에 중요하고 견고성에 도움이 되는 경우가 많습니다. 이를 달성하기 위한 일반적인 접근 방식은 클라이언트 모델 업데이트의" +" `L2` 규범을 제한하여 더 큰 업데이트가 규범 `S`에 맞도록 축소되도록 하는 것입니다." -#: ../../source/how-to-run-flower-using-docker.rst:228 -msgid "" -"Next, we build the SuperNode Docker image by running the following command " -"in the directory where Dockerfile and ClientApp code are located." -msgstr "" -"다음으로, Dockerfile 및 ClientApp 코드가 있는 디렉터리에서 다음 명령을 실행하" -"여 SuperNode Docker 이미지를 빌드합니다." +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" +msgstr "클리핑" -#: ../../source/how-to-run-flower-using-docker.rst:235 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. 
" -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -"이미지에 ``flwr_supernode``라는 이름을 붙이고 ``0.0.1`` 태그를 붙였습니다. 여" -"기서 선택한 값은 예시일 뿐이라는 점을 기억하세요. 필요에 따라 변경할 수 있습" -"니다." - -#: ../../source/how-to-run-flower-using-docker.rst:240 -msgid "Running the SuperNode Docker image" -msgstr "SuperNode Docker 이미지 실행" +"그 후 가우시안 메커니즘을 사용하여 모든 클라이언트의 업데이트 합계를 왜곡하기 위해 노이즈를 추가합니다. 노이즈의 양은 감도 값에 " +"따라 조정되어 프라이버시 보장을 얻습니다. 가우시안 메커니즘은 `N (0, σ²)`에서 샘플링된 노이즈와 함께 사용됩니다. 여기서 " +"`σ = (noise_scale * S) / (샘플링된 클라이언트 수)`입니다." -#: ../../source/how-to-run-flower-using-docker.rst:242 -msgid "Now that we have built the SuperNode image, we can finally run it." -msgstr "이제 SuperNode 이미지를 빌드했으니 이제 실행할 수 있습니다." +#: ../../source/explanation-differential-privacy.rst:126 +msgid "Clipping" +msgstr "클리핑" -#: ../../source/how-to-run-flower-using-docker.rst:250 -#: ../../source/how-to-run-flower-using-docker.rst:367 -msgid "Let's break down each part of this command:" -msgstr "이 명령의 각 부분을 자세히 살펴보겠습니다:" +#: ../../source/explanation-differential-privacy.rst:128 +msgid "" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." +msgstr "중앙 DP에서 일반적으로 사용되는 클리핑에는 고정 클리핑과 조정 클리핑의 두 가지 형태가 있습니다." -#: ../../source/how-to-run-flower-using-docker.rst:252 -#: ../../source/how-to-run-flower-using-docker.rst:369 -msgid "``docker run``: This is the command to run a new Docker container." -msgstr "``docker run``: 새 Docker 컨테이너를 실행하는 명령입니다." 
+#: ../../source/explanation-differential-privacy.rst:131 +msgid "" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." +msgstr "" +"**고정 클리핑** : 클라이언트의 업데이트 크기에 대해 미리 정의된 고정 임계값이 설정됩니다. 이 임계값을 초과하는 모든 " +"업데이트는 임계값으로 다시 클리핑됩니다." -#: ../../source/how-to-run-flower-using-docker.rst:253 -#: ../../source/how-to-run-flower-using-docker.rst:370 +#: ../../source/explanation-differential-privacy.rst:133 msgid "" -"``--rm``: This option specifies that the container should be automatically " -"removed when it stops." +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -"``--rm``: 이 옵션은 컨테이너가 중지될 때 자동으로 제거되도록 지정합니다." +"**조정 클리핑** : 클리핑 임계값은 관찰된 업데이트 분포에 따라 동적으로 조정됩니다[4]. 즉, 클리핑 값은 업데이트 표준 " +"분포의 사분위수에 따라 라운드가 진행되는 동안 조정됩니다." -#: ../../source/how-to-run-flower-using-docker.rst:254 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." -msgstr "``flwr_supernode:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." +#: ../../source/explanation-differential-privacy.rst:137 +msgid "" +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." +msgstr "고정 클리핑과 조정 클리핑 중 선택은 개인정보 보호 요구 사항, 데이터 배포, 모델 복잡성 등 다양한 요인에 따라 달라집니다." -#: ../../source/how-to-run-flower-using-docker.rst:255 -#: ../../source/how-to-run-flower-using-docker.rst:372 -msgid "``--insecure``: This option enables insecure communication." -msgstr "``--insecure``: 이 옵션은 보안되지 않은 통신을 활성화합니다." 
+#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:141 +#: ../../source/how-to-use-differential-privacy.rst:113 +msgid "Local Differential Privacy" +msgstr "로컬 차등 프라이버시" -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-differential-privacy.rst:143 msgid "" -"``--superlink 192.168.1.100:9092``: This option specifies the address of the " -"SuperLinks Fleet" +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -"``--superlink 192.168.1.100:9092``: 이 옵션은 SuperLinks Fleet의 주소를 지정" -"합니다" +"이 접근 방식에서는 각 클라이언트가 DP를 수행할 책임이 있습니다. 로컬 DP는 완전히 신뢰할 수 있는 애그리게이터가 필요하지 " +"않지만, 로컬 DP는 중앙 DP에 비해 정확도는 떨어져도 개인 정보 보호는 더 우수하다는 점에 유의해야 합니다." -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." -msgstr "" -"API에 연결할 수 있습니다. SuperLink IP로 업데이트하는 것을 잊지 마세요." +#: ../../source/explanation-differential-privacy.rst:152 +msgid "In this explainer, we focus on two forms of achieving Local DP:" +msgstr "이 설명에서는 로컬 DP를 달성하는 두 가지 형태에 중점을 둡니다:" -#: ../../source/how-to-run-flower-using-docker.rst:269 +#: ../../source/explanation-differential-privacy.rst:154 msgid "" -"To test running Flower locally, you can create a `bridge network `__, use the ``--network`` argument and pass the name of the Docker " -"network to run your SuperNodes." +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -"로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge network `__를 생성하고 ``--network`` argument를 사용하고 SuperNodes를 실행" -"할 Docker 네트워크의 이름을 전달하면 됩니다." 
+"각 클라이언트는 로컬 업데이트를 서버로 보내기 전에 로컬 업데이트에 노이즈를 추가합니다. 로컬 모델의 감도를 ∆로 간주하여 가우시안"
+" 노이즈가 σ의 노이즈 스케일로 적용되어 (:math:`\\epsilon`, :math:`\\delta`)-DP를 달성하기 위해, "
+"여기서 σ는 노이즈 스케일입니다:"

-#: ../../source/how-to-run-flower-using-docker.rst:273
+#: ../../source/explanation-differential-privacy.rst:158
+#, fuzzy
 msgid ""
-"Any argument that comes after the tag is passed to the Flower SuperNode "
-"binary. To see all available flags that the SuperNode supports, run:"
+"\\small\n"
+"\\frac{∆ \\times \\sqrt{2 \\times "
+"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}"
 msgstr ""
-"태그 뒤에 오는 모든 argument는 Flower SuperNode 바이너리에 전달됩니다. "
-"SuperNode가 지원하는 사용 가능한 모든 플래그를 보려면 실행하세요:"
+"\\small\n"
+"\\frac{∆ \\times \\sqrt{2 \\times "
+"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}"

-#: ../../source/how-to-run-flower-using-docker.rst:283
+#: ../../source/explanation-differential-privacy.rst:163
 msgid ""
-"To enable SSL, we will need to mount a PEM-encoded root certificate into "
-"your SuperNode container."
+"Each client adds noise to the gradients of the model during the local "
+"training (DP-SGD). More specifically, in this approach, gradients are "
+"clipped and an amount of calibrated noise is injected into the gradients."
 msgstr ""
-"SSL을 사용하려면 PEM 인코딩된 루트 인증서를 SuperNode 컨테이너에 마운트해야 "
-"합니다."
+"각 클라이언트는 로컬 트레이닝(DP-SGD) 중에 모델의 gradient에 노이즈를 추가합니다. 보다 구체적으로, 이 접근 "
+"방식에서는 gradient이 클리핑되고 보정된 노이즈가 gradient에 주입됩니다."

-#: ../../source/how-to-run-flower-using-docker.rst:285
 msgid ""
-"Assuming the certificate already exists locally, we can use the flag ``--"
-"volume`` to mount the local certificate into the container's ``/app/`` "
-"directory. This allows the SuperNode to access the certificate within the "
-"container. Use the ``--root-certificates`` flag when starting the container."
-msgstr ""
-"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로"
-"컬 인증서를 컨테이너의 ``/app/`` 디렉터리에 마운트할 수 있습니다. 
이렇게 하" -"면 SuperNode가 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작" -"할 때 ``--root-certificates`` 플래그를 사용하세요." +"Please note that these two approaches are providing privacy at different " +"levels." +msgstr "이 두 가지 접근 방식은 서로 다른 수준의 개인정보 보호 기능을 제공한다는 점에 유의하세요." -#: ../../source/how-to-run-flower-using-docker.rst:297 -msgid "Flower ServerApp" -msgstr "Flower 서버앱" +#: ../../source/explanation-differential-privacy.rst:169 +msgid "**References:**" +msgstr "**참고:**" -#: ../../source/how-to-run-flower-using-docker.rst:299 -msgid "" -"The procedure for building and running a ServerApp image is almost identical " -"to the SuperNode image." -msgstr "" -"ServerApp 이미지를 빌드하고 실행하는 절차는 SuperNode 이미지와 거의 동일합니" -"다." +#: ../../source/explanation-differential-privacy.rst:171 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +msgstr "[1] Dwork 외. 차등 프라이버시의 알고리즘적 기초." -#: ../../source/how-to-run-flower-using-docker.rst:301 +#: ../../source/explanation-differential-privacy.rst:173 msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a pre-" -"installed version of Flower and serves as a base for building your own " -"ServerApp image." -msgstr "" -"SuperNode 이미지와 마찬가지로 ServerApp Docker 이미지는 Flower의 사전 설치된 " -"버전과 함께 제공되며, 자체 ServerApp 이미지를 구축하기 위한 기본 역할을 합니" -"다." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." +msgstr "[2] McMahan 외. 차등적 개인 반복 언어 모델 학습." -#: ../../source/how-to-run-flower-using-docker.rst:304 +#: ../../source/explanation-differential-privacy.rst:175 msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the Flower " -"SuperNode section. If you have not already done so, please follow the " -"`SuperNode Prerequisites`_ before proceeding." -msgstr "" -"여기서는 Flower SuperNode 섹션에서와 동일한`quickstart-pytorch`` 예제를 사용" -"하겠습니다. 아직 수행하지 않았다면 계속 진행하기 전에 `SuperNode " -"Prerequisites`_ 을 따르세요." +"[3] Geyer et al. 
Differentially Private Federated Learning: A Client " +"Level Perspective." +msgstr "[3] Geyer 외. 차등적 개인 연합 학습: 고객 수준의 관점." -#: ../../source/how-to-run-flower-using-docker.rst:309 -msgid "Creating a ServerApp Dockerfile" -msgstr "ServerApp Dockerfile 만들기" +#: ../../source/explanation-differential-privacy.rst:177 +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +msgstr "[4] Galen 외. 조정형 클리핑을 통한 차등적 개인 학습." -#: ../../source/how-to-run-flower-using-docker.rst:320 -msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples/" -"quickstart-pytorch``." -msgstr "" -"먼저, ``ServerApp`` 코드가 있는 디렉토리에 Docker파일을 생성해야 합니다. " -"``quickstart-pytorch`` 예제를 사용하는 경우 ``examples/quickstart-pytorch``" -"에 ``Dockerfile.serverapp``이라는 새 파일을 생성합니다." +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "연합 평가" -#: ../../source/how-to-run-flower-using-docker.rst:324 +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." msgstr "" -"``Dockerfile.serverapp``에는 ServerApp 이미지를 합치는 지침이 포함되어 있습니" -"다." +"연합 학습 시스템에서 모델을 평가하는 데는 중앙 집중식(또는 서버 측) 평가와 연합(또는 클라이언트 측) 평가라는 두 가지 주요 " +"접근 방식이 있습니다." -#: ../../source/how-to-run-flower-using-docker.rst:335 -msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image tagged " -"``1.8.0`` as a base image and set our working directory to ``/app``. The " -"following instructions will now be executed in the ``/app`` directory. 
In " -"the last two lines, we copy the ``server.py`` module into the image and set " -"the entry point to ``flower-server-app`` with the argument ``server:app``. " -"The argument is the object reference of the ServerApp (``:" -"``) that will be run inside the ServerApp container." -msgstr "" -"처음 두 줄에서는 ``1.8.0`` 태그가 붙은 ServerApp 이미지를 기본 이미지로 사용" -"하고 작업 디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제 ``/" -"app`` 디렉토리에서 다음 명령이 실행됩니다. 마지막 두 줄에서는 ``server.py`` " -"모듈을 이미지에 복사하고 ``server:app`` argument를 사용하여 진입점을 " -"``flower-server-app``로 설정합니다. 인수는 ServerApp 컨테이너 내에서 실행될 " -"ServerApp의 객체 참조(``:``)입니다." +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "중앙 집중식 평가" -#: ../../source/how-to-run-flower-using-docker.rst:343 -msgid "Building the ServerApp Docker image" -msgstr "ServerApp Docker 이미지 빌드" +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "기본 제공 전략" -#: ../../source/how-to-run-flower-using-docker.rst:345 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"Next, we build the ServerApp Docker image by running the following command " -"in the directory where Dockerfile and ServerApp code are located." +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" msgstr "" -"다음으로, Docker파일과 ServerApp 코드가 있는 디렉터리에서 다음 명령을 실행하" -"여 ServerApp Docker 이미지를 빌드합니다." +"모든 기본 제공 전략은 초기화 중에 평가 함수를 제공하여 중앙 집중식 평가를 지원합니다. 
평가 함수는 현재 글로벌 모델 파라미터를 " +"입력으로 받아 평가 결과를 반환할 수 있는 모든 함수입니다:" -#: ../../source/how-to-run-flower-using-docker.rst:352 +#: ../../source/explanation-federated-evaluation.rst:61 +msgid "Custom Strategies" +msgstr "사용자 정의 전략" + +#: ../../source/explanation-federated-evaluation.rst:63 +#, fuzzy msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +"The ``Strategy`` abstraction provides a method called ``evaluate`` that " +"can directly be used to evaluate the current global model parameters. The" +" current server implementation calls ``evaluate`` after parameter " +"aggregation and before federated evaluation (see next paragraph)." msgstr "" -"이미지에``flwr_serverapp``이라는 이름을 붙이고 ``0.0.1``이라는 태그를 붙였습" -"니다. 여기서 선택한 값은 예시일 뿐이라는 점을 기억하세요. 필요에 따라 변경할 " -"수 있습니다." +"코드:`전략` 추상화는 현재 전역 모델 파라미터를 평가하는 데 직접 사용할 수 있는 :코드:`평가`라는 메서드를 제공합니다. 현재 " +"서버 구현에서는 매개변수 집계 후와 연합 평가 전에 :code:`evaluate`를 호출합니다(다음 단락 참조)." + +#: ../../source/explanation-federated-evaluation.rst:69 +msgid "Federated Evaluation" +msgstr "연합 평가" -#: ../../source/how-to-run-flower-using-docker.rst:357 -msgid "Running the ServerApp Docker image" -msgstr "ServerApp Docker 이미지 실행" +#: ../../source/explanation-federated-evaluation.rst:72 +msgid "Implementing Federated Evaluation" +msgstr "연합 평가 구현" -#: ../../source/how-to-run-flower-using-docker.rst:359 -msgid "Now that we have built the ServerApp image, we can finally run it." -msgstr "이제 ServerApp 이미지를 빌드했으니 이제 실행할 수 있습니다." +#: ../../source/explanation-federated-evaluation.rst:74 +#, fuzzy +msgid "" +"Client-side evaluation happens in the ``Client.evaluate`` method and can " +"be configured from the server side." +msgstr "클라이언트 측 평가는 :code:`Client.evaluate` 메서드에서 이루어지며 서버 측에서 구성할 수 있습니다." -#: ../../source/how-to-run-flower-using-docker.rst:371 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." 
-msgstr "``flwr_serverapp:0.0.1``: 사용할 Docker 이미지의 태그 이름입니다." +#: ../../source/explanation-federated-evaluation.rst:108 +msgid "Configuring Federated Evaluation" +msgstr "연합 평가 구성" -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-federated-evaluation.rst:110 msgid "" -"``--superlink 192.168.1.100:9091``: This option specifies the address of the " -"SuperLinks Driver" -msgstr "" -"``--superlink 192.168.1.100:9091``: 이 옵션은 SuperLinks 드라이버의 주소를 지" -"정합니다" +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" +msgstr "연합 평가는 서버 측에서 구성할 수 있습니다. 기본 제공 전략은 다음 인수를 지원합니다:" -#: ../../source/how-to-run-flower-using-docker.rst:385 +#: ../../source/explanation-federated-evaluation.rst:113 +#, fuzzy msgid "" -"To test running Flower locally, you can create a `bridge network `__, use the ``--network`` argument and pass the name of the Docker " -"network to run your ServerApps." +"``fraction_evaluate``: a ``float`` defining the fraction of clients that " +"will be selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.1`` and ``100`` clients are connected to the server, then ``10`` will" +" be randomly selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.0``, federated evaluation will be disabled." msgstr "" -"로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge network `__,를 생성하고 ``--network`` argument를 사용하여 ServerApp을 실행" -"할 Docker 네트워크의 이름을 전달하면 됩니다." +":code:`fraction_evaluate`: 평가를 위해 선택될 클라이언트의 비율을 정의하는 :code:`float`입니다. " +"코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 :code:`100` 클라이언트가 서버에 연결되어 " +"있는 경우 :code:`10`이 평가를 위해 무작위로 선택됩니다. code:`fraction_evaluate`가 " +":code:`0.0`으로 설정된 경우 연합 평가가 비활성화됩니다." -#: ../../source/how-to-run-flower-using-docker.rst:389 +#: ../../source/explanation-federated-evaluation.rst:118 +#, fuzzy msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. 
To see all available flags that the ServerApp supports, run:" +"``min_evaluate_clients``: an ``int``: the minimum number of clients to be" +" selected for evaluation. If ``fraction_evaluate`` is set to ``0.1``, " +"``min_evaluate_clients`` is set to 20, and ``100`` clients are connected " +"to the server, then ``20`` clients will be selected for evaluation." msgstr "" -"태그 뒤에 오는 모든 argument는 Flower ServerApp 바이너리에 전달됩니다. " -"ServerApp에서 지원하는 사용 가능한 모든 플래그를 보려면 실행하세요:" +":code:`min_evaluate_clients`: 평가를 위해 선택할 최소 클라이언트 수. :code:`int`. " +"코드:`fraction_evaluate`가 :code:`0.1`로 설정되어 있고 :code:`fraction_evaluate`가 " +"20으로 설정되어 있으며 :code:`100` 클라이언트가 서버에 연결되어 있는 경우 :code:`20` 클라이언트가 평가를 위해 " +"선택됩니다." -#: ../../source/how-to-run-flower-using-docker.rst:399 +#: ../../source/explanation-federated-evaluation.rst:122 +#, fuzzy msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." +"``min_available_clients``: an ``int`` that defines the minimum number of " +"clients which need to be connected to the server before a round of " +"federated evaluation can start. If fewer than ``min_available_clients`` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" -"SSL을 사용하려면 PEM 인코딩된 루트 인증서를 ServerApp 컨테이너에 마운트해야 " -"합니다." +":code:`min_available_clients`: federated 평가 단계를 시작하기 전에 서버에 연결해야 하는 최소 " +"클라이언트 수를 정의하는 :code:`int`입니다. 서버에 연결된 클라이언트가 " +":code:`min_available_clients`보다 적으면 서버는 더 많은 클라이언트가 연결될 때까지 기다렸다가 평가를 위한 " +"클라이언트 샘플링을 계속합니다." -#: ../../source/how-to-run-flower-using-docker.rst:401 +#: ../../source/explanation-federated-evaluation.rst:127 +#, fuzzy msgid "" -"Assuming the certificate already exists locally, we can use the flag ``--" -"volume`` to mount the local certificate into the container's ``/app/`` " -"directory. This allows the ServerApp to access the certificate within the " -"container. 
Use the ``--root-certificates`` flags when starting the container." +"``on_evaluate_config_fn``: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." msgstr "" -"인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` 플래그를 사용하여 로" -"컬 인증서를 컨테이너의 ``/app/`` 디렉터리에 마운트할 수 있습니다. 이렇게 하" -"면 ServerApp이 컨테이너 내의 인증서에 액세스할 수 있습니다. 컨테이너를 시작" -"할 때 ``--root-certificates`` 플래그를 사용하세요." - -#: ../../source/how-to-run-flower-using-docker.rst:412 -msgid "Advanced Docker options" -msgstr "고급 Docker 옵션" +"code:`on_evaluate_config_fn`: 선택한 클라이언트로 전송할 구성 사전을 반환하는 함수입니다. 이 함수는 각 " +"단계 중에 호출되며, 서버 측에서 클라이언트 측 평가를 사용자 지정하는 편리한 방법을 제공합니다(예: 수행되는 유효성 검사 단계 수" +" 구성)." -#: ../../source/how-to-run-flower-using-docker.rst:415 -msgid "Run with root user privileges" -msgstr "루트 사용자 권한으로 실행" +#: ../../source/explanation-federated-evaluation.rst:157 +msgid "Evaluating Local Model Updates During Training" +msgstr "훈련 중 로컬 모델 업데이트 평가" -#: ../../source/how-to-run-flower-using-docker.rst:417 +#: ../../source/explanation-federated-evaluation.rst:159 +#, fuzzy msgid "" -"Flower Docker images, by default, run with a non-root user (username/" -"groupname: ``app``, UID/GID: ``49999``). Using root user is not recommended " -"unless it is necessary for specific tasks during the build process. Always " -"make sure to run the container as a non-root user in production to maintain " -"security best practices." +"Model parameters can also be evaluated during training. ``Client.fit`` " +"can return arbitrary evaluation results as a dictionary:" msgstr "" -"기본적으로 Flower Docker 이미지는 루트 사용자가 아닌 사용자(사용자명/그룹명:" -"``app``, UID/GID: ``49999``)로 실행됩니다. 빌드 프로세스 중 특정 작업에 필요" -"한 경우가 아니라면 루트 사용자를 사용하지 않는 것이 좋습니다. 보안 모범 사례" -"를 유지하려면 항상 프로덕션 환경에서 루트 사용자가 아닌 사용자로 컨테이너를 " -"실행해야 합니다." 
+"모델 파라미터는 훈련 중에도 평가할 수 있습니다. :code:`Client.fit`은 임의의 평가 결과를 dictionary로 " +"반환할 수 있습니다:" -#: ../../source/how-to-run-flower-using-docker.rst:422 -msgid "**Run a container with root user privileges**" -msgstr "**루트 사용자 권한으로 컨테이너 실행하기**" +#: ../../source/explanation-federated-evaluation.rst:201 +msgid "Full Code Example" +msgstr "전체 코드 예제" -#: ../../source/how-to-run-flower-using-docker.rst:424 +#: ../../source/explanation-federated-evaluation.rst:203 msgid "" -"Run the Docker image with the ``-u`` flag and specify ``root`` as the " -"username:" +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" -"``-u`` 플래그를 사용하여 Docker 이미지를 실행하고 사용자 이름으로 ``root``를 " -"지정합니다:" +"연합 평가와 중앙 집중식 평가를 모두 사용하는 전체 코드 예제는 *고급 텐서플로우 예제*(다른 프레임워크에서 구현된 워크로드에도 " +"동일한 접근 방식을 적용할 수 있음)를 참조하세요: " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/how-to-run-flower-using-docker.rst:430 -msgid "This command will run the Docker container with root user privileges." -msgstr "이 명령은 루트 사용자 권한으로 Docker 컨테이너를 실행합니다." - -#: ../../source/how-to-run-flower-using-docker.rst:432 -msgid "**Run the build process with root user privileges**" -msgstr "**루트 사용자 권한으로 빌드 프로세스를 실행합니다**" - -#: ../../source/how-to-run-flower-using-docker.rst:434 +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"If you want to switch to the root user during the build process of the " -"Docker image to install missing system dependencies, you can use the ``USER " -"root`` directive within your Dockerfile." +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." 
msgstr "" -"Docker 이미지 빌드 과정에서 루트 사용자로 전환하여 누락된 시스템 의존성을 " -"설치하려면 Dockerfile 내에서 ``USER root`` 지시어를 사용할 수 있습니다." -#: ../../source/how-to-run-flower-using-docker.rst:454 -msgid "Using a different Flower version" -msgstr "다른 Flower 버전 사용" +#: ../../source/explanation-flower-architecture.rst:2 +msgid "Flower Architecture" +msgstr "Flower 아키텍처" -#: ../../source/how-to-run-flower-using-docker.rst:456 +#: ../../source/explanation-flower-architecture.rst:4 msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on " -"`Docker Hub `__." +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -"다른 버전의 Flower를 사용하려면 태그를 변경하여 사용할 수 있습니다(예: " -"Flower nightly). 사용 가능한 모든 버전은 `Docker Hub `__에 있습니다." - -#: ../../source/how-to-run-flower-using-docker.rst:460 -msgid "Pinning a Docker image to a specific version" -msgstr "특정 버전에 Docker 이미지 고정하기" -#: ../../source/how-to-run-flower-using-docker.rst:462 +#: ../../source/explanation-flower-architecture.rst:6 msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that you " -"always use the same image, you can specify the hash of the image instead of " -"the tag." +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." msgstr "" -"태그 뒤에 있는 이미지가 업데이트될 수 있습니다. 이러한 업데이트에는 " -"일반적으로 Flower의 기능을 변경해서는 안 되는 시스템 의존성에 대한 보안 " -"업데이트가 포함됩니다. 그러나 항상 동일한 이미지를 사용하려면 태그 대신 " -"이미지의 해시를 지정할 수 있습니다." 
-#: ../../source/how-to-run-flower-using-docker.rst:467 +#: ../../source/explanation-flower-architecture.rst:9 msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." msgstr "" -"다음 명령은 ``superlink:1.8.0`` 태그가 참조하는 현재 이미지 해시를 반환합니" -"다:" -#: ../../source/how-to-run-flower-using-docker.rst:474 -msgid "Next, we can pin the hash when running a new SuperLink container:" -msgstr "다음으로, 새 SuperLink 컨테이너를 실행할 때 해시를 고정할 수 있습니다:" +#: ../../source/explanation-flower-architecture.rst:13 +msgid "This is sometimes called a hub-and-spoke topology:" +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:483 -msgid "Setting environment variables" -msgstr "환경 변수 설정" +#: ../../source/explanation-flower-architecture.rst:21 +#, fuzzy +msgid "Hub-and-spoke topology in federated learning" +msgstr "연합 학습이란 무엇입니까?" -#: ../../source/how-to-run-flower-using-docker.rst:485 +#: ../../source/explanation-flower-architecture.rst:21 msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." msgstr "" -"Docker 컨테이너 내에서 변수를 설정하려면 ``-e =`` 플래그를 사용" -"하면 됩니다." 
- -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" -msgstr "시뮬레이션 실행" -#: ../../source/how-to-run-simulations.rst:8 +#: ../../source/explanation-flower-architecture.rst:23 msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients but " -"without having to source, configure and mange a large number of physical " -"devices; you might want to run your FL workloads as fast as possible on the " -"compute systems you have access to without having to go through a complex " -"setup process; you might want to validate your algorithm on different " -"scenarios at varying levels of data and system heterogeneity, client " -"availability, privacy budgets, etc. These are among some of the use-cases " -"where simulating FL workloads makes sense. Flower can accommodate these " -"scenarios by means of its `VirtualClientEngine `_ or VCE." -msgstr "" -"Federated 학습 워크로드 시뮬레이션은 다양한 사용 사례에 유용합니다. 대규모 클" -"라이언트 집단에서 워크로드를 실행하되 많은 수의 물리적 장치를 소싱, 구성 및 " -"관리할 필요가 없는 경우, 복잡한 설정 과정을 거치지 않고도 액세스 가능한 컴퓨" -"팅 시스템에서 최대한 빠르게 FL 워크로드를 실행하려는 경우, 다양한 수준의 데이" -"터 및 시스템 이질성, 클라이언트 가용성, 개인정보 예산 등의 다양한 시나리오에" -"서 알고리즘을 검증하려는 경우 등 여러 가지 사용 사례에 유용합니다. 이러한 사" -"례는 FL 워크로드 시뮬레이션이 적합한 사용 사례 중 일부입니다. Flower는 " -"`VirtualClientEngine `_ 또는 VCE를 통해 이러한 시나리오를 수용할 수 있습니다." - -#: ../../source/how-to-run-simulations.rst:10 -msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual` " -"clients. These clients are identical to `non-virtual` clients (i.e. the ones " -"you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by creating a " -"class inheriting, for example, from `flwr.client.NumPyClient `_ and therefore behave in an identical way. In " -"addition to that, clients managed by the :code:`VirtualClientEngine` are:" -msgstr "" -":code:`VirtualClientEngine`은 `virtual` 클라이언트를 예약, 실행 및 관리합니" -"다. 
이러한 클라이언트는 `non-virtual` 클라이언트(예: `flwr.client." -"start_client `_ 명령을 통해 실행하는 클라이언" -"트)와 동일하며, `flwr.client.NumPyClient `_에서 상속하는 클래스 생성으로 구성될 수 있으므로 동일한 방식으" -"로 동작합니다. 그 외에도 :code:`VirtualClientEngine`에 의해 관리되는 클라이언" -"트는 다음과 같습니다:" - -#: ../../source/how-to-run-simulations.rst:12 -msgid "" -"resource-aware: this means that each client gets assigned a portion of the " -"compute and memory on your system. You as a user can control this at the " -"beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." msgstr "" -"resource-aware: 이는 각 클라이언트가 시스템에서 컴퓨팅 및 메모리의 일부를 할" -"당받는다는 것을 의미합니다. 사용자는 시뮬레이션을 시작할 때 이를 제어할 수 있" -"으며, 이를 통해 Flower FL 시뮬레이션의 병렬 처리 정도를 제어할 수 있습니다. " -"클라이언트당 리소스가 적을수록 동일한 하드웨어에서 더 많은 클라이언트를 동시" -"에 실행할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:13 +#: ../../source/explanation-flower-architecture.rst:28 msgid "" -"self-managed: this means that you as a user do not need to launch clients " -"manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." msgstr "" -"self-managed: 이는 사용자가 클라이언트를 수동으로 실행할 필요가 없으며, 대" -"신 :code:`VirtualClientEngine`의 내부에 위임된다는 의미입니다." -#: ../../source/how-to-run-simulations.rst:14 -msgid "" -"ephemeral: this means that a client is only materialized when it is required " -"in the FL process (e.g. 
to do `fit() `_). The object is destroyed afterwards, releasing the resources it was " -"assigned and allowing in this way other clients to participate." +#: ../../source/explanation-flower-architecture.rst:32 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" msgstr "" -"ephemeral: 이는 클라이언트가 FL 프로세스에서 필요할 때만 구체화됨을 의미합니" -"다(예: `fit() `_을 수행하기 위해). " -"객체는 나중에 소멸되어 할당된 리소스를 해제하고 다른 클라이언트가 참여할 수 " -"있도록 허용합니다." -#: ../../source/how-to-run-simulations.rst:16 +#: ../../source/explanation-flower-architecture.rst:34 msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use of " -"`Actors `_ to spawn " -"`virtual` clients and run their workload." +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." msgstr "" -":code:`VirtualClientEngine`은 확장 가능한 파이썬 워크로드를 위한 오픈 소스 프" -"레임워크인 `Ray `_를 사용하여 `virtual` 클라이언트를 구" -"현합니다. 특히 Flower의 :code:`VirtualClientEngine`은 `Actors `_를 사용하여 `virtual` 클라이언트를 생" -"성하고 해당 워크로드를 실행합니다." - -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" -msgstr "Flower 시뮬레이션 시작" -#: ../../source/how-to-run-simulations.rst:22 +#: ../../source/explanation-flower-architecture.rst:36 msgid "" -"Running Flower simulations still require you to define your client class, a " -"strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your simulation " -"is done with `start_simulation `_ and a minimal example looks as follows:" +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). 
This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -"Flower 시뮬레이션을 실행하려면 여전히 클라이언트 클래스, 전략 및 유틸리티 함" -"수를 정의하여 데이터 세트를 다운로드하고 로드(및 파티션)해야 합니다. 이 작업" -"을 마친 후 시뮬레이션을 시작하려면 `start_simulation `_을 사용하면 되며, 최소한의 예시는 다음과 같습니" -"다:" -#: ../../source/how-to-run-simulations.rst:44 -msgid "VirtualClientEngine resources" -msgstr "VirtualClientEngine 리소스" +#: ../../source/explanation-flower-architecture.rst:41 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" +msgstr "" -#: ../../source/how-to-run-simulations.rst:45 +#: ../../source/explanation-flower-architecture.rst:43 msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all " -"GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system " -"resources are used for simulation. You can do this via the :code:" -"`ray_init_args` input argument to :code:`start_simulation` which the VCE " -"internally passes to Ray's :code:`ray.init` command. For a complete list of " -"settings you can configure check the `ray.init `_ documentation. Do not set :" -"code:`ray_init_args` if you want the VCE to use all your system's CPUs and " -"GPUs." -msgstr "" -"기본적으로 VCE는 모든 시스템 리소스(예: 모든 CPU, 모든 GPU 등)에 액세스할 수 " -"있으며, 이는 Ray를 시작할 때의 기본 동작이기도 합니다. 그러나 일부 설정에서" -"는 시뮬레이션에 사용되는 시스템 리소스의 수를 제한하고 싶을 수 있습니다. 이 " -"설정은 VCE가 내부적으로 Ray의 :code:`ray.init` 명령에 전달하는 :code:" -"`start_simulation`에 대한 :code:`ray_init_args` 입력 인수를 통해 수행할 수 있" -"습니다. 구성할 수 있는 전체 설정 목록은 `ray.init `_ 설명서를 확인하세요. VCE가 " -"시스템의 모든 CPU와 GPU를 사용하도록 하려면 :code:`ray_init_args`를 설정하지 " -"마세요." - -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" -msgstr "클라이언트 리소스 할당" +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." 
+msgstr "" -#: ../../source/how-to-run-simulations.rst:63 +#: ../../source/explanation-flower-architecture.rst:46 msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and " -"nothing else) to each virtual client. This means that if your system has 10 " -"cores, that many virtual clients can be concurrently running." +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -"기본적으로 :code:`VirtualClientEngine`은 각 가상 클라이언트에 단일 CPU 코어" -"를 할당합니다(그 외에는 아무것도 할당하지 않음). 즉, 시스템에 코어가 10개인 " -"경우 그만큼 많은 가상 클라이언트를 동시에 실행할 수 있습니다." -#: ../../source/how-to-run-simulations.rst:65 +#: ../../source/explanation-flower-architecture.rst:51 msgid "" -"More often than not, you would probably like to adjust the resources your " -"clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your simulation " -"by setting the argument `client_resources` to `start_simulation `_. Two keys are internally used " -"by Ray to schedule and spawn workloads (in our case Flower clients):" +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." msgstr "" -"대부분의 경우 FL 워크로드의 복잡성(즉, 컴퓨팅 및 메모리 사용량)에 따라 클라이" -"언트에 할당되는 리소스를 조정하고 싶을 것입니다. 시뮬레이션을 시작할 때 " -"`client_resources` argument를 `start_simulation `_로 설정하여 이를 수행할 수 있습니다. 
Ray는 내부" -"적으로 두 개의 키를 사용하여 워크로드(이 경우 Flower 클라이언트)를 스케줄링하" -"고 스폰합니다:" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#: ../../source/explanation-flower-architecture.rst:62 +#, fuzzy +msgid "Basic Flower architecture" +msgstr "Flower 아키텍처" + +#: ../../source/explanation-flower-architecture.rst:62 +msgid "The basic Flower architecture for federated learning." msgstr "" -":code:`num_cpus`는 클라이언트에서 사용할 수 있는 CPU 코어 수를 나타냅니다." -#: ../../source/how-to-run-simulations.rst:68 +#: ../../source/explanation-flower-architecture.rst:64 msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." msgstr "" -":code:`num_gpus`는 클라이언트에 할당되는 GPU 메모리의 **비율**을 나타냅니다." -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" -msgstr "몇 가지 예를 살펴보겠습니다:" +#: ../../source/explanation-flower-architecture.rst:70 +msgid "" +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." +msgstr "" -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and having " -"orders of magnitude more `dormant` (i.e. not participating in a round) " -"clients. Let's say you want to have 100 clients per round but your system " -"can only accommodate 8 clients concurrently. 
The :code:`VirtualClientEngine` " -"will schedule 100 jobs to run (each simulating a client sampled by the " -"strategy) and then will execute them in a resource-aware manner in batches " -"of 8." -msgstr "" -"code:`client_resources`를 사용하여 FL 시뮬레이션의 동시성 정도를 제어할 수 있" -"지만, 동일한 라운드에서 수십, 수백 또는 수천 개의 클라이언트를 실행하고 훨씬 " -"더 많은 '휴면'(즉, 라운드에 참여하지 않는) 클라이언트를 보유하는 것을 막을 수" -"는 없습니다. 라운드당 100명의 클라이언트를 받고 싶지만 시스템이 동시에 8명의 " -"클라이언트만 수용할 수 있다고 가정해 봅시다. code:`VirtualClientEngine`은 실" -"행할 100개의 작업(각각 전략에서 샘플링한 클라이언트를 시뮬레이션)을 예약한 다" -"음 리소스 인식 방식으로 8개씩 일괄적으로 실행합니다." - -#: ../../source/how-to-run-simulations.rst:91 +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." +msgstr "" + +#: ../../source/explanation-flower-architecture.rst:78 msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a look " -"at the `Ray documentation `_." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -"리소스가 FL 클라이언트를 예약하는 데 사용되는 방법과 사용자 지정 리소스를 정" -"의하는 방법에 대한 모든 복잡한 세부 사항을 이해하려면 'Ray 문서 '를 참조하세요." -#: ../../source/how-to-run-simulations.rst:94 -msgid "Simulation examples" -msgstr "시뮬레이션 예제" +#: ../../source/explanation-flower-architecture.rst:87 +msgid "Multi-tenancy federated learning architecture" +msgstr "" -#: ../../source/how-to-run-simulations.rst:96 -msgid "" -"A few ready-to-run complete examples for Flower simulation in Tensorflow/" -"Keras and PyTorch are provided in the `Flower repository `_. 
You can run them on Google Colab too:" +#: ../../source/explanation-flower-architecture.rst:87 +msgid "Multi-tenancy federated learning architecture with Flower" msgstr "" -"Tensorflow/Keras와 파이토치에서 바로 실행할 수 있는 몇 가지 Flower 시뮬레이" -"션 예제는 `Flower 레포지토리 `_에서 제공됩니" -"다. Google Colab에서도 실행할 수 있습니다:" -#: ../../source/how-to-run-simulations.rst:98 +#: ../../source/explanation-flower-architecture.rst:89 msgid "" -"`Tensorflow/Keras Simulation `_: 100 clients collaboratively train a MLP " -"model on MNIST." +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -"`Tensorflow/Keras 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 " -"MLP 모델을 훈련합니다." -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/explanation-flower-architecture.rst:94 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -"파이토치 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 CNN 모델을 훈" -"련합니다." -#: ../../source/how-to-run-simulations.rst:104 -msgid "Multi-node Flower simulations" -msgstr "멀티 노드 Flower 시뮬레이션" +#: ../../source/explanation-flower-architecture.rst:103 +msgid "Multi-tenancy federated learning architecture - Run 1" +msgstr "" -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/explanation-flower-architecture.rst:103 msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations across " -"multiple compute nodes. Before starting your multi-node simulation ensure " -"that you:" +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." 
msgstr "" -"Flower의 :code:`VirtualClientEngine`을 사용하면 여러 컴퓨팅 노드에서 FL 시뮬" -"레이션을 실행할 수 있습니다. 멀티 노드 시뮬레이션을 시작하기 전에 다음 사항" -"을 확인하세요:" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." -msgstr "모든 노드에서 동일한 Python 환경을 유지합니다." +#: ../../source/explanation-flower-architecture.rst:106 +msgid "" +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" +msgstr "" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." -msgstr "모든 노드에 코드 사본(예: 전체 레포지토리)을 보관하세요." +#: ../../source/explanation-flower-architecture.rst:115 +msgid "Multi-tenancy federated learning architecture - Run 2" +msgstr "" -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/explanation-flower-architecture.rst:115 msgid "" -"Have a copy of your dataset in all nodes (more about this in :ref:" -"`simulation considerations `)" +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." msgstr "" -"모든 노드에 데이터 세트의 사본을 보유하세요(자세한 내용은 :ref:`simulation " -"considerations `에서 확인하세요)" -#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/explanation-flower-architecture.rst:118 msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation `_ so the :code:" -"`VirtualClientEngine` attaches to a running Ray instance." +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -":code:`ray_init_args={\"address\"=\"auto\"}`를 `start_simulation `_에 전달하여 :code:" -"`VirtualClientEngine`이 실행 중인 Ray 인스턴스에 연결되도록 합니다." -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/explanation-flower-architecture.rst:121 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start --head`. 
" -"This command will print a few lines, one of which indicates how to attach " -"other nodes to the head node." +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -"헤드 노드에서 Ray 시작: 터미널에서 :code:`ray start --head`를 입력합니다. 이 " -"명령은 몇 줄을 출력하며, 그 중 하나는 다른 노드를 헤드 노드에 연결하는 방법" -"을 나타냅니다." -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/explanation-flower-architecture.rst:128 msgid "" -"Attach other nodes to the head node: copy the command shown after starting " -"the head and execute it on terminal of a new node: for example :code:`ray " -"start --address='192.168.1.132:6379'`" +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -"헤드 노드에 다른 노드 연결: 헤드를 시작한 후 표시된 명령어을 복사하여 새 노드" -"의 터미널에서 실행합니다: 예: :code:`ray start --" -"address='192.168.1.132:6379'`" -#: ../../source/how-to-run-simulations.rst:115 -msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." +#: ../../source/explanation-flower-architecture.rst:137 +msgid "Flower Deployment Engine with SuperExec" +msgstr "" + +#: ../../source/explanation-flower-architecture.rst:137 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -"위의 모든 작업이 완료되면 단일 노드에서 시뮬레이션을 실행할 때와 마찬가지로 " -"헤드 노드에서 코드를 실행할 수 있습니다." 
-#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster " -"you simply need to run the command :code:`ray stop` in each node's terminal " -"(including the head node)." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -"시뮬레이션이 완료되면 클러스터를 해체하려면 각 노드(헤드 노드 포함)의 터미널" -"에서 :code:`ray stop` 명령을 실행하기만 하면 됩니다." -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" -msgstr "멀티 노드 시뮬레이션에 대해 알아두면 좋은 사항" - -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/explanation-flower-architecture.rst:146 +#, fuzzy msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." msgstr "" -"여기에서는 멀티 노드 FL 시뮬레이션을 실행할 때 흥미로운 몇 가지 기능을 나열합" -"니다:" +"Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 " +"언제든지 공유해 주세요!" -#: ../../source/how-to-run-simulations.rst:124 -msgid "" -"User :code:`ray status` to check all nodes connected to your head node as " -"well as the total resources available to the :code:`VirtualClientEngine`." -msgstr "" -"사용자는 :code:`ray status`를 통해 헤드 노드에 연결된 모든 노드와 :code:" -"`VirtualClientEngine`에 사용 가능한 총 리소스를 확인할 수 있습니다." +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "FED 템플릿" -#: ../../source/how-to-run-simulations.rst:126 -msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, all " -"GPUs) will be visible by the head node. This means that the :code:" -"`VirtualClientEngine` can schedule as many `virtual` clients as that node " -"can possible run. In some settings you might want to exclude certain " -"resources from the simulation. 
You can do this by appending `--num-" -"cpus=` and/or `--num-gpus=` in any :" -"code:`ray start` command (including when starting the head)" -msgstr "" -"새 노드를 헤드에 연결하면 해당 노드의 모든 리소스(즉, 모든 CPU, 모든 GPU)가 " -"헤드 노드에 표시됩니다. 즉, :code:`VirtualClientEngine`은 해당 노드가 실행할 " -"수 있는 만큼의 `가상` 클라이언트를 예약할 수 있습니다. 일부 설정에서는 시뮬레" -"이션에서 특정 리소스를 제외하고 싶을 수 있습니다. 모든 :code:`ray start` 명령" -"(헤드 시작 시 포함)에 `--num-cpus=` 및/또는 `--num-" -"gpus=`를 추가하여 이 작업을 수행하면 됩니다" +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "목차" -#: ../../source/how-to-run-simulations.rst:132 -msgid "Considerations for simulations" -msgstr "시뮬레이션 시 고려 사항" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "[목차](#목차)" -#: ../../source/how-to-run-simulations.rst:135 -msgid "" -"We are actively working on these fronts so to make it trivial to run any FL " -"workload with Flower simulation." -msgstr "" -"Flower 시뮬레이션으로 모든 FL 워크로드를 간편하게 실행할 수 있도록 이러한 측" -"면에서 적극적으로 노력하고 있습니다." +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" +msgstr "[요약](#요약)" -#: ../../source/how-to-run-simulations.rst:138 -msgid "" -"The current VCE allows you to run Federated Learning workloads in simulation " -"mode whether you are prototyping simple scenarios on your personal laptop or " -"you want to train a complex FL pipeline across multiple high-performance GPU " -"nodes. While we add more capabilities to the VCE, the points below highlight " -"some of the considerations to keep in mind when designing your FL pipeline " -"with Flower. We also highlight a couple of current limitations in our " -"implementation." 
-msgstr "" -"현재 VCE를 사용하면 개인 노트북에서 간단한 시나리오를 프로토타이핑하든, 여러 " -"고성능 GPU 노드에서 복잡한 FL 파이프라인을 훈련하든 상관없이 시뮬레이션 모드" -"에서 Federated 학습 워크로드를 실행할 수 있습니다. VCE에 더 많은 기능을 추가" -"하는 동안, 아래에서는 Flower로 FL 파이프라인을 설계할 때 염두에 두어야 할 몇 " -"가지 사항을 강조합니다. 또한 현재 구현에서 몇 가지 제한 사항을 강조합니다." +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "[동기](#동기)" -#: ../../source/how-to-run-simulations.rst:141 -msgid "GPU resources" -msgstr "GPU 리소스" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "[목표](#목표)" -#: ../../source/how-to-run-simulations.rst:143 -msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key :" -"code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " -"internally by the VCE) is by default:" -msgstr "" -"VCE는 :code:`client_resources`에서 :code:`num_gpus` 키를 지정하는 클라이언트" -"에 GPU 메모리 공유를 할당합니다. 즉, (VCE에서 내부적으로 사용하는) Ray가 기본" -"적으로 사용됩니다:" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "[비목표](#비목표)" -#: ../../source/how-to-run-simulations.rst:146 -msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you " -"set :code:`num_gpus=0.5` and you have two GPUs in your system with different " -"(e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients concurrently." -msgstr "" -"GPU에서 사용 가능한 총 VRAM을 인식하지 못합니다. 즉, 시스템에 서로 다른(예: " -"32GB와 8GB) VRAM 용량을 가진 두 개의 GPU가 있고 :code:`num_gpus=0.5`를 설정하" -"면 둘 다 동시에 2개의 클라이언트를 실행하게 됩니다." +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "[제안](#제안)" -#: ../../source/how-to-run-simulations.rst:147 -msgid "" -"not aware of other unrelated (i.e. 
not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" -msgstr "" -"관련 없는(즉, VCE에 의해 생성되지 않은) 다른 워크로드가 GPU에서 실행되고 있는" -"지 알지 못합니다. 여기서 두 가지 시사점을 얻을 수 있습니다:" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "[단점](#단점)" -#: ../../source/how-to-run-simulations.rst:149 -msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" -msgstr "" -"집계 후 '글로벌 모델'을 평가하려면 Flower 서버에 GPU가 필요할 수 있습니다" -"(예: `evaluate method `_를 사용할 때)" +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "[고려되는 대안](#고려되는 대안)" -#: ../../source/how-to-run-simulations.rst:150 -msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with :code:" -"`CUDA_VISIBLE_DEVICES=\"\"` when launching your experiment." -msgstr "" -"동일한 머신에서 여러 개의 독립적인 Flower 시뮬레이션을 실행하려면, 실험을 시" -"작할 때 :code:`CUDA_VISIBLE_DEVICES=\"\"`로 GPU를 마스킹해야 합니다." +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "[부록](#부록)" -#: ../../source/how-to-run-simulations.rst:153 -msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` are " -"not `enforced` (i.e. they can be exceeded) which can result in the situation " -"of client using more VRAM than the ratio specified when starting the " -"simulation." -msgstr "" -"또한 :code:`client_resources`에 전달된 GPU 리소스 제한이 '강제'되지 않아(즉, " -"초과할 수 있음) 클라이언트가 시뮬레이션을 시작할 때 지정된 비율보다 더 많은 " -"VRAM을 사용하는 상황이 발생할 수 있습니다." 
+#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "요약" -#: ../../source/how-to-run-simulations.rst:156 -msgid "TensorFlow with GPUs" -msgstr "GPU를 사용한 TensorFlow" +#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "\\[TODO - 문장 1: 문제 요약\\]" -#: ../../source/how-to-run-simulations.rst:158 -msgid "" -"When `using a GPU with TensorFlow `_ " -"nearly your entire GPU memory of all your GPUs visible to the process will " -"be mapped. This is done by TensorFlow for optimization purposes. However, in " -"settings such as FL simulations where we want to split the GPU into multiple " -"`virtual` clients, this is not a desirable mechanism. Luckily we can disable " -"this default behavior by `enabling memory growth `_." -msgstr "" -"`TensorFlow와 함께 GPU를 사용 `_하면 프" -"로세스에 보이는 모든 GPU의 거의 전체 GPU 메모리가 매핑됩니다. 이는 최적화 목" -"적으로 TensorFlow에서 수행됩니다. 그러나 GPU를 여러 개의 '가상' 클라이언트로 " -"분할하려는 FL 시뮬레이션과 같은 설정에서는 이는 바람직한 메커니즘이 아닙니" -"다. 다행히도 '메모리 증가 활성화 `_'를 통해 이 기본 동작을 비활성화할 수 있습니" -"다." +#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "\\[TODO - 문장 2: 솔루션 요약\\]" -#: ../../source/how-to-run-simulations.rst:160 -msgid "" -"This would need to be done in the main process (which is where the server " -"would run) and in each Actor created by the VCE. By means of :code:" -"`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` in order " -"to specify a function to be executed upon actor initialization. In this " -"case, to enable GPU growth for TF workloads. It would look as follows:" -msgstr "" -"이 작업은 메인 프로세스(서버가 실행되는 곳)와 VCE에서 생성한 각 액터에서 수행" -"해야 합니다. :code:`actor_kwargs`를 통해 예약 키 `\"on_actor_init_fn\"`을 전" -"달하여 액터 초기화 시 실행할 함수를 지정할 수 있습니다. 이 경우 TF 워크로드" -"에 대한 GPU 증가를 활성화합니다. 
다음과 같이 보입니다:" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "동기" -#: ../../source/how-to-run-simulations.rst:179 -msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ " -"example." -msgstr "" -"이것이 바로`Tensorflow/Keras Simulation `_ 예제에서 사용된 메커니즘입니다." +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" +msgstr "\\[TODO\\]" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" -msgstr "멀티 노드 설정" +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "목표" -#: ../../source/how-to-run-simulations.rst:185 -msgid "" -"The VCE does not currently offer a way to control on which node a particular " -"`virtual` client is executed. In other words, if more than a single node " -"have the resources needed by a client to run, then any of those nodes could " -"get the client workload scheduled onto. Later in the FL process (i.e. in a " -"different round) the same client could be executed by a different node. " -"Depending on how your clients access their datasets, this might require " -"either having a copy of all dataset partitions on all nodes or a dataset " -"serving mechanism (e.g. using nfs, a database) to circumvent data " -"duplication." -msgstr "" -"VCE는 현재 특정 '가상' 클라이언트를 어느 노드에서 실행할지 제어하는 방법을 제" -"공하지 않습니다. 
즉, 클라이언트가 실행하는 데 필요한 리소스가 하나 이상의 노" -"드에 있는 경우 해당 노드 중 어느 노드에나 클라이언트 워크로드가 예약될 수 있" -"습니다. FL 프로세스 후반부(즉, 다른 라운드에서)에는 동일한 클라이언트가 다른 " -"노드에서 실행될 수 있습니다. 클라이언트가 데이터 세트에 액세스하는 방식에 따" -"라 모든 노드에 모든 데이터 세트 파티션의 복사본을 보유하거나 데이터 중복을 피" -"하기 위해 데이터 세트 제공 메커니즘(예: nfs, 데이터베이스 사용)을 사용해야 " -"할 수 있습니다." +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "목표가 아닌 것" -#: ../../source/how-to-run-simulations.rst:187 -msgid "" -"By definition virtual clients are `stateless` due to their ephemeral nature. " -"A client state can be implemented as part of the Flower client class but " -"users need to ensure this saved to persistent storage (e.g. a database, " -"disk) and that can be retrieve later by the same client regardless on which " -"node it is running from. This is related to the point above also since, in " -"some way, the client's dataset could be seen as a type of `state`." -msgstr "" -"정의상 가상 클라이언트는 임시적 특성으로 인해 '상태 없음'입니다. 클라이언트 " -"상태는 Flower 클라이언트 클래스의 일부로 구현할 수 있지만, 사용자는 이를 영" -"구 저장소(예: 데이터베이스, 디스크)에 저장하여 나중에 실행 중인 노드와 관계없" -"이 동일한 클라이언트가 검색할 수 있도록 해야 합니다. 이는 어떤 식으로든 클라" -"이언트의 데이터 세트가 일종의 '상태'로 볼 수 있기 때문에 위의 요점과도 관련" -"이 있습니다." +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "제안" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" -msgstr "모델 체크포인트 저장 및 로드" +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "단점" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 -msgid "" -"Flower does not automatically save model updates on the server-side. 
This " -"how-to guide describes the steps to save (and load) model checkpoints in " -"Flower." -msgstr "" -"Flower는 서버 측에서 모델 업데이트를 자동으로 저장하지 않습니다. 이 사용법 가" -"이드에서는 Flower에서 모델 체크포인트를 저장(및 로드)하는 단계에 대해 설명합" -"니다." +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "고려되는 대안" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" -msgstr "모델 체크포인트" +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" +msgstr "\\[대안 1\\]" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/fed/0000-20200102-fed-template.md:56 +msgid "\\[Alternative 2\\]" +msgstr "\\[대안 2\\]" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "Flower Enhancement Doc" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "[Enhancement Doc 템플릿](#enhancement-doc-템플릿)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "[Metadata](#metadata)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" +msgstr "[워크플로우](#워크플로우)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "[GitHub Issues](#github-issues)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "[Google Docs](#google-docs)" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" +msgstr "Flower Enhancement는 다음과 같은 표준화된 개발 프로세스입니다" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger 
changes" +msgstr "더 큰 변경 사항을 제안하기 위한 공통 구조를 제공합니다" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "변화의 동기가 분명한지 확인합니다" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" +msgstr "버전 관리 시스템에서 프로젝트 정보를 유지합니다" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" +msgstr "사용자에게 영향력 있는 변화에 대한 동기를 문서화합니다" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "운행 중 작업 추적을 위한 깃허브 이슈를 예약합니다" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"Model updates can be persisted on the server-side by customizing :code:" -"`Strategy` methods. Implementing custom strategies is always an option, but " -"for many cases it may be more convenient to simply customize an existing " -"strategy. The following code example defines a new :code:`SaveModelStrategy` " -"which customized the existing built-in :code:`FedAvg` strategy. In " -"particular, it customizes :code:`aggregate_fit` by calling :code:" -"`aggregate_fit` in the base class (:code:`FedAvg`). It then continues to " -"save returned (aggregated) weights before it returns those aggregated " -"weights to the caller (i.e., the server):" +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" msgstr "" -":code:`Strategy` 메소드를 사용자 지정하여 서버 측에서 모델 업데이트를 지속할 " -"수 있습니다. 사용자 지정 전략을 구현하는 것은 항상 옵션이지만 대부분의 경우 " -"기존 전략을 간단히 사용자 지정하는 것이 더 편리할 수 있습니다. 다음 코드 예시" -"는 기존의 기본 제공 :code:`FedAvg` 전략을 사용자 지정한 새로운 :code:" -"`SaveModelStrategy`를 정의합니다. 특히, 기본 클래스(:code:`FedAvg`)에서 :" -"code:`aggregate_fit`을 호출하여 :code:`aggregate_fit`을 사용자 지정합니다. 
그" -"런 다음 호출자(즉, 서버)에게 집계된 가중치를 반환하기 전에 반환된(집계된) 가" -"중치를 계속 저장합니다:" +"커뮤니티 참여자가 하나 이상의 릴리즈에서 변경 사항을 성공적으로 완료할 수 있도록 하는 동시에 이해 관계자가 프로세스 전반에 걸쳐 " +"적절히 대표되도록 보장합니다" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -msgid "Save and load PyTorch checkpoints" -msgstr "파이토치 체크포인트 저장 및 로드" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "따라서 Enhancement 문서에는 다음과 같은 측면이 결합되어 있습니다" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "기능 및 effort-tracking 문서" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "제품 요구 사항 문서" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "디자인 문서" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"Similar to the previous example but with a few extra steps, we'll show how " -"to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be " -"transformed into a list of NumPy ``ndarray``'s, then those are transformed " -"into the PyTorch ``state_dict`` following the ``OrderedDict`` class " -"structure." -msgstr "" -"이전 예제와 비슷하지만 몇 가지 단계가 추가되어 ``torch.save`` 함수를 사용하" -"여 파이토치 체크포인트를 저장하는 방법을 보여드리겠습니다. 먼저, " -"``aggregate_fit``은 ``Parameters`` 객체를 반환하는데, 이 객체는 NumPy " -"``ndarray``의 목록으로 변환되어야 하며, ``OrderedDict`` 클래스 구조에 따라 파" -"이토치 ``state_dict``로 변환됩니다." +"into one file, which is created incrementally in collaboration with the " +"community." +msgstr "를 하나의 파일로 통합하여 커뮤니티와 협력해 점진적으로 생성합니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"To load your progress, you simply append the following lines to your code. 
" -"Note that this will iterate over all saved checkpoints and load the latest " -"one:" +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" -"진행 상황을 로드하려면 코드에 다음 줄을 추가하기만 하면 됩니다. 이렇게 하면 " -"저장된 모든 체크포인트를 반복하고 최신 체크포인트를 로드합니다:" +"Flower에 제안된 변경 사항이나 기능을 멀리 가져오는 경우, 프로젝트의 향후 변경 사항을 이해하고 전달하기 위해 단일 " +"GitHub 이슈 또는 pull request를 넘어서는 abstraction이 필요합니다." -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as in " -"the ``initial_parameters`` when defining a ``Strategy``." +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." msgstr "" -"``전략``을 정의할 때 ``초기_파라미터``와 같이 필요한 경우 ``파라미터`` 유형" -"의 이 객체를 반환/사용합니다." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" -msgstr "Flower 1.0으로 업그레이드" +"이 프로세스의 목적은 커뮤니티 내 '부족한 지식'의 양을 줄이는 것입니다. 이 프로세스는 Slack 스레드, 영상 통화, 복도 " +"대화에서 나온 의사 결정을 잘 추적된 아티팩트로 옮김으로써 커뮤니케이션과 검색 가능성을 향상시키는 것을 목표로 합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable " -"foundation for future growth. Compared to Flower 0.19 (and other 0.x series " -"releases), there are a few breaking changes that make it necessary to change " -"the code of existing 0.x-series projects." +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. 
If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." msgstr "" -"Flower 1.0이 출시되었습니다. 새로운 기능과 함께 Flower 1.0은 향후 성장을 위" -"한 안정적인 기반을 제공합니다. Flower 0.19(및 다른 0.x 시리즈 릴리스)와 비교" -"했을 때 기존 0.x 시리즈 프로젝트의 코드를 변경해야 하는 몇 가지 획기적인 변" -"경 사항이 있습니다." +"대략적으로 사용자를 대상으로 하는 대규모 개선 사항은 개선 프로세스를 따라야 합니다. 개선 사항을 작성자나 개발자 이외의 다른 " +"사람에게 서면 또는 구두로 설명해야 하는 경우에는 개선 문서 작성을 고려하세요." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" -msgstr "업데이트 설치" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +msgid "" +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." +msgstr "" +"마찬가지로 개발 커뮤니티의 많은 부분에 영향을 미치는 기술적 노력(리팩토링, 주요 아키텍처 변경)도 널리 알려야 합니다. 개선 " +"프로세스는 일반 사용자나 운영자에게 전혀 영향을 미치지 않더라도 이를 위해 적합합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either pip " -"or Poetry:" +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." msgstr "" -"다음은 pip 또는 Poetry를 사용하여 기존 설치를 Flower 1.0으로 업데이트하는 방" -"법입니다:" +"작은 변경 및 추가의 경우, 개선 프로세스를 거치는 것은 시간이 많이 걸리고 불필요합니다. 예를 들어, 새로운 연합 학습 알고리즘을" +" 추가하는 것은 Flower의 작동 방식이나 사용 방식을 변경하지 않고 기능만 추가하는 것이기 때문입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." -msgstr "pip: 설치할 때 ``-U``를 추가합니다." 
- -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" -msgstr "" -"``python -m pip install -U flwr``(``start_server`` 및 ``start_client``를 사용" -"하는 경우)" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." +msgstr "기능 개선은 이미 구현할 수 있는 경로가 마련되어 있고 커뮤니티 구성원들이 지지하는 것이므로 기능 요청과는 다릅니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"``python -m pip install -U 'flwr[simulation]'`` (when using " -"``start_simulation``)" +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." msgstr "" -"``python -m pip install -U 'flwr[simulation]'``(``start_simulation`` 사용 시)" +"개선 사항은 정의된 템플릿과 참조용으로 Enhancement Doc.를 검토하고 저장하는 워크플로우를 따르는 Markdown 파일에" +" 캡처됩니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 -msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." -msgstr "" -"Poetry: ``pyproject.toml``에서 ``flwr`` dependency을 업데이트한 다음 다시 설" -"치하세요(``poetry 설치``를 실행하기 전에 ``rm poetry.lock``을 통해 ``poetry." -"lock``을 삭제하는 것을 잊지 마세요)." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "Enhancement Doc 템플릿" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" -msgstr "``flwr = \"^1.0.0\"``(``start_server`` 및 ``start_client`` 사용 시)" +"Each enhancement doc is provided as a Markdown file having the following " +"structure" +msgstr "각 개선 사항 문서는 다음과 같은 구조의 Markdown 파일로 제공됩니다" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 -msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when using " -"``start_simulation``)" -msgstr "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` " -"(``start_simulation`` 사용 시)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +msgstr "Metadata ([아래 설명](#metadata) YAML preamble 형식)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" -msgstr "필수 변경 사항" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "Title (metadata와 같게)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." -msgstr "다음과 같은 주요 변경 사항에는 수동 업데이트가 필요합니다." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "Table of Contents (필요시)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" -msgstr "일반" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "Notes/Constraints/Caveats (선택 사항)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 -msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). 
" -"Here's an example:" -msgstr "모든 전달인자를 위치 전달인자가 아닌 키워드 전달인자로 전달합니다. 다음은 " -"예시입니다:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "Design Details (선택 사항)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 -msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" -msgstr "" -"Flower 0.19 (위치 전달인자): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "졸업 기준" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 -msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", client=FlowerClient())``" -msgstr "" -"Flower 1.0 (키워드 전달인자): ``start_client(server_address=\"127.0.0.1:" -"8080\", client=FlowerClient())``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "업그레이드/다운그레이드 전략(해당되는 경우)" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" -msgstr "클라이언트" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." +msgstr "참고로 이 문서는 위의 구조를 따릅니다." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 -msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" -msgstr "" -"``NumPyClient``의 서브클래스: ``def get_parameters(self):``를 ``def " -"get_parameters(self, config):``로 변경합니다" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "Metadata" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." msgstr "" -"``클라이언트``의 서브클래스: ``def get_parameters(self):``를 ``def " -"get_parameters(self, ins: GetParametersIns):``로 변경합니다" +"**피드 번호** (필수) 마지막 Flower Enhancement 문서의 `피드 번호` + 1. 이 번호를 사용하면 다른 제안을 " +"쉽게 참조할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "전략 / ``start_server`` / ``start_simulation``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." +msgstr "**제목** (필수) 제안서의 제목을 평이한 언어로 입력합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" -msgstr "" -"Dictionary 대신 ``ServerConfig``를 ``start_server`` 및 ``start_simulation``" -"에 전달합니다. 다음은 예제입니다:" +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." +msgstr "**상태** (필수) 제안의 현재 상태입니다. 가능한 상태는 [워크플로](#워크플로)를 참조하세요." 
-#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" -msgstr "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." +msgstr "**저자** (필수) 제안서의 작성자 목록입니다. 간단히 GitHub ID입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"Flower 1.0: ``start_server(..., config=flwr.server." -"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" -msgstr "" -"Flower 1.0: ``start_server(..., config=flwr.server." -"ServerConfig(num_rounds=3, round_timeout=600.0), ...)``" +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." +msgstr "**생성 날짜** (필수) PR에서 제안서를 처음 제출한 날짜입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" -msgstr "" -"``start_simulation``의 ``num_rounds=1``을 새로운 ``config=ServerConfig(...)``" -"로 바꿉니다(이전 항목 참조)" +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." +msgstr "**마지막 업데이트** (선택 사항) 제안서가 마지막으로 크게 변경된 날짜입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by " -"configuring the strategy to sample all clients for evaluation after the last " -"round of training." -msgstr "" -"'start_server`` 호출에서 ``force_final_distributed_eval`` 매개변수를 제거합니" -"다. 
모든 클라이언트에 대한 분산 평가는 마지막 훈련 라운드 후 평가를 위해 모" -"든 클라이언트를 샘플링하도록 전략을 구성하여 활성화할 수 있습니다." +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." +msgstr "**함께 보기** (선택 사항) 이 제안과 관련된 다른 제안 목록입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" -msgstr "매개변수/ndarray 변환 함수의 이름을 바꿉니다:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." +msgstr "**대체** (선택 사항) 이 제안이 대체하는 제안 목록입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." +msgstr "**대체됨** (선택 사항) 이 제안이 대체하는 제안의 목록입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "워크플로우" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"Strategy initialization: if the strategy relies on the default values for " -"``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize FedAvg " -"with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. 
This person also has to find " +"committers to Flower willing to review the proposal." msgstr "" -"전략 초기화: 전략이 ``fraction_fit`` 및 ``fraction_evaluate``의 기본값에 의존" -"하는 경우 ``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 수동 설정합니" -"다. 전략을 수동으로 생성하지 않는 프로젝트(전략 인스턴스를 전달하지 않고 " -"``start_server`` 또는 ``start_simulation``을 호출하여)는 이제 " -"``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 설정하여 FedAvg를 수동" -"으로 초기화해야 합니다." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "기본 제공 전략 매개변수의 이름을 바꿉니다(예: ``FedAvg``):" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" -msgstr "``fraction_eval`` --> ``fraction_evaluate``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" -msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "``eval_fn`` --> ``evaluate_fn``" +"개선 사항을 구성하는 아이디어는 이미 커뮤니티에서 논의되었거나 제안된 적이 있어야 합니다. 따라서 개선 사항을 주도하는 사(보통 " +"작성자)이 필요합니다. 이 사람은 또한 제안을 검토할 의향이 있는 Flower 커미터를 찾아야 합니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." msgstr "" -"``rnd``의 이름을 ``server_round``로 바꿉니다. 
이는 여러 메서드 및 함수(예: " -"``configure_fit``, ``aggregate_fit``, ``configure_evaluate``, " -"``aggregate_evaluate`` 및 ``evaluate_fn``)에 영향을 미칩니다." - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "``server_round`` 및 ``config``를 ``evaluate_fn``에 추가합니다:" +"새 개선 사항은 `NNNN-YYYYMMDD-enhancement-title.md` 형식의 파일 이름으로 체크인되며, `NNNN`은 " +"Flower 개선 문서 번호이고 `enhancements`에 해당합니다. 모든 개선 사항은 pull request의 일부로 `잠정`" +" 상태에서 시작됩니다. 토론은 pull request 검토의 일부로 이루어집니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, " -"Dict[str, Scalar]]]:``" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." msgstr "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, " -"Dict[str, Scalar]]]:``" +"개선 사항이 검토 및 승인되면 상태가 '구현 가능'으로 변경됩니다. 그런 다음 실제 구현은 별도의 pull requests를 통해 " +"이루어집니다. 이러한 pull requests는 설명의 일부로 해당 개선 사항을 언급해야 합니다. 구현이 완료되면 제안 상태는 " +"'구현됨'으로 변경됩니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: " -"Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: " -"Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" -msgstr "사용자 정의 전략" +"Under certain conditions, other states are possible. 
An Enhancement has " +"the following states:" +msgstr "특정 조건에서는 다른 상태도 가능합니다. 개선에는 다음과 같은 상태가 있습니다:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"The type of parameter ``failures`` has changed from ``List[BaseException]`` " -"to ``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (in " -"``aggregate_fit``) and ``List[Union[Tuple[ClientProxy, EvaluateRes], " -"BaseException]]`` (in ``aggregate_evaluate``)" -msgstr "" -"매개변수 ``failures``의 유형이 ``List[BaseException]``에서 " -"``List[Union[Tuple[ClientProxy], FitRes], BaseException]]``(``aggregate_fit``" -"에서) 및 ``List[Union[Tuple[ClientProxy], EvaluateRes], " -"BaseException]]``(``aggregate_evaluate``)로 변경되었습니다" +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." +msgstr "'잠정적': 개선 사항이 제안되어 활발히 정의되고 있습니다. 제안이 구체화되고 활발하게 정의 및 논의되는 동안의 시작 단계입니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 -msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" -msgstr "" -"이제 ``Strategy`` 메서드 ``evaluate``는 현재 federated 학습/평가 라운드를 첫 " -"번째 파라미터로 받습니다:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." +msgstr "`구현 가능`: 개선 사항이 검토 및 승인되었습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." 
+msgstr "`구현됨`: 개선 사항이 구현되었으며 더 이상 활발히 변경되지 않습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." +msgstr "'지연됨': 개선 사항이 제안되었지만 아직 활발히 작업 중이 아닙니다." + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -" -"> Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -" -"> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." +msgstr "`거부됨`: 작성자와 검토자는 이 개선 사항을 더 이상 진행하지 않기로 결정했습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" -msgstr "선택적 개선 사항" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." +msgstr "`철회`: 작성자가 개선 사항을 철회했습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 -msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" -msgstr "" -"위의 필수 변경 사항과 함께 방금 가능한 여러 가지 잠재적 개선 사항이 있습니다:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "'대체됨': 개선 사항이 새로운 개선 사항으로 대체되었습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then empty " -"placeholder implementations of ``evaluate`` are no longer necessary." 
+"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." msgstr "" -"``Client`` 또는 ``NumPyClient``의 서브 클래스에서 \"placeholder\" 메서드를 제" -"거합니다. 예를 들어 서버 측 평가를 사용하는 경우 ``evaluate``의 빈 자리 표시" -"자 구현은 더 이상 필요하지 않습니다." +"GitHub에서 이미 제공하는 프로세스(이슈 및 Pull Requests)에 추가 프로세스를 추가하면 더 복잡해지고 잠재적인 처음인" +" 기여자에게는 장벽이 될 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." msgstr "" -"``start_simulation``을 통해 라운드 타임아웃을 구성합니다: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"현재 기능 이슈 템플릿에서 요구되는 한 문장 설명 이상으로 제안서 템플릿을 확장하는 것은 영어가 모국어가 아닌 사용자에게는 큰 " +"부담이 될 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" -msgstr "추가 도움말" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "GitHub 이슈" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"Most official `Flower code examples `_ are already updated to Flower 1.0, they can serve as a " -"reference for using the Flower 1.0 API. If there are further questions, " -"`join the Flower Slack `_ and use the channel " -"``#questions``." -msgstr "" -"대부분의 공식 ``Flower code 예제 `_는 이미 Flower 1.0으로 업데이트되어 있으며, Flower 1.0 API를 사용" -"하기 위한 참고 자료로 사용할 수 있습니다. 더 궁금한 점이 있다면 ``플라워 슬" -"랙 `_에 가입하여 ``#questions`` 채널을 이용하" -"세요." 
+"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." +msgstr "" +"이러한 종류의 개선을 위해 GitHub 이슈를 사용하면 가능합니다. 예를 들어 태그를 사용하여 다른 이슈와 구별하고 필터링할 수 " +"있습니다. 주요 이슈는 개선 사항에 대해 토론하고 검토하는 것입니다: GitHub 이슈에는 댓글 스레드가 하나만 있습니다. 개선 " +"사항에는 일반적으로 문서의 여러 부분에 대해 동시에 여러 개의 토론 스레드가 있습니다. GitHub 이슈를 사용할 때 이러한 여러 " +"토론을 관리하면 혼란스러울 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -msgid "Upgrade to Flower Next" -msgstr "Flower Next 업그레이드" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "Google 문서 도구" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! Whether " -"you're a seasoned user or just getting started, this guide will help you " -"smoothly transition your existing setup to take advantage of the latest " -"features and improvements in Flower Next, starting from version 1.8." +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." msgstr "" -"Flower에서 Flower Next로의 업데이트를 위한 이동 가이드에 오신 것을 환영합니" -"다! 이 가이드는 숙련된 사용자든 이제 막 시작한 사용자든 상관없이 기존 설정을 " -"원활하게 전환하여 버전 1.8부터 Flower Next의 최신 기능 및 개선 사항을 활용할 " -"수 있도록 도와드립니다." 
+"Google 문서는 여러 스레드의 토론을 허용합니다. 하지만 Google 문서는 프로젝트 외부에서 호스팅되므로 커뮤니티에서 검색할 " +"수 있도록 관리해야 합니다. 모든 제안에 대한 링크 목록을 관리하고 커뮤니티에 제공해야 합니다. Flower 저장소의 일부로 " +"제안서를 보낼 때와 비교하면 링크가 누락될 가능성이 훨씬 더 높습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:9 -msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another guide, " -"we will show how to run Flower Next end-to-end with pure Flower Next APIs." -msgstr "" -"이 가이드에서는 Flower Next의 *호환성 레이어*를 사용하여 최소한의 코드 변경으" -"로 ``1.8`` 이전의 Flower 코드를 재사용하는 방법을 보여줍니다. 다른 가이드에서" -"는 순수한 Flower Next API로 Flower Next를 end-to-end로 실행하는 방법을 보여드" -"리겠습니다." +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "FED - Flower 개선 문서" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" -msgstr "자세히 알아봅시다!" +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" +msgstr "종합 평가 결과" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"Here's how to update an existing installation of Flower to Flower Next with " -"``pip``:" -msgstr "" -"기존에 설치된 Flower to Flower Next를 ``pip``으로 업데이트하는 방법은 다음과 " -"같습니다:" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." +msgstr "Flower 서버는 평가 결과를 집계하는 방법을 규정하고 있지 않지만 사용자가 결과 집계를 완전히 사용자 지정할 수 있습니다." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:54 -msgid "or if you need Flower Next with simulation:" -msgstr "또는 시뮬레이션이 포함된 Flower Next가 필요한 경우:" +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "사용자 지정 평가 결과 집계" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#, fuzzy msgid "" -"Ensure you set the following version constraint in your ``requirements.txt``" -msgstr "``requirements.txt``에서 다음 버전 제약 조건을 설정했는지 확인하세요" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" +msgstr "" +"동일한 :code:`Strategy`-사용자 지정 방식을 사용하여 개별 클라이언트로부터 오는 사용자 지정 평가 결과를 집계할 수 " +"있습니다. 클라이언트는 dictionary를 반환하여 사용자 지정 지표를 서버에 반환할 수 있습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" -msgstr "또는 ``pyproject.toml``:" +#: ../../source/how-to-aggregate-evaluation-results.rst:39 +msgid "" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" +msgstr "그런 다음 서버는 사용자 지정 전략을 사용하여 이러한 dictionaries에서 제공하는 메트릭을 집계할 수 있습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -msgid "Using Poetry" -msgstr "Poetry 사용" +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" +msgstr "SuperNodes 인증하기" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. 
" +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -"``pyproject.toml``에서 ``flwr`` 의존성를 업데이트한 다음 다시 설치하세요(``" -"poetry install``을 실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 " -"삭제하는 것을 잊지 마세요)." +"Flower는 SuperLink에 연결하는 각 SuperNodes의 신원을 확인하는 데 사용할 수 있는 인증된 SuperNodes에" +" 대한 기본 지원을 제공합니다. Flower 노드 인증은 GitHub SSH 인증 방식과 유사하게 작동합니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 -msgid "" -"Ensure you set the following version constraint in your ``pyproject.toml``:" -msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" +#: ../../source/how-to-authenticate-supernodes.rst:8 +msgid "SuperLink (server) stores a list of known (client) node public keys" +msgstr "SuperLink(서버)는 알려진 (클라이언트) 노드 공개키 목록을 저장합니다" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-authenticate-supernodes.rst:9 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, you " -"create a |clientapp_link|_ and start it via the command line. Instead of " -"starting a server in code via ``start_server()``, you create a |" -"serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you to " -"run your project both in the traditional way and in the Flower Next way:" -msgstr "" -"Flower Next에서는 *infrastructure*와 *application layers*가 분리되었습니다. " -"코드에서 ``start_client()``를 통해 클라이언트를 시작하는 대신, 명령줄을 통해 " -"|clientapp_link|_를 생성하여 시작합니다. 코드에서 ``start_server()``를 통해 " -"서버를 시작하는 대신 |serverapp_link|_를 생성하고 명령줄을 통해 서버를 시작합" -"니다. 서버와 클라이언트의 장기 실행 컴포넌트를 SuperLink와 SuperNode라고 합니" -"다. 
수동 업데이트가 필요하지 않고 기존 방식과 Flower Next 방식 모두에서 프로" -"젝트를 실행할 수 있는 non-breaking 변경 사항은 다음과 같습니다:" - -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -msgid "|clientapp_link|_" -msgstr "|clientapp_link|_" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "SuperNode와 SuperLink는 ECDH를 사용하여 독립적으로 공유된 비밀을 도출합니다" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-authenticate-supernodes.rst:10 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it via " -"|startclient_link|_. Here's an example:" -msgstr "" -"|clientapp_link|_를 통해 실행하는 대신 기존 클라이언트를 |clientapp_link|_로 " -"래핑하세요. 다음은 예시입니다:" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" +msgstr "비밀 공유는 SuperNode에서 SuperLink로 토큰으로 전송된 메시지의 HMAC 값을 계산하는 데 사용됩니다" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 -msgid "|serverapp_link|_" -msgstr "|serverapp_link|_" +#: ../../source/how-to-authenticate-supernodes.rst:12 +msgid "SuperLink verifies the token" +msgstr "SuperLink가 토큰을 확인합니다" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-authenticate-supernodes.rst:14 msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting the " -"server via |startserver_link|_. Here's an example:" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." msgstr "" -"서버를 시작하려면 |startserver_link|_를 통해 서버를 시작하는 대신 기존 전략" -"을 |serverapp_link|_로 래핑하세요. 다음은 예시입니다:" - -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" -msgstr "배포" +"인증된 환경에서 Flower로 연합 학습을 시연하는 전체 '코드 예제 " +"`_를 확인하는 것이 좋습니다." 
-#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-authenticate-supernodes.rst:20 msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, in " -"sequence, |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|" -"_. There is no need to execute `client.py` and `server.py` as Python scripts." -msgstr "" -"실행하기 전에 |flowernext_superlink_link|_를 사용하여 ``SuperLink``를 실행한 " -"후 |flowernext_clientapp_link|_(2회) 및 |flowernext_serverapp_link|_를 " -"순서대로 실행합니다. 'client.py'와 'server.py'를 Python 스크립트로 실행할 " -"필요는 없습니다." +"This guide covers a preview feature that might change in future versions " +"of Flower." +msgstr "이 가이드에서는 향후 버전의 Flower에서 변경될 수 있는 미리보기 기능에 대해 설명합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-authenticate-supernodes.rst:24 msgid "" -"Here's an example to start the server without HTTPS (only for prototyping):" -msgstr "" -"다음은 HTTPS 없이 서버를 시작하는 예제입니다(프로토타이핑용으로만 사용):" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." +msgstr "보안을 강화하기 위해 노드 인증은 암호화된 연결(SSL/TLS)을 사용하도록 설정한 경우에만 사용할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-authenticate-supernodes.rst:28 +#, fuzzy +msgid "Enable node authentication in ``SuperLink``" +msgstr ":code:`SuperLink`에서 노드 인증 활성화" + +#: ../../source/how-to-authenticate-supernodes.rst:30 +#, fuzzy msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-certfile``, " -"``--ssl-certfile``, and ``--ssl-keyfile`` command line options to pass paths " -"to (CA certificate, server certificate, and server private key)." -msgstr "" -"다음은 HTTPS로 시작하는 또 다른 예제입니다. '`--ssl-ca-certfile``, '`--ssl-" -"certfile``, '`--ssl-keyfile`` 명령줄 옵션을 사용하여 (CA 인증서, 서버 인증서 " -"및 서버 개인 키)의 경로를 전달합니다." +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. 
You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" +msgstr "" +"노드 인증을 활성화하려면 먼저 SuperLink<>SuperNode 통신을 보호하기 위해 SSL/TLS 연결을 구성해야 합니다. " +"전체 가이드는 `여기 `_에서 확인할 수 있습니다. 보안 연결을 구성한 후, 장기 실행하는 Flower " +":code:`SuperLink`에서 클라이언트 인증을 활성화할 수 있습니다. 다음 터미널 명령을 사용하여 보안 연결과 노드 인증이 " +"모두 활성화된 Flower :code:`SuperNode`를 시작하세요:" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 -msgid "Simulation in CLI" -msgstr "CLI 시뮬레이션" +#: ../../source/how-to-authenticate-supernodes.rst:47 +msgid "Let's break down the authentication flags:" +msgstr "인증 플래그를 세분화해 보겠습니다:" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-authenticate-supernodes.rst:49 +#, fuzzy msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and |" -"serverapp_link|_, respectively. There is no need to use |startsim_link|_ " -"anymore. Here's an example:" +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." msgstr "" -"기존 클라이언트와 전략을 각각 |clientapp_link|_와 |serverapp_link|_로 래핑하" -"세요. 더 이상 |startsim_link|_를 사용할 필요가 없습니다. 다음은 예시입니다:" +"첫 번째 플래그 :code:`--auth-list-public-keys`는 알려진 모든 노드 공개키를 저장하는 CSV 파일의 경로를" +" 기대합니다. federation에 참여하도록 허용된 모든 알려진 노드 공개 키를 하나의 CSV 파일(:code:`.csv`)에 " +"저장해야 합니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-authenticate-supernodes.rst:53 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script. 
" -"Here's an example (assuming the ``server_app`` and ``client_app`` objects " -"are in a ``sim.py`` module):" +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" -"CLI에서 |flower_simulation_link|_를 실행하고 Python 스크립트를 실행하는 대신 " -"코드에서 ``server_app`` / ``client_app`` 개체를 가리키세요. 다음은 예제입니다" -"(``server_app`` 및 ``client_app`` 객체가 ``sim.py`` 모듈에 있다고 가정):" +"알려진 노드 공개키를 저장하는 유효한 CSV 파일은 쉼표로 구분하고 주석 없이 OpenSSH 형식으로 키를 나열해야 합니다. 예를 " +"들어, 두 개의 알려진 노드 공개키가 포함된 CSV 파일이 포함된 코드 샘플을 참조하세요." -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-authenticate-supernodes.rst:57 +#, fuzzy msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the ``client_resources`` " -"argument in |startsim_link|_. Here's an example:" +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." msgstr "" -"|startsim_link|_에서 ``client_resources`` 인수를 설정하는 대신 ``--backend-" -"config`` 명령줄 인수를 사용하여 각 |clientapp_link|_에 대한 기본 리소스를 설" -"정하세요. 다음은 예시입니다:" +"두 번째 및 세 번째 플래그 :code:`--auth-superlink-private-key` 및 :code:`--auth-" +"superlink-public-key`는 서버의 개인 및 공개 키의 경로를 예상합니다. 개발 목적으로 :code:`ssh-" +"keygen -t ecdsa -b 384`를 사용하여 개인 및 공개 키 쌍을 생성할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" -msgstr "Notebook에서 시뮬레이션" +#: ../../source/how-to-authenticate-supernodes.rst:64 +msgid "" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. 
To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +msgstr "" +"Flower 1.9에서는 알려진 노드 공개키를 SuperLink에 동적으로 제거, 편집 또는 추가하는 기능이 지원되지 않습니다. " +"알려진 노드 집합을 변경하려면 서버를 종료하고 CSV 파일을 편집한 다음 서버를 다시 시작해야 합니다. 알려진 노드 집합을 동적으로" +" 변경하는 기능은 Flower 1.10(출시 예정일: 6월)에서 로드맵에 포함되어 있습니다." + +#: ../../source/how-to-authenticate-supernodes.rst:71 +#, fuzzy +msgid "Enable node authentication in ``SuperNode``" +msgstr ":code:`SuperNode`에서 노드 인증을 활성화합니다" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-authenticate-supernodes.rst:73 +#, fuzzy msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's an " -"example:" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -"notebook에서 |startsim_link|_ 대신 |runsim_link|_를 실행하세요. 다음은 예시입" -"니다:" +"장기 실행 중인 Flower 서버(:code:`SuperLink`)와 마찬가지로, 장기 실행 중인 Flower " +"클라이언트(:code:`SuperNode`)에서도 노드 인증을 쉽게 활성화할 수 있습니다. 다음 터미널 명령을 사용하여 인증된 " +":code:`SuperNode`를 시작하세요:" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-authenticate-supernodes.rst:85 +#, fuzzy msgid "" -"Some official `Flower code examples `_ are " -"already updated to Flower Next so they can serve as a reference for using " -"the Flower Next API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``. You " -"can also `participate in Flower Discuss `_ where " -"you can find us answering questions, or share and learn from others about " -"migrating to Flower Next." 
+"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." msgstr "" -"일부 공식 ``Flower 코드 예제 `_는 이미 플라" -"워 넥스트에 업데이트되어 있으므로 플라워 넥스트 API를 사용하는 데 참고할 수 " -"있습니다. 더 궁금한 점이 있다면 ``플라워 슬랙 `_에 가입하고 ``#questions`` 채널을 이용하세요. 또한, ``Flower Discuss " -"`_에 참여하여 질문에 대한 답변을 확인하거나 다른 " -"사람들과 Flower Next로의 이동에 대해 공유하고 배울 수 있습니다." +":code:`--auth-supernode-private-key` 플래그는 노드의 개인 키 파일 경로를, :code:`--auth-" +"supernode-public-key` 플래그는 노드의 공개 키 파일 경로를 예상합니다. 개발 목적으로 :code:`ssh-" +"keygen -t ecdsa -b 384`를 사용하여 개인 및 공개 키 쌍을 생성할 수 있습니다." -#: ../../source/how-to-upgrade-to-flower-next.rst:325 -msgid "Important" -msgstr "중요" +#: ../../source/how-to-authenticate-supernodes.rst:91 +msgid "Security notice" +msgstr "보안 공지" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 +#: ../../source/how-to-authenticate-supernodes.rst:93 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." msgstr "" -"Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으" -"로 업데이트될 예정입니다. 피드백이 있으면 언제든지 공유해 주세요!" - -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 🚀" -msgstr "행복한 마이그레이션! 🚀" +"시스템의 보안은 SuperLink와 각SuperNode의 자격 증명에 의존합니다. 
따라서 공개키 기반구조(PKI) 사칭 공격과 같은" +" 보안 위험을 피하기 위해 자격 증명을 보호하고 안전하게 보관하는 것이 필수적입니다. 노드 인증 메커니즘에는 사람의 상호 작용도 " +"포함되므로 모든 통신이 신뢰할 수 있는 통신 방법을 사용하여 안전한 방식으로 이루어지도록 하세요." -#: ../../source/how-to-use-built-in-mods.rst:2 -msgid "Use Built-in Mods" -msgstr "기본 제공 모드 사용" +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "결론" -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:102 +#, fuzzy msgid "" -"**Note: This tutorial covers experimental features. The functionality and " -"interfaces may change in future versions.**" +"You should now have learned how to start a long-running Flower server " +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. You should also know the significance of the private key and " +"store it safely to minimize security risks." msgstr "" -"**참고: 이 튜토리얼은 실험적인 기능을 다룹니다. 기능 및 인터페이스는 향후 버" -"전에서 변경될 수 있습니다.**" +"이제 노드 인증이 활성화된 상태에서 장기간 실행되는 Flower 서버(:code:`SuperLink`)와 " +"클라이언트(:code:`SuperNode`)를 시작하는 방법을 배웠을 것입니다. 또한 보안 위험을 최소화하기 위해 개인키의 중요성을" +" 알고 안전하게 보관해야 합니다." + +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" +msgstr "클라이언트 구성" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment the " -"behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) allow us " -"to perform operations before and after a task is processed in the " -"``ClientApp``." +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." 
msgstr ""
"모델 파라미터와 함께 Flower는 설정 값을 클라이언트에 전송할 수 있습니다. 구성 값은 다양한 용도로 사용할 수 있습니다. 예를"
" 들어 서버에서 클라이언트 측 하이퍼파라미터를 제어하는 데 널리 사용되는 방법입니다."

#: ../../source/how-to-configure-clients.rst:9
msgid "Configuration values"
msgstr "구성 값"

#: ../../source/how-to-configure-clients.rst:11
msgid ""
"Configuration values are represented as a dictionary with ``str`` keys "
"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision "
"float), ``int``, or ``str`` (or equivalent types in different languages)."
" Here is an example of a configuration dictionary in Python:"
msgstr ""
"구성 값은 ``str`` 키와 ``bool``, ``bytes``, ``double``(64비트 정밀도 부동 소수점), ``int`` 또는"
" ``str``(또는 다른 언어의 동등한 유형) 유형의 값으로 구성된 사전으로 표현됩니다. 
다음은 Python의 구성 사전 " +"예제입니다:" -#: ../../source/how-to-use-built-in-mods.rst:33 -msgid "To use mods in your ``ClientApp``, you can follow these steps:" -msgstr "``ClientApp``에서 mods를 사용하려면 다음 단계를 따르세요:" +#: ../../source/how-to-configure-clients.rst:25 +msgid "" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." +msgstr "" +"Flower는 이러한 구성 dictionaries(또는 줄여서 *config dict*)를 ProtoBuf 표현으로 직렬화하고, " +"gRPC를 사용하여 클라이언트로 전송한 다음 다시 Python dictionaries로 역직렬화합니다." -#: ../../source/how-to-use-built-in-mods.rst:36 -msgid "1. Import the required mods" -msgstr "1. 필요한 mods를 가져옵니다" +#: ../../source/how-to-configure-clients.rst:31 +msgid "" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." +msgstr "" +"현재 구성 사전에서 컬렉션 유형(예: ``Set``, ``List``, ``Map``)을 값으로 직접 전송하는 기능은 지원되지 " +"않습니다. 컬렉션을 지원되는 값 유형 중 하나로 변환한 다음 클라이언트 측에서 다시 변환하여 값으로 보내는 몇 가지 해결 방법이 " +"있습니다." -#: ../../source/how-to-use-built-in-mods.rst:38 -msgid "First, import the built-in mod you intend to use:" -msgstr "먼저 사용하려는 기본 제공 mod를 가져옵니다:" +#: ../../source/how-to-configure-clients.rst:36 +msgid "" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." +msgstr "" +"예를 들어 부동 소수점 숫자 목록을 JSON 문자열로 변환한 다음 구성 dictionary을 사용하여 JSON 문자열을 전송한 다음" +" 클라이언트에서 다시 부동 소수점 숫자 목록으로 변환할 수 있습니다." -#: ../../source/how-to-use-built-in-mods.rst:46 -msgid "2. Define your client function" -msgstr "2. 
클라이언트 기능 정의" +#: ../../source/how-to-configure-clients.rst:41 +msgid "Configuration through built-in strategies" +msgstr "기본 제공 전략을 통한 구성" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-configure-clients.rst:43 +#, fuzzy msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" -msgstr "mod(s)로 래핑할 클라이언트 함수('``client_fn``)를 정의합니다:" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." +msgstr "" +"클라이언트에 구성 값을 보내는 가장 쉬운 방법은 :code:`FedAvg`와 같은 기본 제공 전략을 사용하는 것입니다. 기본 제공 " +"전략은 소위 구성 함수를 지원합니다. 구성 함수는 내장 전략이 현재 단계의 구성 사전을 가져오기 위해 호출하는 함수입니다. 그런 " +"다음 해당 단계 동안 선택된 모든 클라이언트에 구성 사전을 전달합니다." -#: ../../source/how-to-use-built-in-mods.rst:57 -msgid "3. Create the ``ClientApp`` with mods" -msgstr "3. mods로 ``ClientApp``을 생성합니다" +#: ../../source/how-to-configure-clients.rst:49 +msgid "" +"Let's start with a simple example. Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" +msgstr "" +"간단한 예부터 시작하겠습니다. (a) 클라이언트가 사용해야 하는 배치 크기, (b) 현재 글로벌 연합 라운드, (c) 클라이언트 " +"측에서 학습할 에포크 수를 전송하고 싶다고 가정해 보겠습니다. 구성 함수는 다음과 같습니다:" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-configure-clients.rst:65 +#, fuzzy msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. 
The order in which you provide the mods matters:" +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +"``on_fit_config_fn``:" msgstr "" -"``ClientApp``을 생성하고 mods를 ``mods`` argument에 목록으로 전달합니다. mods" -"를 제공하는 순서가 중요합니다:" +"기본 제공 전략이 이 함수를 사용하도록 하려면 초기화 중에 매개 변수 :code:`on_fit_config_fn`을 사용하여 " +"``FedAvg``에 이 함수를 전달하면 됩니다:" -#: ../../source/how-to-use-built-in-mods.rst:72 -msgid "Order of execution" -msgstr "실행 순서" +#: ../../source/how-to-configure-clients.rst:75 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "클라이언트 측에서는 ``fit``으로 구성 dictionary을 받습니다:" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-configure-clients.rst:86 msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" -msgstr "``ClientApp``이 실행되면 목록에 제공된 순서대로 모드가 실행됩니다:" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." +msgstr "" +"평가를 구성하는 `on_evaluate_config_fn`도 있으며, 같은 방식으로 작동합니다. 다른 배치 크기를 사용하기 위해 " +"다른 구성 값을 `evaluate`로 보내려고 할 수 있기 때문에 이 함수는 별도의 함수입니다." + +#: ../../source/how-to-configure-clients.rst:90 +msgid "" +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" +msgstr "" +"기본 제공 전략은 매 라운드마다 이 함수를 호출합니다(즉, `Strategy.configure_fit` 또는 " +"`Strategy.configure_evaluate`가 실행될 때마다). 
매 라운드마다 `on_evaluate_config_fn`을"
" 호출하면 연속된 라운드에서 config dict를 다양하게 변경할 수 있습니다. 예를 들어 이후 라운드에서 로컬 에포크 수를 늘리기 "
"위해 하이퍼파라미터 일정을 구현하려면 다음과 같이 할 수 있습니다:"

#: ../../source/how-to-configure-clients.rst:107
#, fuzzy
msgid "The ``FedAvg`` strategy will call this function *every round*."
msgstr ":code:`FedAvg` 전략은 이 함수를 *매 라운드마다* 호출합니다."

#: ../../source/how-to-configure-clients.rst:110
msgid "Configuring individual clients"
msgstr "개별 클라이언트 구성"

#: ../../source/how-to-configure-clients.rst:112
msgid ""
"In some cases, it is necessary to send different configuration values to "
"different clients."
msgstr "경우에 따라 다른 구성 값을 다른 클라이언트에 보내야 하는 경우도 있습니다."

#: ../../source/how-to-configure-clients.rst:115
#, fuzzy
msgid ""
"This can be achieved by customizing an existing strategy or by "
":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes ``FedAvg`` by "
"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to "
"the config dict of a *single client* (only the first client in the list, "
"the other clients in this round to not receive this \"special\" config "
"value):"
msgstr ""
"이는 기존 전략을 사용자 지정하거나 :doc:`implementing a custom strategy from scratch "
"`를 통해 수행할 수 있습니다. 
다음은 사용자 지정 ``\"hello\"'를 " +"추가하여 :code:`FedAvg`를 사용자 지정하는 무의미한 예입니다: \"world\"`` 구성 키/값 쌍을 *단일 " +"클라이언트*의 config dict에 추가합니다(목록의 첫 번째 클라이언트만, 이 라운드의 다른 클라이언트는 이 \"특별한\" 구성" +" 값을 수신하지 않음):" -#: ../../source/how-to-use-built-in-mods.rst:80 -msgid "``example_mod_1`` (outermost mod on the way back)" -msgstr "``example_mod_1``(돌아가는 방법에 가장 바깥쪽 모드)" +#: ../../source/how-to-configure-logging.rst:2 +msgid "Configure logging" +msgstr "로깅 구성" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` before " -"passing it to the next mod, and likewise with the outgoing ``Message`` " -"before returning it up the stack." +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. It presents information by default " +"following a standard message format:" msgstr "" -"각 mod는 다음 mod로 전달하기 전에 들어오는 ``Message``를 검사하고 수정할 기회" -"가 있으며, 스택 위로 반환하기 전에 나가는 ``Message``도 마찬가지로 검사하고 " -"수정할 수 있습니다." +"Flower 로거는 federated 학습 워크로드에서 발생하는 모든 핵심 이벤트를 추적합니다. 기본적으로 표준 메시지 형식에 따라" +" 정보를 표시합니다:" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-configure-logging.rst:13 +#, fuzzy msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of mods " -"is crucial and affects how the input and output are processed." +"containing relevant information including: log message level (e.g. " +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" msgstr "" -"이 가이드를 따라 mods를 효과적으로 사용하여 ``ClientApp``의 기능을 향상시키" -"는 방법을 배웠습니다. mods 순서는 매우 중요하며 입력과 출력이 처리되는 방식" -"에 영향을 미친다는 점을 기억하세요." 
- -#: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" -msgstr "Mods를 통해 더욱 강력하고 유연한 ``ClientApp``을 구축해 보세요!" +"로그 메시지 수준(예: :code:`INFO`, :code:`DEBUG`), 타임스탬프, 로깅이 발생한 줄, 로그 메시지 자체 등 " +"관련 정보를 포함합니다. 이러한 방식으로 로거는 일반적으로 다음과 같은 정보를 터미널에 표시합니다:" -#: ../../source/how-to-use-differential-privacy.rst:2 -msgid "Use Differential Privacy" -msgstr "차분 개인정보 보호 사용" +#: ../../source/how-to-configure-logging.rst:35 +msgid "Saving log to file" +msgstr "파일에 로그 저장" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-configure-logging.rst:37 +#, fuzzy msgid "" -"This guide explains how you can utilize differential privacy in the Flower " -"framework. If you are not yet familiar with differential privacy, you can " -"refer to :doc:`explanation-differential-privacy`." -msgstr "" -"이 가이드에서는 Flower 프레임워크에서 차분 개인정보 보호 기능을 활용하는 방법" -"을 설명합니다. 차분 개인정보 보호에 대해 아직 익숙하지 않은 경우 :doc:" -"`explanation-differential-privacy`를 참조하세요." +"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" +msgstr "" +"기본적으로 Flower 로그는 Federated 학습 워크로드를 실행하는 터미널에 출력됩니다. 이는 gRPC 기반 " +"페더레이션(즉,:code:`fl.simulation.start_simulation`를 실행하는 경우)과 " +":code:`VirtualClientEngine`을 사용하는 경우(즉, " +":코드:`fl.simulation.start_simulation`을 실행하는 경우) 모두에 적용됩니다. 경우에 따라 이 로그를 " +"디스크에 저장하고 싶을 수도 있습니다. 이 경우 `fl.common.logger.configure() " +"`_" +" 함수를 호출하여 저장할 수 있습니다. 
예를 들어:" + +#: ../../source/how-to-configure-logging.rst:59 +#, fuzzy +msgid "" +"With the above, Flower will record the log you see on your terminal to " +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" +msgstr "" +"위와 같이 하면 Flower는 터미널에 표시되는 로그를 :code:`log.txt`에 기록합니다. 이 파일은 코드를 실행한 " +"디렉터리와 동일한 디렉터리에 생성됩니다. 검사해보면 위의 로그도 기록되지만 각 줄 앞에 :code:`identifier` 접두사가 " +"붙는 것을 확인할 수 있습니다:" + +#: ../../source/how-to-configure-logging.rst:81 +msgid "Log your own messages" +msgstr "나만의 메시지 기록" -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-configure-logging.rst:83 msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free " -"contact us to discuss your requirements and to receive guidance on how to " -"best use these features." +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." msgstr "" -"Flower의 차분 개인정보 보호는 현재 프리뷰 단계에 있습니다. 민감한 데이터가 있" -"는 프로덕션 환경에서 이러한 기능을 사용할 계획이라면 언제든지 문의하여 요구 " -"사항을 논의하고 이러한 기능을 가장 잘 사용하는 방법에 대한 안내를 받으세요." +"애플리케이션과 관련된 메시지를 더 추가하여 Flower 로거에 기본적으로 표시되는 정보를 확장할 수 있습니다. 다음과 같이 쉽게 " +"추가할 수 있습니다." -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-configure-logging.rst:114 msgid "" -"This approach consists of two seprate phases: clipping of the updates and " -"adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on the " -"server side or the client side." -msgstr "" -"이 접근 방식은 업데이트 클리핑과 집계된 모델에 노이즈 추가라는 두 가지 단계" -"로 구성됩니다. 클리핑 단계의 경우, Flower 프레임워크는 클리핑을 서버 측에서 " -"수행할지 클라이언트 측에서 수행할지 결정할 수 있도록 했습니다." 
+"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." +msgstr "이렇게 하면 로거에 기본 메시지 외에 위에서 지정한 대로 클라이언트가 소개한 메시지가 표시됩니다." -#: ../../source/how-to-use-differential-privacy.rst:15 -msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to the " -"need to perform the clipping operation for all clients." -msgstr "" -"**Server-side Clipping**: 이 방식은 서버가 모든 클라이언트의 업데이트에 대해 " -"균일한 클리핑을 적용하고 클리핑 값에 대한 통신 오버헤드를 줄일 수 있다는 장점" -"이 있습니다. 하지만 모든 클라이언트에 대해 클리핑 작업을 수행해야 하기 때문" -"에 서버의 계산 부하가 증가한다는 단점도 있습니다." +#: ../../source/how-to-configure-logging.rst:140 +msgid "Log to a remote service" +msgstr "원격 서비스에 로그인" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-configure-logging.rst:142 +#, fuzzy msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the " -"computational overhead on the server. However, it also has the disadvantage " -"of lacking centralized control, as the server has less control over the " -"clipping process." +"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" -"**Client-side Clipping**: 이 방식은 서버의 계산 오버헤드를 줄일 수 있다는 장" -"점이 있습니다. 
하지만 서버가 클리핑 프로세스에 대한 통제력이 떨어지기 때문에 "
-"centralized 제어가 부족하다는 단점도 있습니다."
+"또한 :code:`fl.common.logger.configure` 함수를 사용하면 네이티브 Python "
+":code:`logging.handler.HTTPHandler`를 통해 로그를 푸시할 수 있는 호스트를 지정할 수 "
+"있습니다(:code:`POST`를 통해). 이는 모든 엔티티(예: 서버 및 클라이언트)에서 로그를 수집하는 것이 번거로울 수 있는 "
+":code:`gRPC` 기반 Federated 학습 워크로드에서 특히 유용한 기능입니다. Flower 시뮬레이션에서는 서버가 모든 "
+"로그를 자동으로 표시합니다. 로그를 다른 곳에 백업하거나 분석하려는 경우 :code:`HTTPHandler`를 지정할 수 있습니다."

-#: ../../source/how-to-use-differential-privacy.rst:21
-msgid "Server-side Clipping"
-msgstr "서버 측 클리핑"

+#: ../../source/how-to-enable-ssl-connections.rst:2
+msgid "Enable SSL connections"
+msgstr "SSL 연결 사용"

-#: ../../source/how-to-use-differential-privacy.rst:22
+#: ../../source/how-to-enable-ssl-connections.rst:4
+#, fuzzy
 msgid ""
-"For central DP with server-side clipping, there are two :code:`Strategy` "
-"classes that act as wrappers around the actual :code:`Strategy` instance "
-"(for example, :code:`FedAvg`). The two wrapper classes are :code:"
-"`DifferentialPrivacyServerSideFixedClipping` and :code:"
-"`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and adaptive "
-"clipping."
+"This guide describes how to a SSL-enabled secure Flower server "
+"(``SuperLink``) can be started and how a Flower client (``SuperNode``) "
+"can establish a secure connections to it."
 msgstr ""
-"서버 측 클리핑이 있는 중앙 DP의 경우, 실제 :code:`Strategy` 인스턴스를 감싸"
-"는 래퍼 역할을 하는 두 개의 :code:`Strategy` 클래스가 있습니다(예: :code:"
-"`FedAvg`). 두 개의 래퍼 클래스는 고정 및 적응형 클리핑을 위한 :code:"
-"`DifferentialPrivacyServerSideFixedClipping`과 :code:"
-"`DifferentialPrivacyServerSideAdaptiveClipping`입니다."
-
-#: ../../source/how-to-use-differential-privacy.rst:-1
-msgid "server side clipping"
-msgstr "서버 측 클리핑"
+"이 가이드에서는 SSL을 지원하는 보안 Flower 서버(:code:`SuperLink`)를 시작하는 방법과 Flower "
+"클라이언트(:code:`SuperNode`)가 이 서버에 보안 연결을 설정하는 방법을 설명합니다."
-#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-enable-ssl-connections.rst:8 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-side " -"fixed clipping using the :code:`DifferentialPrivacyServerSideFixedClipping` " -"wrapper class. The same approach can be used with :code:" -"`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +"A complete code example demonstrating a secure connection can be found " +"`here `_." msgstr "" -"아래 코드 샘플은 :code:`FedAvg` 전략이 :code:" -"`DifferentialPrivacyServerSideFixedClipping` 래퍼 클래스를 사용하여 서버 측 " -"고정 클리핑을 사용할 수 있도록 합니다. 해당 입력 매개변수를 조정하여 :code:" -"`DifferentialPrivacyServerSideAdaptiveClipping`과 동일한 접근 방식을 사용할 " -"수 있습니다." +"보안 연결을 보여주는 전체 코드 예제는 '여기 " +"`_'에서 확인할 수 있습니다." -#: ../../source/how-to-use-differential-privacy.rst:52 -msgid "Client-side Clipping" -msgstr "클라이언트 측 클리핑" - -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-enable-ssl-connections.rst:11 +#, fuzzy msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower :" -"code:`Mods` to perform the clipping. Two mods are available for fixed and " -"adaptive client-side clipping: :code:`fixedclipping_mod` and :code:" -"`adaptiveclipping_mod` with corresponding server-side wrappers :code:" -"`DifferentialPrivacyClientSideFixedClipping` and :code:" -"`DifferentialPrivacyClientSideAdaptiveClipping`." -msgstr "" -"클라이언트 측 클리핑이 있는 중앙 DP의 경우 서버는 각 라운드마다 선택한 클라이" -"언트에 클리핑 값을 보냅니다. 클라이언트는 기존 Flower :code:`Mods`를 사용하" -"여 클리핑을 수행할 수 있습니다. 고정 및 적응형 클라이언트 측 클리핑에는 두 가" -"지 모드를 사용할 수 있습니다: :code:`fixedclipping_mod` 및 :code:" -"`adaptiveclipping_mod`와 해당 서버 측 래퍼 :code:" -"`DifferentialPrivacyClientSideFixedClipping` 및 :code:" -"`DifferentialPrivacyClientSideAdaptiveClipping`이 있습니다." 
+"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." +msgstr "" +"코드 예제에는 시작 방법을 설명하는 :code:`README.md` 파일이 함께 제공됩니다. 이미 SSL을 사용하도록 설정되어 " +"있지만 그 방법에 대한 설명이 부족할 수 있습니다. 이 가이드를 참고하여 이 주제에 대해 자세히 알아보세요." -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "client side clipping" -msgstr "클라이언트 측 클리핑" +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" +msgstr "인증서" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-enable-ssl-connections.rst:18 +#, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the :code:" -"`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on the " -"client, :code:`fixedclipping_mod`:" +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" -"아래 코드 샘플은 :code:`FedAvg` 전략이 클라이언트 측 고정 클리핑과 함께 차분 " -"프라이버시를 사용할 수 있도록 :code:" -"`DifferentialPrivacyClientSideFixedClipping` 래퍼 클래스와 클라이언트에서 :" -"code:`fixedclipping_mod`를 모두 사용하도록 합니다:" +"SSL 사용 연결을 사용하려면 서버와 클라이언트에 인증서를 전달해야 합니다. 이 가이드에서는 자체 서명된 인증서를 생성하겠습니다. 
" +"이 과정은 상당히 복잡할 수 있으므로 다음 명령 시퀀스를 사용하여 :code:`examples/advanced-" +"tensorflow/certificates/generate.sh`에서 스크립트를 실행하도록 요청하겠습니다:" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-enable-ssl-connections.rst:29 +#, fuzzy msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` needs " -"to configure the matching :code:`fixedclipping_mod` to perform the client-" -"side clipping:" +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "" -"서버 측 전략 래퍼 외에도 클라이언트 측 클리핑을 수행하려면 :code:`ClientApp`" -"이 일치하는 :code:`fixedclipping_mod`를 구성해야 합니다:" +"이렇게 하면 :code:`examples/advanced-tensorflow/.cache/certificates`에 인증서가 " +"생성됩니다." -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-enable-ssl-connections.rst:32 msgid "" -"To utilize local differential privacy (DP) and add noise to the client model " -"parameters before transmitting them to the server in Flower, you can use the " -"`LocalDpMod`. The following hyperparameters need to be set: clipping norm " -"value, sensitivity, epsilon, and delta." +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." msgstr "" -"로컬 차분 프라이버시(DP)를 활용하고 클라이언트 모델 파라미터를 서버로 전송하" -"기 전에 노이즈를 추가하려면 `LocalDpMod`를 사용하면 됩니다. 클리핑 노멀 값, " -"감도, 엡실론, 델타 등의 하이퍼파라미터를 설정해야 합니다." +"이 예의 맥락에서 SSL 인증서를 생성하는 접근 방식은 영감과 출발점이 될 수 있지만 프로덕션 환경에 대한 참조로 사용해서는 안 " +"됩니다. 프로덕션 환경용 인증서를 올바르게 생성하는 문제에 대해서는 다른 출처를 참조하세요. 
중요하지 않은 프로토타이핑 또는 연구 " +"프로젝트의 경우, 이 가이드에 언급된 스크립트를 사용하여 생성한 자체 서명 인증서를 사용하는 것으로 충분할 수 있습니다." -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "local DP mod" -msgstr "로컬 DP mod" +#: ../../source/how-to-enable-ssl-connections.rst:40 +msgid "Server (SuperLink)" +msgstr "서버(SuperLink)" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" -msgstr "다음은 :code:`LocalDpMod`를 사용하는 방법을 보여주는 코드 예시입니다:" +#: ../../source/how-to-enable-ssl-connections.rst:42 +msgid "" +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" +msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 서버(SuperLink)를 시작합니다:" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-enable-ssl-connections.rst:52 msgid "" -"Please note that the order of mods, especially those that modify parameters, " -"is important when using multiple modifiers. Typically, differential privacy " -"(DP) modifiers should be the last to operate on parameters." -msgstr "" -"여러 개의 수정자를 사용할 때는 수정자, 특히 매개변수를 수정하는 수정자의 순서" -"가 중요하다는 점에 유의하세요. 일반적으로 차분 프라이버시(DP) 수정자는 매개변" -"수에서 가장 마지막에 작동해야 합니다." +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." +msgstr "인증서를 제공할 때 서버는 세 가지 인증서 경로의 튜플을 기대합니다: CA 인증서, 서버 인증서 및 서버 개인 키입니다." -#: ../../source/how-to-use-differential-privacy.rst:125 -msgid "Local Training using Privacy Engines" -msgstr "Privacy Engines을 사용한 로컬 훈련" +#: ../../source/how-to-enable-ssl-connections.rst:56 +msgid "Client (SuperNode)" +msgstr "클라이언트(SuperNode)" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-enable-ssl-connections.rst:58 msgid "" -"For ensuring data instance-level privacy during local model training on the " -"client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. 
For examples of using Flower with these engines, please " -"refer to the Flower examples directory (`Opacus `_, `Tensorflow Privacy `_)." -msgstr "" -"클라이언트 측에서 로컬 모델을 훈련하는 동안 데이터 인스턴스 수준의 개인 정보 " -"보호를 보장하려면 Opacus 및 TensorFlow Privacy와 같은 개인 정보 보호 엔진을 " -"활용하는 것을 고려하세요. 이러한 엔진과 함께 Flower를 사용하는 예제는 Flower " -"examples directory (`Opacus `_, `Tensorflow Privacy `_)를 참조하세요." +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" +msgstr "다음 터미널 명령을 사용하여 이전에 생성한 인증서를 사용하는 클라이언트(SuperNode)를 시작합니다:" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" -msgstr "전략 사용하기" - -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-enable-ssl-connections.rst:67 +#, fuzzy msgid "" -"Flower allows full customization of the learning process through the :code:" -"`Strategy` abstraction. A number of built-in strategies are provided in the " -"core framework." -msgstr "" -"Flower는 :code:`Strategy` abstraction를 통해 학습 과정을 완전히 사용자 정의" -"할 수 있습니다. 핵심 프레임워크에는 여러 가지 기본 제공 전략이 제공됩니다." +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." +msgstr "코드:`root_certificates`를 설정하면 클라이언트는 PEM 인코딩된 루트 인증서의 파일 경로를 예상합니다." -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-enable-ssl-connections.rst:73 msgid "" -"There are three ways to customize the way Flower orchestrates the learning " -"process on the server side:" +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." msgstr "" -"서버 측에서 Flower가 학습 과정을 조율하는 방식을 사용자 지정하는 방법에는 세 " -"가지가 있습니다:" +"이제 주어진 스크립트를 사용하여 자체 서명 인증서를 생성하고, SSL 사용 서버를 시작하고, 클라이언트가 보안 연결을 설정하는 " +"방법을 배웠을 것입니다." 
-#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" -msgstr "기존 전략(예: :code:`FedAvg`)을 사용합니다" +#: ../../source/how-to-enable-ssl-connections.rst:78 +msgid "Additional resources" +msgstr "추가 리소스" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" -msgstr "콜백 함수로 기존 전략 사용자 지정" +#: ../../source/how-to-enable-ssl-connections.rst:80 +msgid "" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" +msgstr "인증서에 대해 더 자세히 알아보고 싶다면 이러한 추가 자료를 참고하세요:" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" -msgstr "새로운 전략 구현" +#: ../../source/how-to-enable-ssl-connections.rst:83 +msgid "`Let's Encrypt `_" +msgstr "'암호화하세요 `_'" -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" -msgstr "기존 전략 사용" +#: ../../source/how-to-enable-ssl-connections.rst:84 +msgid "`certbot `_" +msgstr "`인증봇 `_" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-implement-strategies.rst:2 +msgid "Implement strategies" +msgstr "전략 구현" + +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"Flower comes with a number of popular federated learning strategies built-" -"in. A built-in strategy can be instantiated as follows:" +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" -"Flower에는 여러 가지 인기 있는 연합 학습 전략이 기본으로 제공됩니다. 기본 " -"제공 전략은 다음과 같이 인스턴스화할 수 있습니다:" +"전략 추상화를 통해 완전한 맞춤형 전략을 구현할 수 있습니다. 
전략은 기본적으로 서버에서 실행되는 연합 학습 알고리즘입니다. 전략은" +" 클라이언트를 샘플링하는 방법, 학습을 위해 클라이언트를 구성하는 방법, 업데이트를 집계하는 방법, 모델을 평가하는 방법을 " +"결정합니다. Flower는 아래에 설명된 것과 동일한 API를 기반으로 하는 몇 가지 기본 제공 전략을 제공합니다." + +#: ../../source/how-to-implement-strategies.rst:11 +#, fuzzy +msgid "The ``Strategy`` abstraction" +msgstr ":code:`Strategy` 추상화" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-implement-strategies.rst:13 +#, fuzzy msgid "" -"This creates a strategy with all parameters left at their default values and " -"passes it to the :code:`start_server` function. It is usually recommended to " -"adjust a few parameters during instantiation:" +"All strategy implementation are derived from the abstract base class " +"``flwr.server.strategy.Strategy``, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." msgstr "" -"이렇게 하면 모든 매개변수가 기본값으로 유지된 전략이 생성되어 :code:" -"`start_server` 함수에 전달됩니다. 일반적으로 인스턴스화 중에 몇 가지 매개변수" -"를 조정하는 것이 좋습니다:" +"모든 전략 구현은 기본 제공 구현과 타사 구현 모두 추상 기본 클래스인 " +":code:`flwr.server.strategy.Strategy`에서 파생됩니다. 즉, 사용자 정의 전략 구현은 기본 제공 구현과" +" 완전히 동일한 기능을 사용할 수 있습니다." -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-implement-strategies.rst:18 msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" +msgstr "전략 추상화에서는 구현해야 하는 몇 가지 추상적인 메서드를 정의합니다:" + +#: ../../source/how-to-implement-strategies.rst:67 +#, fuzzy +msgid "" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "" -"기존 전략은 동작을 사용자 지정하는 여러 가지 방법을 제공합니다. 콜백 함수를 " -"사용하면 전략이 실행 중에 사용자가 제공한 코드를 호출할 수 있습니다." 
+"새 전략을 생성한다는 것은 이전에 표시된 추상 메서드에 대해 구현하는 새로운 :code:`class`(추상 기본 클래스 " +":code:`Strategy`에서 파생됨)를 구현하는 것을 의미합니다:" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" -msgstr "클라이언트 적합성 및 클라이언트 평가 구성" +#: ../../source/how-to-implement-strategies.rst:97 +msgid "The Flower server calls these methods in the following order:" +msgstr "Flower 서버는 다음 순서로 이러한 메서드를 호출합니다:" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-implement-strategies.rst:174 +msgid "The following sections describe each of those methods in more detail." +msgstr "다음 섹션에서는 이러한 각 방법에 대해 자세히 설명합니다." + +#: ../../source/how-to-implement-strategies.rst:177 +#, fuzzy +msgid "The ``initialize_parameters`` method" +msgstr ":code:`initialize_parameters` 메서드" + +#: ../../source/how-to-implement-strategies.rst:179 +#, fuzzy msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function will " -"be called by the strategy and must return a dictionary of configuration key " -"values pairs that will be sent to the client. It must return a dictionary of " -"arbitrary configuration values :code:`client.fit` and :code:`client." -"evaluate` functions during each round of federated learning." +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." msgstr "" -"서버는 매 라운드마다 새로운 설정 값을 클라이언트에 전달하기 위해 " -":code:`on_fit_config_fn`에 함수를 제공할 수 있습니다. 제공된 함수는 전략에 " -"의해 호출되며 클라이언트에 전송될 구성 키 값 쌍의 dictionary를 반환해야 " -"합니다. 연합 학습의 각 라운드 동안 임의의 구성 값 dictionary인 :code:`client." -"fit` 및 :code:`client.evaluate` 함수를 반환해야 합니다." +"code:`initialize_parameters`는 실행을 처음 시작할 때 한 번만 호출됩니다. 이 함수는 초기 전역 모델 " +"파라미터를 직렬화된 형식(즉, :code:`Parameters` 객체)으로 제공하는 역할을 합니다." 
-#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-implement-strategies.rst:183 +#, fuzzy msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive the " -"dictionary returned by the :code:`on_fit_config_fn` in its own :code:`client." -"fit()` function." +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +"``FedAvg``:" msgstr "" -":code:`on_fit_config_fn`은 서버에서 클라이언트로 임의의 구성 값을 전달하고, " -"예를 들어 학습 속도를 조정하기 위해 매 라운드마다 이 값을 잠재적으로 변경하" -"는 데 사용할 수 있습니다. 클라이언트는 자체 :code:`client.fit()` 함수에서 :" -"code:`on_fit_config_fn`이 반환한 dictionary를 받습니다." +"기본 제공 전략은 사용자가 제공한 초기 매개 변수를 반환합니다. 다음 예는 초기 매개 변수를 :code:`FedAvg`에 전달하는 " +"방법을 보여줍니다:" -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-implement-strategies.rst:209 +#, fuzzy +msgid "" +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." +msgstr "" +"Flower 서버는 :code:`initialize_parameters`를 호출하여 " +":code:`initial_parameters`에 전달된 파라미터를 반환하거나 :code:`None`을 반환합니다. " +":code:`initial_parameters`에서 반환되는 매개변수가 없는 경우(즉, :code:`None`) 서버는 무작위로 " +"클라이언트 하나를 선택하여 해당 클라이언트에 매개변수를 제공하도록 요청합니다. 이는 편의 기능이며 실제로는 권장하지 않지만 " +"프로토타이핑에는 유용할 수 있습니다. 실제로는 항상 서버 측 매개변수 초기화를 사용하는 것이 좋습니다." 
+ +#: ../../source/how-to-implement-strategies.rst:218 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also :code:" -"`on_evaluate_config_fn` to customize the configuration sent to :code:`client." -"evaluate()`" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." msgstr "" -":code:`on_fit_config_fn`과 유사하게, :code:`client.evaluate()`로 전송되는 구" -"성을 사용자 지정하는 :code:`on_evaluate_config_fn`도 있습니다" +"서버 측 파라미터 초기화는 강력한 메커니즘입니다. 예를 들어 이전에 저장한 체크포인트에서 학습을 재개하는 데 사용할 수 있습니다. " +"또한 연합 학습을 사용하여 사전 학습된 모델을 미세 조정하는 등 하이브리드 접근 방식을 구현하는 데 필요한 기본 기능입니다." -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" -msgstr "서버 측 평가 구성" +#: ../../source/how-to-implement-strategies.rst:224 +#, fuzzy +msgid "The ``configure_fit`` method" +msgstr ":code:`configure_fit` 메서드" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-implement-strategies.rst:226 +#, fuzzy msgid "" -"Server-side evaluation can be enabled by passing an evaluation function to :" -"code:`evaluate_fn`." +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. The signature of ``configure_fit`` makes this clear:" msgstr "" -"서버 측 평가는 :code:`evaluate_fn`에 평가 함수를 전달하여 활성화할 수 있습니" -"다." +":code:`configure_fit`은 다가오는 학 라운드를 구성하는 역할을 합니다. 이 문맥에서 *구성*은 무엇을 의미하나요? " +"라운드를 구성한다는 것은 클라이언트를 선택하고 이 클라이언트에게 어떤 지침을 보낼지 결정하는 것을 의미합니다. 
" +"code:`configure_fit`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-implement-strategies.rst:239 +#, fuzzy msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides the " -"most flexibility. Read the `Implementing Strategies `_ guide to learn more." +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_fit``:" msgstr "" -"완전한 사용자 지정 전략을 작성하는 것은 조금 더 복잡하지만 유연성이 가장 뛰어" -"납니다. 자세한 내용은 `Implementing Strategies `_ 가이드를 참조하세요." - -#: ../../source/index.rst:34 -msgid "Tutorial" -msgstr "튜토리얼" - -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" -msgstr "빠른 시작 튜토리얼" +"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 나타냅니다. 전략 구현은 일반적으로 " +":code:`configure_fit`에서 다음 단계를 수행합니다:" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" -msgstr "사용 방법 가이드" +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 +#, fuzzy +msgid "" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" +msgstr "" +":code:`client_manager`를 사용하여 사용 가능한 모든 클라이언트(또는 그 하위 집합)를 무작위로 샘플링합니다(각각 " +":code:`ClientProxy` 개체로 표시됨)" -#: ../../source/index.rst:99 -msgid "Legacy example guides" -msgstr "레거시 예제 가이드" +#: ../../source/how-to-implement-strategies.rst:245 +#, fuzzy +msgid "" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" +msgstr "" +"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:`config` " +"dict를 보유한 동일한 :code:`FitIns`와 쌍을 이룹니다" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" -msgstr "설명" +#: ../../source/how-to-implement-strategies.rst:248 +#, fuzzy +msgid "" +"More sophisticated implementations 
can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." +msgstr "" +"보다 정교한 구현은 :code:`configure_fit`을 사용하여 사용자 지정 클라이언트 선택 로직을 구현할 수 있습니다. " +"클라이언트는 :code:`configure_fit`에서 반환된 목록에 해당 :code:`ClientProxy`가 포함된 경우에만 " +"라운드에 참여합니다." -#: None:-1 -msgid "API reference" -msgstr "API 참조" +#: ../../source/how-to-implement-strategies.rst:254 +#, fuzzy +msgid "" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." +msgstr "" +"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. instructions은 클라이언트별로 정의되므로 각 클라이언트에 서로 " +"다른 명령어를 전송할 수 있습니다. 이를 통해 예를 들어 클라이언트마다 다른 모델을 학습시키거나 클라이언트마다 다른 하이퍼파라미터를" +" 사용하는 사용자 지정 전략을 사용할 수 있습니다(:code:`config` dict를 통해)." -#: ../../source/index.rst:137 -msgid "Reference docs" -msgstr "참조 문서" +#: ../../source/how-to-implement-strategies.rst:261 +#, fuzzy +msgid "The ``aggregate_fit`` method" +msgstr ":code:`aggregate_fit` 메서드" -#: ../../source/index.rst:153 -msgid "Contributor tutorials" -msgstr "기여자 튜토리얼" +#: ../../source/how-to-implement-strategies.rst:263 +#, fuzzy +msgid "" +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." +msgstr "" +"code:`aggregate_fit`은 :code:`configure_fit`에서 훈련하도록 선택되고 요청된 클라이언트가 반환한 " +"결과를 집계하는 역할을 담당합니다." 
-#: ../../source/index.rst:160 -msgid "Contributor how-to guides" -msgstr "기여자 사용법 가이드" +#: ../../source/how-to-implement-strategies.rst:277 +#, fuzzy +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." +msgstr "" +"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과를 얻을 수 있다는 보장은 " +"없습니다(:code:`configure_fit`을 통해). 따라서 :code:`aggregate_fit`은 " +":code:`results` 목록뿐만 아니라 :code:`failures` 목록도 받습니다." -#: ../../source/index.rst:172 -msgid "Contributor explanations" -msgstr "기여자 설명" +#: ../../source/how-to-implement-strategies.rst:282 +#, fuzzy +msgid "" +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." +msgstr "" +"code:`aggregate_fit`은 선택적 :code:`Parameters` 개체와 집계된 메트릭의 dictionary를 " +"반환합니다. :code:`Parameters` 반환 값은 :code:`aggregate_fit`이 제공된 결과가 집계에 충분하지 " +"않다고 판단할 수 있으므로(예: 실패 수가 너무 많음) 선택 사항입니다." -#: ../../source/index.rst:178 -msgid "Contributor references" -msgstr "기여자 참조" +#: ../../source/how-to-implement-strategies.rst:288 +#, fuzzy +msgid "The ``configure_evaluate`` method" +msgstr ":code:`configure_evaluate` 메서드" -#: ../../source/index.rst:-1 +#: ../../source/how-to-implement-strategies.rst:290 +#, fuzzy msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." -msgstr "연합 학습을 위한 Python 개발을 쉽게 할 수 있는 주요 Flower 프레임워크의 " -"설명서를 확인하세요." - -#: ../../source/index.rst:2 -msgid "Flower Framework Documentation" -msgstr "플라워 프레임워크 문서" +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. 
What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of ``configure_evaluate`` makes this clear:" +msgstr "" +":code:`configure_evaluate`는 다가오는 평가 라운드를 구성하는 역할을 합니다. 이 문맥에서 *구성*은 무엇을 " +"의미하나요? 라운드를 구성한다는 것은 클라이언트를 선택하고 이러한 클라이언트에 전송할 지침을 결정하는 것을 의미합니다. " +":code:`configure_evaluate`의 시그니처를 보면 이를 명확히 알 수 있습니다:" -#: ../../source/index.rst:7 +#: ../../source/how-to-implement-strategies.rst:303 +#, fuzzy msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_evaluate``:" msgstr "" -"Flower 문서에 오신 것을 환영합니다. Flower `_는 편한 연합 " -"학습 프레임워크입니다." +"반환 값은 튜플 목록으로, 각 튜플은 특정 클라이언트로 전송될 명령어를 나타냅니다. 전략 구현은 일반적으로 " +":code:`configure_evaluate`에서 다음 단계를 수행합니다:" -#: ../../source/index.rst:11 -msgid "Join the Flower Community" -msgstr "Flower 커뮤니티 가입하기" +#: ../../source/how-to-implement-strategies.rst:309 +#, fuzzy +msgid "" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" +msgstr "" +"각 :code:`ClientProxy`를 현재 글로벌 모델 :code:`parameters` 및 :code:`config` " +"dict를 보유한 동일한 :code:`EvaluateIns`와 쌍을 이룹니다" -#: ../../source/index.rst:13 +#: ../../source/how-to-implement-strategies.rst:312 +#, fuzzy msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." +"More sophisticated implementations can use ``configure_evaluate`` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." 
msgstr "" -"Flower 커뮤니티는 연구원, 엔지니어, 학생, 전문가, 학자 및 기타 애호가들로 구" -"성된 편한 그룹으로 빠르게 성장하고 있습니다." +"보다 정교한 구현은 :code:`configure_evaluate`를 사용하여 사용자 지정 클라이언트 선택 로직을 구현할 수 " +"있습니다. 클라이언트는 :code:`configure_evaluate`에서 반환된 목록에 해당 :code:`ClientProxy`가" +" 포함된 경우에만 라운드에 참여합니다." -#: ../../source/index.rst:15 -msgid "Join us on Slack" -msgstr "Slack에 가입하세요" +#: ../../source/how-to-implement-strategies.rst:318 +#, fuzzy +msgid "" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." +msgstr "" +"이 반환 값의 구조는 사용자에게 많은 유연성을 제공합니다. 명령어는 클라이언트별로 정의되므로 각 클라이언트에 서로 다른 명령어를 " +"전송할 수 있습니다. 이를 통해 사용자 지정 전략을 통해 예를 들어 클라이언트마다 다른 모델을 평가하거나 클라이언트마다 다른 " +"하이퍼파라미터를 사용할 수 있습니다(:code:`config` dict를 통해)." -#: ../../source/index.rst:23 -msgid "Flower Framework" -msgstr "Flower 프레임워크" +#: ../../source/how-to-implement-strategies.rst:325 +#, fuzzy +msgid "The ``aggregate_evaluate`` method" +msgstr ":code:`aggregate_evaluate` 메서드" -#: ../../source/index.rst:25 +#: ../../source/how-to-implement-strategies.rst:327 +#, fuzzy msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to " -"learn more." +"``aggregate_evaluate`` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +"``configure_evaluate``." msgstr "" -"이 사용자 가이드는 Flower를 사용해 기존 머신 러닝 워크로드를 연합된 환경으로 " -"가져오고자 하는 연구자와 개발자를 대상으로 합니다. Flower의 설계 목표 중 하나" -"는 이를 간단하게 만드는 것이었습니다. 자세히 알아보려면 계속 읽어보세요." 
- -#: ../../source/index.rst:30 -msgid "Tutorials" -msgstr "튜토리얼" +"code:`aggregate_evaluate`는 :code:`configure_evaluate`에서 선택되어 평가를 요청한 " +"클라이언트가 반환한 결과를 집계하는 역할을 담당합니다." -#: ../../source/index.rst:32 +#: ../../source/how-to-implement-strategies.rst:341 +#, fuzzy msgid "" -"A learning-oriented series of federated learning tutorials, the best place " -"to start." -msgstr "학습 중심의 연합 학습 튜토리얼 시리즈로, 시작하기에 가장 좋은 곳입니다." +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." +msgstr "" +"물론 실패가 발생할 수 있으므로 서버가 명령을 보낸 모든 클라이언트로부터 결과를 얻을 수 있다는 보장은 " +"없습니다(:code:`configure_evaluate`를 통해). 따라서 :code:`aggregate_evaluate`는 " +":code:`results` 목록뿐만 아니라 :code:`failures` 목록도 받습니다." -#: ../../source/index.rst:61 -msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:" -"`TensorFlow ` | :doc:`🤗 Transformers " -"` | :doc:`JAX ` | :" -"doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:" -"`XGBoost ` | :doc:`Android ` | :doc:`iOS `" -msgstr "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:" -"`TensorFlow ` | :doc:`🤗 Transformers " -"` | :doc:`JAX ` | :" -"doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:" -"`XGBoost ` | :doc:`Android ` | :doc:`iOS `" - -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" -msgstr "파이토치용 동영상 튜토리얼도 만들었습니다:" +#: ../../source/how-to-implement-strategies.rst:346 +#, fuzzy +msgid "" +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." 
+msgstr "" +"code:`aggregate_evaluate`는 선택적 :code:`float`(손실)와 집계된 메트릭의 dictionary를 " +"반환합니다. code:`float` 반환 값은 :code:`aggregate_evaluate`가 제공된 결과가 집계에 충분하지 " +"않다고 판단할 수 있으므로(예: 실패 수가 너무 많음) 선택 사항입니다." -#: ../../source/index.rst:68 -msgid "And TensorFlow:" -msgstr "그리고 TensorFlow도:" +#: ../../source/how-to-implement-strategies.rst:352 +#, fuzzy +msgid "The ``evaluate`` method" +msgstr ":code:`evaluate` 메서드" -#: ../../source/index.rst:76 +#: ../../source/how-to-implement-strategies.rst:354 +#, fuzzy msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a specific " -"goal." +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" -"문제 중심의 방법 가이드는 특정 목표를 달성하는 방법을 단계별로 보여줍니다." +":code:`evaluate`는 서버 측에서 모델 매개변수를 평가하는 역할을 담당합니다. " +"code:`configure_evaluate`/:code:`aggregate_evaluate`와 함께 " +":code:`evaluate`를 사용하면 서버 측과 클라이언트 측(federated) 평가를 모두 수행할 수 있는 전략을 사용할 수" +" 있습니다." -#: ../../source/index.rst:110 +#: ../../source/how-to-implement-strategies.rst:364 +#, fuzzy msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." msgstr "" -"이해 중심의 개념 가이드에서는 Flower와 협업 AI의 주요 주제와 기본 아이디어를 " -"설명하고 토론합니다." +"반환 값은 전략에서 서버 측 평가를 구현할 필요가 없거나 사용자 정의 :code:`evaluate` 메서드가 성공적으로 완료되지 " +"않을 수 있기 때문에(예: 서버 측 평가 데이터를 로드하지 못할 수 있음) 다시 선택 사항으로 설정할 수 있습니다." 
-#: ../../source/index.rst:120 -msgid "References" -msgstr "참조" +#: ../../source/how-to-install-flower.rst:2 +msgid "Install Flower" +msgstr "Flower 설치" -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." -msgstr "정보 지향 API 참조 및 기타 참고 자료." +#: ../../source/how-to-install-flower.rst:5 +msgid "Python version" +msgstr "Python 버전" -#: ../../source/index.rst:131::1 -msgid ":py:obj:`flwr `\\" -msgstr ":py:obj:`flwr `\\" +#: ../../source/how-to-install-flower.rst:11 +msgid "Install stable release" +msgstr "안정적인 릴리즈 설치" -#: ../../source/index.rst:131::1 flwr:1 of -msgid "Flower main package." -msgstr "Flower 메인 패키지." +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 +msgid "Using pip" +msgstr "pip 사용" -#: ../../source/index.rst:148 -msgid "Contributor docs" -msgstr "기여자 문서" +#: ../../source/how-to-install-flower.rst:16 +#, fuzzy +msgid "Stable releases are available on `PyPI `_:" +msgstr "안정적인 릴리즈는 `PyPI `_:: 에서 확인할 수 있습니다::" -#: ../../source/index.rst:150 +#: ../../source/how-to-install-flower.rst:22 +#, fuzzy msgid "" -"The Flower community welcomes contributions. The following docs are intended " -"to help along the way." -msgstr "" -"Flower 커뮤니티는 여러분의 기여를 환영합니다. 다음 문서는 그 과정에서 도움을 " -"드리기 위한 문서입니다." +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra:" +msgstr "가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr``을 ``simulation``extra와 함께 설치해야 합니다:" -#: ../../source/ref-api-cli.rst:2 -msgid "Flower CLI reference" -msgstr "Flower CLI 참조" +#: ../../source/how-to-install-flower.rst:30 +msgid "Using conda (or mamba)" +msgstr "conda(또는 mamba) 사용" -#: ../../source/ref-api-cli.rst:7 -msgid "flower-simulation" -msgstr "flower 시뮬레이션" +#: ../../source/how-to-install-flower.rst:32 +msgid "Flower can also be installed from the ``conda-forge`` channel." +msgstr "Flower은 'conda-forge' 채널에서도 설치할 수 있습니다." 
-#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" -msgstr "flower 초연결" +#: ../../source/how-to-install-flower.rst:34 +#, fuzzy +msgid "" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following:" +msgstr "채널에 'conda-forge'를 추가하지 않은 경우 먼저 다음을 실행해야 합니다:" -#: ../../source/ref-api-cli.rst:27 -msgid "flower-client-app" -msgstr "flower 클라이언트 앱" +#: ../../source/how-to-install-flower.rst:42 +#, fuzzy +msgid "" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``:" +msgstr "conda-forge`` 채널이 활성화되면 ``flwr``을 ``conda``로 설치할 수 있습니다::" -#: ../../source/ref-api-cli.rst:37 -msgid "flower-server-app" -msgstr "flower 서버 프로그램" +#: ../../source/how-to-install-flower.rst:49 +#, fuzzy +msgid "or with ``mamba``:" +msgstr "또는 ``mamba``::" -#: ../../source/ref-api/flwr.rst:2 -msgid "flwr" -msgstr "flwr" +#: ../../source/how-to-install-flower.rst:56 +msgid "Verify installation" +msgstr "설치 확인" -#: ../../source/ref-api/flwr.client.rst:45 ../../source/ref-api/flwr.rst:25 -#: ../../source/ref-api/flwr.server.rst:49 -msgid "Modules" -msgstr "Modules" +#: ../../source/how-to-install-flower.rst:58 +#, fuzzy +msgid "" +"The following command can be used to verify if Flower was successfully " +"installed. If everything worked, it should print the version of Flower to" +" the command line:" +msgstr "" +"다음 명령을 사용하여 Flower가 성공적으로 설치되었는지 확인할 수 있습니다. 모든 것이 정상적으로 작동하면 명령줄에 " +"Flower의 버전이 출력됩니다:" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.client `\\" -msgstr ":py:obj:`flwr.client `\\" +#: ../../source/how-to-install-flower.rst:68 +msgid "Advanced installation options" +msgstr "고급 설치 옵션" -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -msgid "Flower client." -msgstr "Flower 클라이언트." 
+#: ../../source/how-to-install-flower.rst:71 +msgid "Install via Docker" +msgstr "Docker를 통해 설치" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.common `\\" -msgstr ":py:obj:`flwr.common `\\" +#: ../../source/how-to-install-flower.rst:73 +#, fuzzy +msgid ":doc:`Run Flower using Docker `" +msgstr ":doc:`Docker를 사용하여 Flower를 실행하는 방법 `" -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." -msgstr "서버와 클라이언트 간에 공유되는 공통 구성 요소입니다." +#: ../../source/how-to-install-flower.rst:76 +msgid "Install pre-release" +msgstr "사전 릴리즈 설치" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.server `\\" -msgstr ":py:obj:`flwr.server `\\" +#: ../../source/how-to-install-flower.rst:78 +#, fuzzy +msgid "" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens:" +msgstr "" +"새(불안정할 수 있는) 버전의 Flower는 안정 버전이 출시되기 전에 사전 릴리즈 버전(알파, 베타, 릴리즈 후보)으로 제공되는 " +"경우가 있습니다:" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:38::1 flwr.server:1 -#: flwr.server.server.Server:1 of -msgid "Flower server." -msgstr "Flower 서버." +#: ../../source/how-to-install-flower.rst:85 +#, fuzzy +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra:" +msgstr "" +"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우 ``flwr`` 사전 릴리즈를 ``simulation`` extra와 함께 " +"설치해야 합니다:" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.simulation `\\" -msgstr ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-install-flower.rst:93 +msgid "Install nightly release" +msgstr "야간 릴리즈 설치" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -msgid "Flower simulation." -msgstr "Flower 시뮬레이션." 
+#: ../../source/how-to-install-flower.rst:95 +#, fuzzy +msgid "" +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases:" +msgstr "Flower의 최신 (불안정할 수 있는) 변경 사항은 다음과 같이 야간 릴리즈로 제공됩니다:" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" -msgstr "클라이언트" - -#: ../../source/ref-api/flwr.client.mod.rst:13 -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -msgid "Functions" -msgstr "함수" +#: ../../source/how-to-install-flower.rst:101 +#, fuzzy +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra:" +msgstr "" +"가상 클라이언트 엔진을 사용하는 시뮬레이션의 경우, ``flwr-nightly``를 ``simulation`` extr와 함께 " +"설치해야 합니다::" -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_client_app `\\ \\(\\)" -msgstr ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:2 +msgid "Monitor simulation" +msgstr "모니터 시뮬레이션" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -msgid "Run Flower client app." -msgstr "Flower 클라이언트 앱을 실행합니다." +#: ../../source/how-to-monitor-simulation.rst:4 +msgid "" +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." +msgstr "" +"Flower를 사용하면 시뮬레이션을 실행하는 동안 시스템 리소스를 모니터링할 수 있습니다. 또한 Flower 시뮬레이션 엔진은 " +"강력하며 클라이언트별 리소스 할당 방법을 결정하고 총 사용량을 제한할 수 있습니다. 리소스 소비에 대한 인사이트를 통해 더 현명한 " +"결정을 내리고 실행 시간을 단축할 수 있습니다." 
-#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_supernode `\\ \\(\\)" -msgstr ":py:obj:`run_supernode `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:9 +msgid "" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." +msgstr "" +"구체적인 지침은 macOS를 사용 중이고 'Homebrew `_ 패키지 관리자가 설치되어 있다고 " +"가정합니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of -msgid "Run Flower SuperNode." -msgstr "Flower SuperNode를 실행합니다." +#: ../../source/how-to-monitor-simulation.rst:13 +msgid "Downloads" +msgstr "다운로드" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." msgstr "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"`Prometheus `_는 데이터 수집에 사용되며, `Grafana " +"`_는 수집된 데이터를 시각화할 수 있게 해줍니다. 이 두 도구는 모두 Flower가 " +"내부적으로 사용하는 `Ray `_와 잘 통합되어 있습니다." -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." -msgstr "Flower 서버에 연결되는 Flower 클라이언트 노드를 시작합니다." +#: ../../source/how-to-monitor-simulation.rst:23 +msgid "" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." +msgstr "구성 파일을 덮어씁니다(장치에 따라 다른 경로에 설치되어 있을 수 있음)." 
+ +#: ../../source/how-to-monitor-simulation.rst:26 +msgid "If you are on an M1 Mac, it should be:" +msgstr "M1 Mac을 사용 중이라면:" + +#: ../../source/how-to-monitor-simulation.rst:33 +msgid "On the previous generation Intel Mac devices, it should be:" +msgstr "이전 세대 Intel Mac 장치에서는:" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-monitor-simulation.rst:40 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\, " -"server\\_address\\, client\\)" +"Open the respective configuration files and change them. Depending on " +"your device, use one of the two following commands:" +msgstr "각 구성 파일을 열고 변경합니다. 장치에 따라 다음 두 명령 중 하나를 사용합니다:" + +#: ../../source/how-to-monitor-simulation.rst:51 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" msgstr "" -":py:obj:`start_numpy_client `\\ \\(\\*\\, " -"server\\_address\\, client\\)" +"를 입력한 다음 파일의 모든 텍스트를 삭제하고 아래에 표시된 새 Prometheus 설정을 붙여넣습니다. 요구 사항에 따라 시간 " +"간격을 조정할 수 있습니다:" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." -msgstr "gRPC 서버에 연결되는 Flower NumPyClient를 시작합니다." +#: ../../source/how-to-monitor-simulation.rst:67 +msgid "" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" +msgstr "" +"이제 Prometheus 구성을 편집한 후 Grafana 구성 파일에 대해서도 동일한 작업을 수행합니다. 
이전과 마찬가지로 다음 " +"명령 중 하나를 사용하여 파일을 엽니다:" -#: ../../source/ref-api/flwr.client.mod.rst:30 -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:26 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -msgid "Classes" -msgstr "클래스" +#: ../../source/how-to-monitor-simulation.rst:78 +msgid "" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." +msgstr "터미널 편집기가 열리면 이전과 마찬가지로 다음 구성을 적용할 수 있습니다." -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`Client `\\ \\(\\)" -msgstr ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:94 +msgid "" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." +msgstr "축하합니다. 매트릭 트레킹에 필요한 모든 소프트웨어를 다운로드하셨습니다. 이제 시작해 보겠습니다." -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." -msgstr "Flower 클라이언트를 위한 추상 베이스 클래스입니다." +#: ../../source/how-to-monitor-simulation.rst:98 +msgid "Tracking metrics" +msgstr "매트릭 트래킹" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-monitor-simulation.rst:100 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, mods\\]\\)" -msgstr "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, mods\\]\\)" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." +msgstr "Flower 시뮬레이션을 실행하기 전에 방금 설치 및 구성한 모니터링 도구를 시작해야 합니다." -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of -msgid "Flower ClientApp." -msgstr "Flower ClientApp." +#: ../../source/how-to-monitor-simulation.rst:108 +msgid "" +"Please include the following argument in your Python code when starting a" +" simulation." +msgstr "시뮬레이션을 시작할 때 Python 코드에 다음 전달인자를 포함하세요." 
-#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`NumPyClient `\\ \\(\\)" -msgstr ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "Now, you are ready to start your workload." +msgstr "이제 워크로드를 시작할 준비가 되었습니다." -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." -msgstr "NumPy를 사용하는 Flower 클라이언트를 위한 추상 베이스 클래스입니다." +#: ../../source/how-to-monitor-simulation.rst:121 +msgid "" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" +msgstr "시뮬레이션이 시작되고 얼마 지나지 않아 터미널에 다음 로그가 표시됩니다:" -#: ../../source/ref-api/flwr.client.rst:52::1 -msgid ":py:obj:`flwr.client.mod `\\" -msgstr ":py:obj:`flwr.client.mod `\\" +#: ../../source/how-to-monitor-simulation.rst:127 +#, fuzzy +msgid "You can look at everything at http://127.0.0.1:8265 ." +msgstr "``_ 에서 모든 것을 볼 수 있습니다." -#: ../../source/ref-api/flwr.client.rst:52::1 flwr.client.mod:1 of -msgid "Flower Built-in Mods." -msgstr "Flower 내장 모드." +#: ../../source/how-to-monitor-simulation.rst:129 +msgid "" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." +msgstr "Ray 대시보드입니다. 메트릭(왼쪽 패널의 가장 아래 옵션)으로 이동할 수 있습니다." -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -msgid "Bases: :py:class:`~abc.ABC`" -msgstr "Bases: :py:class:`~abc.ABC`" +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "" +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." 
+msgstr "" +"또는 오른쪽 위 모서리인 \"Grafana에서 보기\"를 클릭하여 Grafana에서 바로 확인할 수도 있습니다. Ray 대시보드는 " +"시뮬레이션 중에만 액세스할 수 있다는 점에 유의하세요. 시뮬레이션이 종료된 후에는 Grafana를 사용하여 메트릭을 탐색할 수만 " +"있습니다. ``http://localhost:3000/``로 이동하여 Grafana를 시작할 수 있습니다." -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: 
../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -msgid "Methods" -msgstr "메소드" +#: ../../source/how-to-monitor-simulation.rst:137 +#, fuzzy +msgid "" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." +msgstr "" +"시각화를 완료한 후에는 Prometheus와 Grafana를 중지합니다. 그렇지 않으면 실행 중인 동안 컴퓨터에서 포트 " +":code:`3000` 등을 차단하므로 이 작업이 중요합니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`evaluate `\\ \\(ins\\)" -msgstr ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-monitor-simulation.rst:147 +msgid "Resource allocation" +msgstr "리소스 할당" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -msgid "Evaluate the provided parameters using the locally held dataset." -msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 평가합니다." +#: ../../source/how-to-monitor-simulation.rst:149 +msgid "" +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." +msgstr "Ray 라이브러리가 어떻게 작동하는지 이해해야 시뮬레이션 클라이언트에 시스템 리소스를 효율적으로 할당할 수 있습니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`fit `\\ \\(ins\\)" -msgstr ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-monitor-simulation.rst:152 +msgid "" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. 
It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" +msgstr "" +"처음에 시뮬레이션(Ray가 내부에서 처리하는)은 기본적으로 시스템에서 사용 가능한 모든 리소스를 사용하여 시작되며, 이 리소스는 " +"클라이언트 간에 공유됩니다. 그렇다고 해서 모든 클라이언트에게 균등하게 분배하거나 모든 클라이언트에서 동시에 모델 학습이 이루어지는" +" 것은 아닙니다. 이에 대한 자세한 내용은 이 블로그의 뒷부분에서 설명합니다. 다음을 실행하여 시스템 리소스를 확인할 수 있습니다:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." -msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 구체화합니다." +#: ../../source/how-to-monitor-simulation.rst:164 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "Google Colab에서는 이와 유사한 결과가 표시될 수 있습니다:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" +msgstr "그러나 기본값을 덮어쓸 수 있습니다. 시뮬레이션을 시작할 때 다음을 수행합니다(모두 덮어쓸 필요는 없음):" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of -msgid "Get the run context from this client." -msgstr "이 클라이언트에서 실행 컨텍스트를 가져옵니다." +#: ../../source/how-to-monitor-simulation.rst:195 +msgid "Let’s also specify the resource for a single client." +msgstr "단일 클라이언트에 대한 리소스도 지정해 보겠습니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/how-to-monitor-simulation.rst:225 msgid "" -":py:obj:`get_parameters `\\ \\(ins\\)" +"Now comes the crucial part. 
Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." msgstr "" -":py:obj:`get_parameters `\\ \\(ins\\)" +"이제 중요한 부분이 나옵니다. Ray는 리소스가 허용하는 경우에만 필요한 모든 리소스가 있을 때(병렬로 실행되는 등) 새 " +"클라이언트를 시작합니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." -msgstr "현재 로컬 모델 파라미터를 반환합니다." +#: ../../source/how-to-monitor-simulation.rst:228 +#, fuzzy +msgid "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." +msgstr "" +"위의 예에서는 하나의 클라이언트만 실행되므로 클라이언트가 동시에 실행되지 않습니다. :code:`client_num_gpus = " +"0.5` 를 설정하면 두 개의 클라이언트를 실행할 수 있으므로 동시에 실행할 수 있습니다. 사용 가능한 리소스보다 더 많은 리소스를" +" 요구하지 않도록 주의하세요. :code:`client_num_gpus = 2`를 지정하면 시뮬레이션이 시작되지 않습니다(GPU가 " +"2개이지만 :code:`ray_init_args`에서 1개를 설정한 경우에도 마찬가지입니다)." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "자주 묻는 질문" + +#: ../../source/how-to-monitor-simulation.rst:237 +msgid "Q: I don't see any metrics logged." +msgstr "질문: 기록된 메트릭이 보이지 않습니다." + +#: ../../source/how-to-monitor-simulation.rst:239 msgid "" -":py:obj:`get_properties `\\ \\(ins\\)" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." 
msgstr "" -":py:obj:`get_properties `\\ \\(ins\\)" +"A: 기간이 제대로 설정되지 않았을 수 있습니다. 설정은 오른쪽 상단에 있습니다(기본값은 '지난 30분'). 시뮬레이션이 실행된 " +"기간을 반영하도록 기간을 변경해 주세요." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." -msgstr "클라이언트의 속성 집합을 반환합니다." +#: ../../source/how-to-monitor-simulation.rst:243 +msgid "" +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." +msgstr "" +"질문: \"Grafana 서버가 감지되지 않았습니다. Ray 대시보드의 메트릭 탭으로 이동한 후 Grafana 서버가 실행 중인지 " +"확인하고 이 페이지를 새로고침하세요.\"라는 메시지가 표시됩니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`set_context `\\ \\(context\\)" -msgstr ":py:obj:`set_context `\\ \\(context\\)" +#: ../../source/how-to-monitor-simulation.rst:246 +msgid "" +"A: You probably don't have Grafana running. Please check the running " +"services" +msgstr "A: Grafana가 실행되고 있지 않을 수 있습니다. 실행 중인 서비스를 확인하세요" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -msgid "Apply a run context to this client." -msgstr "이 클라이언트에 실행 컨텍스트를 적용합니다." +#: ../../source/how-to-monitor-simulation.rst:252 +#, fuzzy +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"http://127.0.0.1:8265." +msgstr "Q: ``_로 이동할 때 \"이 사이트에 연결할 수 없습니다.\"라는 메시지가 표시됩니다." -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:254 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." +msgstr "A: 시뮬레이션이 이미 완료되었거나 아직 Prometheus를 시작해야 합니다." 
-#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." -msgstr "클라이언트(자체)를 반환합니다." +#: ../../source/how-to-monitor-simulation.rst:257 +msgid "Resources" +msgstr "리소스" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 -msgid "Attributes" -msgstr "속성" +#: ../../source/how-to-monitor-simulation.rst:259 +#, fuzzy +msgid "" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" +msgstr "" 
+"Ray 대시보드: ``_" -#: flwr.client.client.Client.evaluate:1::1 of -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" +#: ../../source/how-to-monitor-simulation.rst:261 +#, fuzzy +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" +msgstr "Ray 메트릭: ``_" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.mod.localdp_mod.LocalDpMod -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: 
flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -msgid "Parameters" -msgstr "파라미터" +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" +msgstr "시뮬레이션 실행" -#: flwr.client.client.Client.evaluate:3 of +#: ../../source/how-to-run-simulations.rst:8 msgid "" -"The evaluation instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to customize " -"the local evaluation process." -msgstr "" -"서버에서 받은 (전역) 모델 파라미터와 로컬 평가 프로세스를 사용자 지정하는 데 " -"사용되는 구성 값 사전이 포함된 평가 지침입니다." 
+"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." +msgstr "" +"Federated 학습 워크로드 시뮬레이션은 다양한 사용 사례에 유용합니다. 대규모 클라이언트 집단에서 워크로드를 실행하되 많은 " +"수의 물리적 장치를 소싱, 구성 및 관리할 필요가 없는 경우, 복잡한 설정 과정을 거치지 않고도 액세스 가능한 컴퓨팅 시스템에서 " +"최대한 빠르게 FL 워크로드를 실행하려는 경우, 다양한 수준의 데이터 및 시스템 이질성, 클라이언트 가용성, 개인정보 예산 등의 " +"다양한 시나리오에서 알고리즘을 검증하려는 경우 등 여러 가지 사용 사례에 유용합니다. 이러한 사례는 FL 워크로드 시뮬레이션이 " +"적합한 사용 사례 중 일부입니다. Flower는 `VirtualClientEngine `_ 또는 VCE를 통해 이러한 시나리오를 수용할 수 " +"있습니다." 
-#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Returns" -msgstr "반환" +#: ../../source/how-to-run-simulations.rst:19 +#, fuzzy +msgid "" +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. 
In addition to that, clients managed by the " +"``VirtualClientEngine`` are:" +msgstr "" +":code:`VirtualClientEngine`은 `virtual` 클라이언트를 예약, 실행 및 관리합니다. 이러한 클라이언트는 " +"`non-virtual` 클라이언트(예: `flwr.client.start_client `_ 명령을 통해 실행하는 클라이언트)와 동일하며, `flwr.client.NumPyClient `_에서 상속하는 클래스 생성으로 구성될 수 있으므로 동일한 " +"방식으로 동작합니다. 그 외에도 :code:`VirtualClientEngine`에 의해 관리되는 클라이언트는 다음과 같습니다:" + +#: ../../source/how-to-run-simulations.rst:26 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." +msgstr "" +"resource-aware: 이는 각 클라이언트가 시스템에서 컴퓨팅 및 메모리의 일부를 할당받는다는 것을 의미합니다. 사용자는 " +"시뮬레이션을 시작할 때 이를 제어할 수 있으며, 이를 통해 Flower FL 시뮬레이션의 병렬 처리 정도를 제어할 수 있습니다. " +"클라이언트당 리소스가 적을수록 동일한 하드웨어에서 더 많은 클라이언트를 동시에 실행할 수 있습니다." -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-run-simulations.rst:31 +#, fuzzy msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to ``VirtualClientEngine``'s " +"internals." msgstr "" -"로컬 데이터 세트의 손실 및 평가에 사용된 로컬 데이터 예제 수와 같은 기타 세" -"부 정보가 포함된 평가 결과입니다." 
- -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" -msgstr "반환 타입" +"self-managed: 이는 사용자가 클라이언트를 수동으로 실행할 필요가 없으며, 대신 " +":code:`VirtualClientEngine`의 내부에 위임된다는 의미입니다." -#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-run-simulations.rst:33 msgid "" -"The training instructions containing (global) model parameters received from " -"the server and a dictionary of configuration values used to customize the " -"local training process." +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). 
The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." msgstr "" -"서버에서 받은 (전역) 모델 파라미터와 로컬 학습 프로세스를 사용자 지정하는 데 " -"사용되는 구성 값 사전이 포함된 학습 지침입니다." +"ephemeral: 이는 클라이언트가 FL 프로세스에서 필요할 때만 구체화됨을 의미합니다(예: `fit() `_을 수행하기 위해). 객체는 나중에 소멸되어 할당된 리소스를 해제하고" +" 다른 클라이언트가 참여할 수 있도록 허용합니다." -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-run-simulations.rst:38 +#, fuzzy msgid "" -"The training result containing updated parameters and other details such as " -"the number of local training examples used for training." +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" -"업데이트된 매개변수와 훈련에 사용된 로컬 훈련 예제 수와 같은 기타 세부 정보" -"가 포함된 훈련 결과입니다." +":code:`VirtualClientEngine`은 확장 가능한 파이썬 워크로드를 위한 오픈 소스 프레임워크인 `Ray " +"`_를 사용하여 `virtual` 클라이언트를 구현합니다. 특히 Flower의 " +":code:`VirtualClientEngine`은 `Actors `_를 사용하여 `virtual` 클라이언트를 생성하고 해당 워크로드를 실행합니다." -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/how-to-run-simulations.rst:45 +msgid "Launch your Flower simulation" +msgstr "Flower 시뮬레이션 시작" + +#: ../../source/how-to-run-simulations.rst:47 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" msgstr "" -"구성 값 dictionary이 포함된 서버에서 받은 매개변수 가져오기 명령어입니다." +"Flower 시뮬레이션을 실행하려면 여전히 클라이언트 클래스, 전략 및 유틸리티 함수를 정의하여 데이터 세트를 다운로드하고 로드(및" +" 파티션)해야 합니다. 
이 작업을 마친 후 시뮬레이션을 시작하려면 `start_simulation `_을 사용하면 되며, 최소한의 예시는 다음과 " +"같습니다:" -#: flwr.client.client.Client.get_parameters:7 of -msgid "The current local model parameters." -msgstr "현재 로컬 모델 파라미터입니다." +#: ../../source/how-to-run-simulations.rst:73 +msgid "VirtualClientEngine resources" +msgstr "VirtualClientEngine 리소스" -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-run-simulations.rst:75 +#, fuzzy +msgid "" +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " +"`_" +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." +msgstr "" +"기본적으로 VCE는 모든 시스템 리소스(예: 모든 CPU, 모든 GPU 등)에 액세스할 수 있으며, 이는 Ray를 시작할 때의 기본" +" 동작이기도 합니다. 그러나 일부 설정에서는 시뮬레이션에 사용되는 시스템 리소스의 수를 제한하고 싶을 수 있습니다. 이 설정은 " +"VCE가 내부적으로 Ray의 :code:`ray.init` 명령에 전달하는 :code:`start_simulation`에 대한 " +":code:`ray_init_args` 입력 인수를 통해 수행할 수 있습니다. 구성할 수 있는 전체 설정 목록은 `ray.init " +"`_" +" 설명서를 확인하세요. VCE가 시스템의 모든 CPU와 GPU를 사용하도록 하려면 :code:`ray_init_args`를 설정하지" +" 마세요." + +#: ../../source/how-to-run-simulations.rst:97 +msgid "Assigning client resources" +msgstr "클라이언트 리소스 할당" + +#: ../../source/how-to-run-simulations.rst:99 +#, fuzzy msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." -msgstr "구성 값 dictionary이 포함된 서버로부터 받은 속성 가져오기 명령입니다." +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. 
This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." +msgstr "" +"기본적으로 :code:`VirtualClientEngine`은 각 가상 클라이언트에 단일 CPU 코어를 할당합니다(그 외에는 " +"아무것도 할당하지 않음). 즉, 시스템에 코어가 10개인 경우 그만큼 많은 가상 클라이언트를 동시에 실행할 수 있습니다." -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." -msgstr "현재 클라이언트 속성입니다." +#: ../../source/how-to-run-simulations.rst:103 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" +msgstr "" +"대부분의 경우 FL 워크로드의 복잡성(즉, 컴퓨팅 및 메모리 사용량)에 따라 클라이언트에 할당되는 리소스를 조정하고 싶을 것입니다." +" 시뮬레이션을 시작할 때 `client_resources` argument를 `start_simulation `_로 설정하여 이를 수행할 수 있습니다. Ray는 " +"내부적으로 두 개의 키를 사용하여 워크로드(이 경우 Flower 클라이언트)를 스케줄링하고 스폰합니다:" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -msgid "ClientApp" -msgstr "클라이언트앱" +#: ../../source/how-to-run-simulations.rst:110 +#, fuzzy +msgid "``num_cpus`` indicates the number of CPU cores a client would get." +msgstr ":code:`num_cpus`는 클라이언트에서 사용할 수 있는 CPU 코어 수를 나타냅니다." 
-#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 -#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 -#: flwr.common.context.Context:1 flwr.common.message.Error:1 -#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 -#: flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "Bases: :py:class:`object`" -msgstr "Bases: :py:class:`object`" +#: ../../source/how-to-run-simulations.rst:111 +#, fuzzy +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." +msgstr ":code:`num_gpus`는 클라이언트에 할당되는 GPU 메모리의 **비율**을 나타냅니다." 
-#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 -#: flwr.client.mod.localdp_mod.LocalDpMod:22 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -msgid "Examples" -msgstr "예시" +#: ../../source/how-to-run-simulations.rst:113 +msgid "Let's see a few examples:" +msgstr "몇 가지 예를 살펴보겠습니다:" -#: flwr.client.client_app.ClientApp:5 of +#: ../../source/how-to-run-simulations.rst:132 +#, fuzzy msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" +"While the ``client_resources`` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." +msgstr "" +"code:`client_resources`를 사용하여 FL 시뮬레이션의 동시성 정도를 제어할 수 있지만, 동일한 라운드에서 수십, " +"수백 또는 수천 개의 클라이언트를 실행하고 훨씬 더 많은 '휴면'(즉, 라운드에 참여하지 않는) 클라이언트를 보유하는 것을 막을 " +"수는 없습니다. 
라운드당 100명의 클라이언트를 받고 싶지만 시스템이 동시에 8명의 클라이언트만 수용할 수 있다고 가정해 봅시다. " +"code:`VirtualClientEngine`은 실행할 100개의 작업(각각 전략에서 샘플링한 클라이언트를 시뮬레이션)을 예약한 " +"다음 리소스 인식 방식으로 8개씩 일괄적으로 실행합니다." + +#: ../../source/how-to-run-simulations.rst:140 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." msgstr "" -"일반적인 `Client` 구현의 이름이 `FlowerClient`라고 가정하면, 다음과 같이 " -"`ClientApp`으로 래핑할 수 있습니다:" +"리소스가 FL 클라이언트를 예약하는 데 사용되는 방법과 사용자 지정 리소스를 정의하는 방법에 대한 모든 복잡한 세부 사항을 " +"이해하려면 'Ray 문서 '를 참조하세요." -#: flwr.client.client_app.ClientApp:16 of +#: ../../source/how-to-run-simulations.rst:145 +msgid "Simulation examples" +msgstr "시뮬레이션 예제" + +#: ../../source/how-to-run-simulations.rst:147 msgid "" -"If the above code is in a Python module called `client`, it can be started " -"as follows:" +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" msgstr "" -"위의 코드가 'client'라는 Python 모듈에 있는 경우 다음과 같이 시작할 수 있습니" -"다:" +"Tensorflow/Keras와 파이토치에서 바로 실행할 수 있는 몇 가지 Flower 시뮬레이션 예제는 `Flower 레포지토리 " +"`_에서 제공됩니다. Google Colab에서도 실행할 수 있습니다:" -#: flwr.client.client_app.ClientApp:21 of +#: ../../source/how-to-run-simulations.rst:151 msgid "" -"In this `client:app` example, `client` refers to the Python module `client." -"py` in which the previous code lives in and `app` refers to the global " -"attribute `app` that points to an object of type `ClientApp`." +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." msgstr "" -"이 `client:app` 예제에서 `client`는 이전 코드가 있는 Python 모듈 `client.py`" -"를 가리키고 `app`는 `ClientApp` 유형의 객체를 가리키는 전역 속성 `app`을 가리" -"킵니다." +"`Tensorflow/Keras 시뮬레이션 " +"`_: 100개의 클라이언트가 공동으로 MNIST에서 MLP 모델을 훈련합니다." 
-#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`evaluate `\\ \\(\\)" -msgstr ":py:obj:`evaluate `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:154 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." +msgstr "" +"파이토치 시뮬레이션 `_: 100개의 클라이언트가 공동으로 MNIST에서 CNN 모델을 훈련합니다." -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid "Return a decorator that registers the evaluate fn with the client app." -msgstr "클라이언트 앱에 평가함수를 등록하는 데코레이터를 반환합니다." +#: ../../source/how-to-run-simulations.rst:159 +msgid "Multi-node Flower simulations" +msgstr "멀티 노드 Flower 시뮬레이션" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`query `\\ \\(\\)" -msgstr ":py:obj:`query `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:161 +#, fuzzy +msgid "" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. Before starting your multi-node simulation ensure" +" that you:" +msgstr "" +"Flower의 :code:`VirtualClientEngine`을 사용하면 여러 컴퓨팅 노드에서 FL 시뮬레이션을 실행할 수 " +"있습니다. 멀티 노드 시뮬레이션을 시작하기 전에 다음 사항을 확인하세요:" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -msgid "Return a decorator that registers the query fn with the client app." -msgstr "클라이언트 앱에 query fn을 등록하는 데코레이터를 반환합니다." - -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`train `\\ \\(\\)" -msgstr ":py:obj:`train `\\ \\(\\)" - -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -msgid "Return a decorator that registers the train fn with the client app." -msgstr "클라이언트 앱에 train fn을 등록하는 데코레이터를 반환합니다." +#: ../../source/how-to-run-simulations.rst:164 +msgid "Have the same Python environment in all nodes." +msgstr "모든 노드에서 동일한 Python 환경을 유지합니다." 
-#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" -msgstr "NumPyClient" +#: ../../source/how-to-run-simulations.rst:165 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +msgstr "모든 노드에 코드 사본(예: 전체 레포지토리)을 보관하세요." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:166 msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" msgstr "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"모든 노드에 데이터 세트의 사본을 보유하세요(자세한 내용은 :ref:`simulation considerations " +"`에서 확인하세요)" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:168 +#, fuzzy msgid "" -":py:obj:`fit `\\ \\(parameters\\, config\\)" +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." msgstr "" -":py:obj:`fit `\\ \\(parameters\\, config\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." -msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 파라미터를 학습합니다." - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +":code:`ray_init_args={\"address\"=\"auto\"}`를 `start_simulation `_에 전달하여 " +":code:`VirtualClientEngine`이 실행 중인 Ray 인스턴스에 연결되도록 합니다." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:171 +#, fuzzy msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." 
msgstr "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"헤드 노드에서 Ray 시작: 터미널에서 :code:`ray start --head`를 입력합니다. 이 명령은 몇 줄을 출력하며, 그" +" 중 하나는 다른 노드를 헤드 노드에 연결하는 방법을 나타냅니다." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:174 +#, fuzzy msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +"``ray start --address='192.168.1.132:6379'``" msgstr "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"헤드 노드에 다른 노드 연결: 헤드를 시작한 후 표시된 명령어을 복사하여 새 노드의 터미널에서 실행합니다: 예: :code:`ray" +" start --address='192.168.1.132:6379'`" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." -msgstr "클라이언트의 속성 집합을 반환합니다." +#: ../../source/how-to-run-simulations.rst:178 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." +msgstr "위의 모든 작업이 완료되면 단일 노드에서 시뮬레이션을 실행할 때와 마찬가지로 헤드 노드에서 코드를 실행할 수 있습니다." -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-run-simulations.rst:181 +#, fuzzy msgid "" -":py:obj:`set_context `\\ \\(context\\)" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." msgstr "" -":py:obj:`set_context `\\ \\(context\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." -msgstr "객체를 클라이언트 유형으로 변환하고 반환합니다." 
- -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" +"시뮬레이션이 완료되면 클러스터를 해체하려면 각 노드(헤드 노드 포함)의 터미널에서 :code:`ray stop` 명령을 실행하기만 " +"하면 됩니다." -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -msgid "The current (global) model parameters." -msgstr "현재(전역) 모델 매개변수입니다." +#: ../../source/how-to-run-simulations.rst:185 +msgid "Multi-node simulation good-to-know" +msgstr "멀티 노드 시뮬레이션에 대해 알아두면 좋은 사항" -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +#: ../../source/how-to-run-simulations.rst:187 msgid "" -"Configuration parameters which allow the server to influence evaluation on " -"the client. It can be used to communicate arbitrary values from the server " -"to the client, for example, to influence the number of examples used for " -"evaluation." -msgstr "" -"서버가 클라이언트의 평가에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 " -"평가에 사용되는 예제 수에 영향을 주기 위해 서버에서 클라이언트로 임의의 값을 " -"전달하는 데 사용할 수 있습니다." +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" +msgstr "여기에서는 멀티 노드 FL 시뮬레이션을 실행할 때 흥미로운 몇 가지 기능을 나열합니다:" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-run-simulations.rst:189 +#, fuzzy msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or str. " -"It can be used to communicate arbitrary values back to the server." 
+"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." msgstr "" -"* **loss** (*float*) - 로컬 데이터 세트에서 모델의 평가 손실입니다. * " -"**num_examples** (*int*) -- 평가에 사용된 예제 수입니다. * **metrics** " -"(*Dict[str, Scalar]*) -- 임의의 문자열 키를 부울, 바이트, float, int 또는 " -"str 유형의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 다시 전달하는 " -"데 사용할 수 있습니다." +"사용자는 :code:`ray status`를 통해 헤드 노드에 연결된 모든 노드와 " +":code:`VirtualClientEngine`에 사용 가능한 총 리소스를 확인할 수 있습니다." -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-run-simulations.rst:192 +#, fuzzy msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local dataset." -msgstr "**loss** (*float*) -- 로컬 데이터 세트에서 모델의 평가 손실입니다." +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" +msgstr "" +"새 노드를 헤드에 연결하면 해당 노드의 모든 리소스(즉, 모든 CPU, 모든 GPU)가 헤드 노드에 표시됩니다. 즉, " +":code:`VirtualClientEngine`은 해당 노드가 실행할 수 있는 만큼의 `가상` 클라이언트를 예약할 수 있습니다. " +"일부 설정에서는 시뮬레이션에서 특정 리소스를 제외하고 싶을 수 있습니다. 모든 :code:`ray start` 명령(헤드 시작 시 " +"포함)에 `--num-cpus=` 및/또는 `--num-" +"gpus=`를 추가하여 이 작업을 수행하면 됩니다" -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." -msgstr "**num_examples** (*int*) - 평가에 사용된 예제 수입니다." 
+#: ../../source/how-to-run-simulations.rst:202 +msgid "Considerations for simulations" +msgstr "시뮬레이션 시 고려 사항" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-run-simulations.rst:206 msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary string " -"keys to values of type bool, bytes, float, int, or str. It can be used to " -"communicate arbitrary values back to the server." -msgstr "" -"**metrics** (*Dict[str, Scalar]*) - 임의의 문자열 키를 bool, bytes, float, " -"int 또는 str 타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 다시 " -"전달하는 데 사용할 수 있습니다." +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." +msgstr "Flower 시뮬레이션으로 모든 FL 워크로드를 간편하게 실행할 수 있도록 이러한 측면에서 적극적으로 노력하고 있습니다." -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +#: ../../source/how-to-run-simulations.rst:209 msgid "" -"The previous return type format (int, float, float) and the extended format " -"(int, float, float, Dict[str, Scalar]) have been deprecated and removed " -"since Flower 0.19." +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." msgstr "" -"이전 반환 유형 형식(int, float, float)과 확장 형식(int, float, float, " -"Dict[str, Scalar])은 Flower 0.19부터 더 이상 사용되지 않으며 제거되었습니다." +"현재 VCE를 사용하면 개인 노트북에서 간단한 시나리오를 프로토타이핑하든, 여러 고성능 GPU 노드에서 복잡한 FL 파이프라인을 " +"훈련하든 상관없이 시뮬레이션 모드에서 Federated 학습 워크로드를 실행할 수 있습니다. VCE에 더 많은 기능을 추가하는 " +"동안, 아래에서는 Flower로 FL 파이프라인을 설계할 때 염두에 두어야 할 몇 가지 사항을 강조합니다. 
또한 현재 구현에서 몇 " +"가지 제한 사항을 강조합니다." -#: flwr.client.numpy_client.NumPyClient.fit:5 of +#: ../../source/how-to-run-simulations.rst:217 +msgid "GPU resources" +msgstr "GPU 리소스" + +#: ../../source/how-to-run-simulations.rst:219 +#, fuzzy msgid "" -"Configuration parameters which allow the server to influence training on the " -"client. It can be used to communicate arbitrary values from the server to " -"the client, for example, to set the number of (local) training epochs." +"The VCE assigns a share of GPU memory to a client that specifies the key " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " +"internally by the VCE) is by default:" msgstr "" -"서버가 클라이언트의 훈련에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 " -"(로컬) 트레이닝 에포크 수를 설정하는 등 서버에서 클라이언트로 임의의 값을 전" -"달하는 데 사용할 수 있습니다." +"VCE는 :code:`client_resources`에서 :code:`num_gpus` 키를 지정하는 클라이언트에 GPU 메모리 " +"공유를 할당합니다. 즉, (VCE에서 내부적으로 사용하는) Ray가 기본적으로 사용됩니다:" -#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: ../../source/how-to-run-simulations.rst:222 +#, fuzzy msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary string " -"keys to values of type bool, bytes, float, int, or str. It can be used to " -"communicate arbitrary values back to the server." +"not aware of the total VRAM available on the GPUs. This means that if you" +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." msgstr "" -"* **parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다. * " -"**num_examples** (*int*) -- 학습에 사용된 예제 수입니다. * **metrics** " -"(*Dict[str, Scalar]*) - 임의의 문자열 키를 bool, bytes, float, int,또는 str " -"타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 다시 전달하는 데 사" -"용할 수 있습니다." 
- -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -msgstr "**parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다." - -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." -msgstr "**num_examples** (*int*) - 트레이닝에 사용된 예제 수입니다." +"GPU에서 사용 가능한 총 VRAM을 인식하지 못합니다. 즉, 시스템에 서로 다른(예: 32GB와 8GB) VRAM 용량을 가진 두" +" 개의 GPU가 있고 :code:`num_gpus=0.5`를 설정하면 둘 다 동시에 2개의 클라이언트를 실행하게 됩니다." -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +#: ../../source/how-to-run-simulations.rst:225 msgid "" -"Configuration parameters requested by the server. This can be used to tell " -"the client which parameters are needed along with some Scalar attributes." +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" msgstr "" -"서버에서 요청한 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 매개변" -"수가 필요한지 클라이언트에게 알려주는 데 사용할 수 있습니다." +"관련 없는(즉, VCE에 의해 생성되지 않은) 다른 워크로드가 GPU에서 실행되고 있는지 알지 못합니다. 여기서 두 가지 시사점을 " +"얻을 수 있습니다:" -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +#: ../../source/how-to-run-simulations.rst:228 msgid "" -"**parameters** -- The local model parameters as a list of NumPy ndarrays." -msgstr "**parameters** -- 로컬 모델 파라미터를 NumPy 배열 목록으로 표시합니다." +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" +msgstr "" +"집계 후 '글로벌 모델'을 평가하려면 Flower 서버에 GPU가 필요할 수 있습니다(예: `evaluate method `_를 사용할 때)" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +#: ../../source/how-to-run-simulations.rst:231 +#, fuzzy msgid "" -"Configuration parameters requested by the server. This can be used to tell " -"the client which properties are needed along with some Scalar attributes." 
+"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" -"서버에서 요청하는 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 속성" -"이 필요한지 클라이언트에게 알려주는 데 사용할 수 있습니다." +"동일한 머신에서 여러 개의 독립적인 Flower 시뮬레이션을 실행하려면, 실험을 시작할 때 " +":code:`CUDA_VISIBLE_DEVICES=\"\"`로 GPU를 마스킹해야 합니다." -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: ../../source/how-to-run-simulations.rst:235 +#, fuzzy msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of " -"type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." msgstr "" -"**properties** -- 임의의 문자열 키를 bool, bytes, float, int 또는 str 타입의 " -"값에 매핑하는 dictionary입니다. 임의의 속성 값을 서버에 다시 전달하는 데 사용" -"할 수 있습니다." +"또한 :code:`client_resources`에 전달된 GPU 리소스 제한이 '강제'되지 않아(즉, 초과할 수 있음) " +"클라이언트가 시뮬레이션을 시작할 때 지정된 비율보다 더 많은 VRAM을 사용하는 상황이 발생할 수 있습니다." -#: ../../source/ref-api/flwr.client.mod.rst:2 -msgid "mod" -msgstr "mod" +#: ../../source/how-to-run-simulations.rst:240 +msgid "TensorFlow with GPUs" +msgstr "GPU를 사용한 TensorFlow" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-run-simulations.rst:242 msgid "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. 
Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." msgstr "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"`TensorFlow와 함께 GPU를 사용 `_하면 프로세스에 " +"보이는 모든 GPU의 거의 전체 GPU 메모리가 매핑됩니다. 이는 최적화 목적으로 TensorFlow에서 수행됩니다. 그러나 " +"GPU를 여러 개의 '가상' 클라이언트로 분할하려는 FL 시뮬레이션과 같은 설정에서는 이는 바람직한 메커니즘이 아닙니다. 다행히도 " +"'메모리 증가 활성화 " +"`_'를 통해 " +"이 기본 동작을 비활성화할 수 있습니다." -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of -msgid "Client-side adaptive clipping modifier." -msgstr "클라이언트 측 적응형 클리핑 수정자." +#: ../../source/how-to-run-simulations.rst:249 +#, fuzzy +msgid "" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " +"follows:" +msgstr "" +"이 작업은 메인 프로세스(서버가 실행되는 곳)와 VCE에서 생성한 각 액터에서 수행해야 합니다. " +":code:`actor_kwargs`를 통해 예약 키 `\"on_actor_init_fn\"`을 전달하여 액터 초기화 시 실행할 " +"함수를 지정할 수 있습니다. 이 경우 TF 워크로드에 대한 GPU 증가를 활성화합니다. 다음과 같이 보입니다:" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-run-simulations.rst:272 msgid "" -":py:obj:`fixedclipping_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." msgstr "" -":py:obj:`fixedclipping_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +"이것이 바로`Tensorflow/Keras Simulation " +"`_ 예제에서 사용된 메커니즘입니다." -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of -msgid "Client-side fixed clipping modifier." -msgstr "클라이언트 측 고정 클리핑 수정자." 
+#: ../../source/how-to-run-simulations.rst:276 +msgid "Multi-node setups" +msgstr "멀티 노드 설정" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" -msgstr ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +#: ../../source/how-to-run-simulations.rst:278 +msgid "" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." +msgstr "" +"VCE는 현재 특정 '가상' 클라이언트를 어느 노드에서 실행할지 제어하는 방법을 제공하지 않습니다. 즉, 클라이언트가 실행하는 데 " +"필요한 리소스가 하나 이상의 노드에 있는 경우 해당 노드 중 어느 노드에나 클라이언트 워크로드가 예약될 수 있습니다. FL 프로세스" +" 후반부(즉, 다른 라운드에서)에는 동일한 클라이언트가 다른 노드에서 실행될 수 있습니다. 클라이언트가 데이터 세트에 액세스하는 " +"방식에 따라 모든 노드에 모든 데이터 세트 파티션의 복사본을 보유하거나 데이터 중복을 피하기 위해 데이터 세트 제공 메커니즘(예: " +"nfs, 데이터베이스 사용)을 사용해야 할 수 있습니다." + +#: ../../source/how-to-run-simulations.rst:286 +msgid "" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." +msgstr "" +"정의상 가상 클라이언트는 임시적 특성으로 인해 '상태 없음'입니다. 
클라이언트 상태는 Flower 클라이언트 클래스의 일부로 구현할" +" 수 있지만, 사용자는 이를 영구 저장소(예: 데이터베이스, 디스크)에 저장하여 나중에 실행 중인 노드와 관계없이 동일한 " +"클라이언트가 검색할 수 있도록 해야 합니다. 이는 어떤 식으로든 클라이언트의 데이터 세트가 일종의 '상태'로 볼 수 있기 때문에 " +"위의 요점과도 관련이 있습니다." -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." -msgstr "." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" +msgstr "모델 체크포인트 저장 및 로드" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 msgid "" -":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " -"call\\_next\\)" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." msgstr "" -":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " -"call\\_next\\)" +"Flower는 서버 측에서 모델 업데이트를 자동으로 저장하지 않습니다. 이 사용법 가이드에서는 Flower에서 모델 체크포인트를 " +"저장(및 로드)하는 단계에 대해 설명합니다." -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of -msgid "" -"Handle incoming message and return results, following the SecAgg protocol." -msgstr "SecAgg 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" +msgstr "모델 체크포인트" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid "" -":py:obj:`secaggplus_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#, fuzzy +msgid "" +"Model updates can be persisted on the server-side by customizing " +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. 
In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" -":py:obj:`secaggplus_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +":code:`Strategy` 메소드를 사용자 지정하여 서버 측에서 모델 업데이트를 지속할 수 있습니다. 사용자 지정 전략을 " +"구현하는 것은 항상 옵션이지만 대부분의 경우 기존 전략을 간단히 사용자 지정하는 것이 더 편리할 수 있습니다. 다음 코드 예시는 " +"기존의 기본 제공 :code:`FedAvg` 전략을 사용자 지정한 새로운 :code:`SaveModelStrategy`를 " +"정의합니다. 특히, 기본 클래스(:code:`FedAvg`)에서 :code:`aggregate_fit`을 호출하여 " +":code:`aggregate_fit`을 사용자 지정합니다. 그런 다음 호출자(즉, 서버)에게 집계된 가중치를 반환하기 전에 " +"반환된(집계된) 가중치를 계속 저장합니다:" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of -msgid "" -"Handle incoming message and return results, following the SecAgg+ protocol." -msgstr "SecAgg+ 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 +msgid "Save and load PyTorch checkpoints" +msgstr "파이토치 체크포인트 저장 및 로드" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 msgid "" -":py:obj:`message_size_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." msgstr "" -":py:obj:`message_size_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.message_size_mod:1 of -msgid "Message size mod." -msgstr "메시지 크기 수정." 
+"이전 예제와 비슷하지만 몇 가지 단계가 추가되어 ``torch.save`` 함수를 사용하여 파이토치 체크포인트를 저장하는 방법을 " +"보여드리겠습니다. 먼저, ``aggregate_fit``은 ``Parameters`` 객체를 반환하는데, 이 객체는 NumPy " +"``ndarray``의 목록으로 변환되어야 하며, ``OrderedDict`` 클래스 구조에 따라 파이토치 " +"``state_dict``로 변환됩니다." -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 msgid "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" msgstr "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.parameters_size_mod:1 of -msgid "Parameters size mod." -msgstr "매개변수 크기 mod." +"진행 상황을 로드하려면 코드에 다음 줄을 추가하기만 하면 됩니다. 이렇게 하면 저장된 모든 체크포인트를 반복하고 최신 체크포인트를 " +"로드합니다:" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 msgid "" -":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\, " -"sensitivity\\, ...\\)" -msgstr "" -":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\, " -"sensitivity\\, ...\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:35::1 -#: flwr.client.mod.localdp_mod.LocalDpMod:1 of -msgid "Modifier for local differential privacy." -msgstr "로컬 차분 프라이버시를 위한 수정자." +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." +msgstr "``전략``을 정의할 때 ``초기_파라미터``와 같이 필요한 경우 ``파라미터`` 유형의 이 객체를 반환/사용합니다." 
-#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 -msgid "LocalDpMod" -msgstr "LocalDpMod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "Flower 1.0으로 업그레이드" -#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -"This mod clips the client model updates and adds noise to the params before " -"sending them to the server." +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." msgstr "" -"이 모드는 클라이언트 모델 업데이트를 클립하고 서버로 보내기 전에 파라미터에 " -"노이즈를 추가합니다." +"Flower 1.0이 출시되었습니다. 새로운 기능과 함께 Flower 1.0은 향후 성장을 위한 안정적인 기반을 제공합니다. " +"Flower 0.19(및 다른 0.x 시리즈 릴리스)와 비교했을 때 기존 0.x 시리즈 프로젝트의 코드를 변경해야 하는 몇 가지 " +"획기적인 변경 사항이 있습니다." -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 -#: flwr.client.mod.localdp_mod.LocalDpMod:6 of -msgid "It operates on messages of type `MessageType.TRAIN`." -msgstr "이 함수는 `MessageType.TRAIN` 유형의 메시지에 대해 작동합니다." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 +msgid "Install update" +msgstr "업데이트 설치" -#: flwr.client.mod.localdp_mod.LocalDpMod:8 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of -msgid "The value of the clipping norm." -msgstr "클리핑 기준값입니다." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +msgid "" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" +msgstr "다음은 pip 또는 Poetry를 사용하여 기존 설치를 Flower 1.0으로 업데이트하는 방법입니다:" -#: flwr.client.mod.localdp_mod.LocalDpMod:10 of -msgid "The sensitivity of the client model." 
-msgstr "클라이언트 모델의 민감도입니다." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "pip: add ``-U`` when installing." +msgstr "pip: 설치할 때 ``-U``를 추가합니다." -#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" -"The privacy budget. Smaller value of epsilon indicates a higher level of " -"privacy protection." +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" msgstr "" -"개인정보 보호 예산. 엡실론 값이 작을수록 개인정보 보호 수준이 높음을 나타냅니" -"다." +"``python -m pip install -U flwr``(``start_server`` 및 ``start_client``를 " +"사용하는 경우)" -#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" -"The failure probability. The probability that the privacy mechanism fails to " -"provide the desired level of privacy. A smaller value of delta indicates a " -"stricter privacy guarantee." -msgstr "" -"실패 확률입니다. 프라이버시 메커니즘이 원하는 수준의 프라이버시를 제공하지 못" -"할 확률입니다. 델타 값이 작을수록 프라이버시가 더 엄격하게 보장된다는 의미입" -"니다." - -#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" +msgstr "``python -m pip install -U 'flwr[simulation]'``(``start_simulation`` 사용 시)" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" -"Create an instance of the local DP mod and add it to the client-side mods:" -msgstr "로컬 DP 모드의 인스턴스를 생성하고 클라이언트 측 모드에 추가합니다:" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." +msgstr "" +"Poetry: ``pyproject.toml``에서 ``flwr`` dependency을 업데이트한 다음 다시 " +"설치하세요(``poetry 설치``를 실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는" +" 것을 잊지 마세요)." 
-#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 -msgid "adaptiveclipping\\_mod" -msgstr "adaptiveclipping\\_mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +msgstr "``flwr = \"^1.0.0\"``(``start_server`` 및 ``start_client`` 사용 시)" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy wrapper." +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" -"이 모드는 서버 측 전략 래퍼인 차분 프라이버시 클라이언트 측 적응형 클리핑과 " -"함께 사용해야 합니다." +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` " +"(``start_simulation`` 사용 시)" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of -msgid "The wrapper sends the clipping_norm value to the client." -msgstr "래퍼는 클라이언트에 clipping_norm 값을 전송합니다." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:121 +msgid "Required changes" +msgstr "필수 변경 사항" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of -msgid "" -"This mod clips the client model updates before sending them to the server." -msgstr "이 모드는 클라이언트 모델 업데이트를 서버로 보내기 전에 클립합니다." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +msgid "The following breaking changes require manual updates." +msgstr "다음과 같은 주요 변경 사항에는 수동 업데이트가 필요합니다." -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of -msgid "" -"It also sends KEY_NORM_BIT to the server for computing the new clipping " -"value." -msgstr "또한 새 클리핑 값을 계산하기 위해 서버로 KEY_NORM_BIT을 전송합니다." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "General" +msgstr "일반" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of -msgid "Notes" -msgstr "참고" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 +msgid "" +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" +msgstr "모든 전달인자를 위치 전달인자가 아닌 키워드 전달인자로 전달합니다. 다음은 예시입니다:" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of -msgid "Consider the order of mods when using multiple." -msgstr "여러 개를 사용할 때는 모드의 순서를 고려하세요." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 +msgid "" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" +msgstr "" +"Flower 0.19 (위치 전달인자): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" -"Typically, adaptiveclipping_mod should be the last to operate on params." +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" -"일반적으로 adaptiveclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니" -"다." 
+"Flower 1.0 (키워드 전달인자): ``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" -#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 -msgid "fixedclipping\\_mod" -msgstr "fixedclipping\\_mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" +msgstr "클라이언트" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" msgstr "" -"이 모드는 서버 측 전략 래퍼인 DifferentialPrivacyClientSideFixedClipping과 함" -"께 사용해야 합니다." +"``NumPyClient``의 서브클래스: ``def get_parameters(self):``를 ``def " +"get_parameters(self, config):``로 변경합니다" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of -msgid "Typically, fixedclipping_mod should be the last to operate on params." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +msgid "" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" msgstr "" -"일반적으로 fixedclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니다." - -#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 -msgid "make\\_ffn" -msgstr "make\\_ffn" +"``클라이언트``의 서브클래스: ``def get_parameters(self):``를 ``def " +"get_parameters(self, ins: GetParametersIns):``로 변경합니다" -#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 -msgid "message\\_size\\_mod" -msgstr "message\\_size\\_mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "Strategies / ``start_server`` / ``start_simulation``" +msgstr "전략 / ``start_server`` / ``start_simulation``" -#: flwr.client.mod.comms_mods.message_size_mod:3 of -msgid "This mod logs the size in bytes of the message being transmited." -msgstr "이 모드는 전송되는 메시지의 크기를 바이트 단위로 기록합니다." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" +msgstr "" +"Dictionary 대신 ``ServerConfig``를 ``start_server`` 및 ``start_simulation``에 " +"전달합니다. 다음은 예제입니다:" -#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 -msgid "parameters\\_size\\_mod" -msgstr "parameters\\_size\\_mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 +msgid "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" +msgstr "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" -#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "" -"This mod logs the number of parameters transmitted in the message as well as " -"their size in bytes." +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" msgstr "" -"이 모드는 메시지에서 전송된 매개변수의 수와 그 크기를 바이트 단위로 기록합니" -"다." +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 -msgid "secagg\\_mod" -msgstr "secagg\\_mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +msgid "" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" +msgstr "" +"``start_simulation``의 ``num_rounds=1``을 새로운 ``config=ServerConfig(...)``로" +" 바꿉니다(이전 항목 참조)" -#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 -msgid "secaggplus\\_mod" -msgstr "secaggplus\\_mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 +msgid "" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. 
Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." +msgstr "" +"'start_server`` 호출에서 ``force_final_distributed_eval`` 매개변수를 제거합니다. 모든 " +"클라이언트에 대한 분산 평가는 마지막 훈련 라운드 후 평가를 위해 모든 클라이언트를 샘플링하도록 전략을 구성하여 활성화할 수 " +"있습니다." -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" -msgstr "run\\_client\\_app" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +msgid "Rename parameter/ndarray conversion functions:" +msgstr "매개변수/ndarray 변환 함수의 이름을 바꿉니다:" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -msgid "run\\_supernode" -msgstr "run\\_supernode" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: ../../source/ref-api/flwr.client.start_client.rst:2 -msgid "start\\_client" -msgstr "start\\_client" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be `\"[::]:8080\"`." +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." msgstr "" -"서버의 IPv4 또는 IPv6 주소입니다. 
Flower 서버가 포트 8080의 동일한 컴퓨터에" -"서 실행되는 경우 `서버_주소`는 `\"[::]:8080\"`이 됩니다." +"전략 초기화: 전략이 ``fraction_fit`` 및 ``fraction_evaluate``의 기본값에 의존하는 경우 " +"``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 수동 설정합니다. 전략을 수동으로 " +"생성하지 않는 프로젝트(전략 인스턴스를 전달하지 않고 ``start_server`` 또는 ``start_simulation``을 " +"호출하여)는 이제 ``fraction_fit`` 및 ``fraction_evaluate``를 ``0.1``로 설정하여 FedAvg를" +" 수동으로 초기화해야 합니다." -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. (default: None)" -msgstr "클라이언트를 인스턴스화하는 호출 가능 항목입니다. (기본값: None)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "기본 제공 전략 매개변수의 이름을 바꿉니다(예: ``FedAvg``):" -#: flwr.client.app.start_client:9 of -msgid "" -"An implementation of the abstract base class `flwr.client.Client` (default: " -"None)" -msgstr "추상 베이스 클래스 `flwr.client.Client`의 구현(기본값: None)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "``eval_fn`` --> ``evaluate_fn``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower " -"server. The default should be sufficient for most models. Users who train " -"very large models might need to increase this value. Note that the Flower " -"server needs to be started with the same value (see `flwr.server." -"start_server`), otherwise it will not know about the increased limit and " -"block larger messages." +"Rename ``rnd`` to ``server_round``. 
This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." msgstr "" -"Flower 서버와 교환할 수 있는 gRPC 메시지의 최대 길이입니다. 기본값은 대부분" -"의 모델에 충분합니다. 매우 큰 모델을 훈련하는 사용자는 이 값을 늘려야 할 수" -"도 있습니다. Flower 서버는 동일한 값으로 시작해야 하며(`flwr.server." -"start_server` 참조), 그렇지 않으면 증가된 제한을 알지 못해 더 큰 메시지를 차" -"단합니다." +"``rnd``의 이름을 ``server_round``로 바꿉니다. 이는 여러 메서드 및 함수(예: ``configure_fit``," +" ``aggregate_fit``, ``configure_evaluate``, ``aggregate_evaluate`` 및 " +"``evaluate_fn``)에 영향을 미칩니다." -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "``server_round`` 및 ``config``를 ``evaluate_fn``에 추가합니다:" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established to " -"an SSL-enabled Flower server." +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -"바이트 문자열 또는 경로 문자열로 PEM 인코딩된 루트 인증서. 제공하면 인증서를 " -"사용하여 SSL이 활성화된 Flower 서버에 보안 연결이 설정됩니다." +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection when " -"False, using system certificates if `root_certificates` is None." +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" -"True일 경우 안전하지 않은 gRPC 연결을 시작합니다. root_certificates`가 None" -"인 경우 시스템 인증서를 사용하여 False일 때 HTTPS 연결을 활성화합니다." 
+"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +msgid "Custom strategies" +msgstr "사용자 정의 전략" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" -"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response (experimental) " -"- 'rest': HTTP (experimental)" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" msgstr "" -"전송 계층을 구성합니다. 허용되는 값입니다: - 'grpc-bidi': gRPC, 양방향 스트리" -"밍 - 'grpc-rere': gRPC, 요청-응답(실험적) - 'rest': HTTP(실험적)" +"매개변수 ``failures``의 유형이 ``List[BaseException]``에서 " +"``List[Union[Tuple[ClientProxy], FitRes], " +"BaseException]]``(``aggregate_fit``에서) 및 ``List[Union[Tuple[ClientProxy]," +" EvaluateRes], BaseException]]``(``aggregate_evaluate``)로 변경되었습니다" -#: flwr.client.app.start_client:31 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is no " -"limit to the number of tries." -msgstr "" -"연결 오류 발생 시 클라이언트가 서버 연결을 포기하기 전에 시도하는 최대 횟수입" -"니다. None으로 설정하면 시도 횟수에 제한이 없습니다." 
+"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" +msgstr "이제 ``Strategy`` 메서드 ``evaluate``는 현재 federated 학습/평가 라운드를 첫 번째 파라미터로 받습니다:" -#: flwr.client.app.start_client:35 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "" -"The maximum duration before the client stops trying to connect to the server " -"in case of connection error. If set to None, there is no limit to the total " -"time." +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -"연결 오류 발생 시 클라이언트가 서버에 대한 연결을 시도하지 않는 최대 기간입니" -"다. None으로 설정하면 총 시간에는 제한이 없습니다." - -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" -msgstr "안전하지 않은 서버 연결로 gRPC 클라이언트 시작하기:" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -msgid "Starting an SSL-enabled gRPC client using system certificates:" -msgstr "시스템 인증서를 사용하여 SSL 사용 gRPC 클라이언트를 시작합니다:" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 +msgid "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -msgid "Starting an SSL-enabled gRPC client using provided certificates:" -msgstr "제공된 인증서를 사용하여 SSL 지원 gRPC 클라이언트를 시작합니다:" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +msgid "Optional improvements" +msgstr "선택적 개선 사항" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 -msgid "start\\_numpy\\_client" -msgstr "start\\_numpy\\_client" +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:111 +msgid "" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" +msgstr "위의 필수 변경 사항과 함께 방금 가능한 여러 가지 잠재적 개선 사항이 있습니다:" -#: flwr.client.app.start_numpy_client:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" -"This function is deprecated since 1.7.0. Use :code:`flwr.client." -"start_client` instead and first convert your :code:`NumPyClient` to type :" -"code:`flwr.client.Client` by executing its :code:`to_client()` method." +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" -"이 함수는 1.7.0부터 더 이상 사용되지 않습니다. 대신 :code:`flwr.client." -"start_client`를 사용하고 먼저 :code:`to_client()` 메서드를 실행하여 :code:" -"`NumPyClient`를 :code:`flwr.client.Client` 유형으로 변환합니다." - -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." -msgstr "추상 베이스 클래스 `flwr.client.NumPyClient`의 구현입니다." - -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" -msgstr "공통" +"``Client`` 또는 ``NumPyClient``의 서브 클래스에서 \"placeholder\" 메서드를 제거합니다. 예를 들어" +" 서버 측 평가를 사용하는 경우 ``evaluate``의 빈 자리 표시자 구현은 더 이상 필요하지 않습니다." 
-#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 msgid "" -":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" -":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +"``start_simulation``을 통해 라운드 타임아웃을 구성합니다: ``start_simulation(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -msgid "Create Array from NumPy ndarray." -msgstr "NumPy에서 배열을 만듭니다." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:349 +msgid "Further help" +msgstr "추가 도움말" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" -":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." msgstr "" -":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +"대부분의 공식 ``Flower code 예제 " +"`_는 이미 Flower 1.0으로 " +"업데이트되어 있으며, Flower 1.0 API를 사용하기 위한 참고 자료로 사용할 수 있습니다. 더 궁금한 점이 있다면 ``플라워" +" 슬랙 `_에 가입하여 ``#questions`` 채널을 이용하세요." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." -msgstr "바이트에서 NumPy를 역직렬화합니다." 
+#: ../../source/how-to-upgrade-to-flower-next.rst:2 +msgid "Upgrade to Flower Next" +msgstr "Flower Next 업그레이드" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:4 msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." msgstr "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." -msgstr "파일 및/또는 원격 로그 서버에 로깅을 구성합니다." +"Flower에서 Flower Next로의 업데이트를 위한 이동 가이드에 오신 것을 환영합니다! 이 가이드는 숙련된 사용자든 이제 막" +" 시작한 사용자든 상관없이 기존 설정을 원활하게 전환하여 버전 1.8부터 Flower Next의 최신 기능 및 개선 사항을 활용할 " +"수 있도록 도와드립니다." -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:11 msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." msgstr "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +"이 가이드에서는 Flower Next의 *호환성 레이어*를 사용하여 최소한의 코드 변경으로 ``1.8`` 이전의 Flower 코드를" +" 재사용하는 방법을 보여줍니다. 다른 가이드에서는 순수한 Flower Next API로 Flower Next를 end-to-end로" +" 실행하는 방법을 보여드리겠습니다." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." -msgstr "차단을 피하기 위해 create_event를 ThreadPoolExecutor에 제출합니다." 
+#: ../../source/how-to-upgrade-to-flower-next.rst:15 +msgid "Let's dive in!" +msgstr "자세히 알아봅시다!" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:68 msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" +msgstr "기존에 설치된 Flower to Flower Next를 ``pip``으로 업데이트하는 방법은 다음과 같습니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:74 +msgid "or if you need Flower Next with simulation:" +msgstr "또는 시뮬레이션이 포함된 Flower Next가 필요한 경우:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:80 +msgid "" +"Ensure you set the following version constraint in your " +"``requirements.txt``" +msgstr "``requirements.txt``에서 다음 버전 제약 조건을 설정했는지 확인하세요" + +#: ../../source/how-to-upgrade-to-flower-next.rst:90 +msgid "or ``pyproject.toml``:" +msgstr "또는 ``pyproject.toml``:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:101 +msgid "Using Poetry" +msgstr "Poetry 사용" + +#: ../../source/how-to-upgrade-to-flower-next.rst:103 +msgid "" +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"``pyproject.toml``에서 ``flwr`` 의존성를 업데이트한 다음 다시 설치하세요(``poetry install``을 " +"실행하기 전에 ``rm poetry.lock``을 통해 ``poetry.lock``을 삭제하는 것을 잊지 마세요)." -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." -msgstr "정수 심각도 'level'과 함께 'msg % args'를 기록합니다." 
+#: ../../source/how-to-upgrade-to-flower-next.rst:106 +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:123 msgid "" -":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" msgstr "" -":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +"Flower Next에서는 *infrastructure*와 *application layers*가 분리되었습니다. 코드에서 " +"``start_client()``를 통해 클라이언트를 시작하는 대신, 명령줄을 통해 |clientapp_link|_를 생성하여 " +"시작합니다. 코드에서 ``start_server()``를 통해 서버를 시작하는 대신 |serverapp_link|_를 생성하고 " +"명령줄을 통해 서버를 시작합니다. 서버와 클라이언트의 장기 실행 컴포넌트를 SuperLink와 SuperNode라고 합니다. 수동 " +"업데이트가 필요하지 않고 기존 방식과 Flower Next 방식 모두에서 프로젝트를 실행할 수 있는 non-breaking 변경 " +"사항은 다음과 같습니다:" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." -msgstr "NumPy와 배열을 바이트열로 직렬화합니다." +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +msgid "|clientapp_link|_" +msgstr "|clientapp_link|_" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`now `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:134 +msgid "" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. 
Here's an example:" +msgstr "" +"|clientapp_link|_를 통해 실행하는 대신 기존 클라이언트를 |clientapp_link|_로 래핑하세요. 다음은 " +"예시입니다:" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." -msgstr "표준 시간대를 UTC로 설정하여 time.time()에서 날짜 시간을 생성합니다." +#: ../../source/how-to-upgrade-to-flower-next.rst:157 +msgid "|serverapp_link|_" +msgstr "|serverapp_link|_" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:159 msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" msgstr "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"서버를 시작하려면 |startserver_link|_를 통해 서버를 시작하는 대신 기존 전략을 |serverapp_link|_로 " +"래핑하세요. 다음은 예시입니다:" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." -msgstr "NumPy 배열을 매개변수 객체로 변환합니다." +#: ../../source/how-to-upgrade-to-flower-next.rst:180 +msgid "Deployment" +msgstr "배포" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:182 msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"실행하기 전에 |flowernext_superlink_link|_를 사용하여 ``SuperLink``를 실행한 후 " +"|flowernext_clientapp_link|_(2회) 및 |flowernext_serverapp_link|_를 순서대로 " +"실행합니다. 
'client.py'와 'server.py'를 Python 스크립트로 실행할 필요는 없습니다." -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." -msgstr "매개변수 객체를 NumPy 배열로 변환합니다." +#: ../../source/how-to-upgrade-to-flower-next.rst:185 +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" +msgstr "다음은 HTTPS 없이 서버를 시작하는 예제입니다(프로토타이핑용으로만 사용):" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:201 msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, data\\)" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." msgstr "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, data\\)" +"다음은 HTTPS로 시작하는 또 다른 예제입니다. '`--ssl-ca-certfile``, '`--ssl-certfile``, " +"'`--ssl-keyfile`` 명령줄 옵션을 사용하여 (CA 인증서, 서버 인증서 및 서버 개인 키)의 경로를 전달합니다." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -msgid "Array type." -msgstr "배열 유형." +#: ../../source/how-to-upgrade-to-flower-next.rst:229 +msgid "Simulation in CLI" +msgstr "CLI 시뮬레이션" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:231 msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" msgstr "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +"기존 클라이언트와 전략을 각각 |clientapp_link|_와 |serverapp_link|_로 래핑하세요. 더 이상 " +"|startsim_link|_를 사용할 필요가 없습니다. 
다음은 예시입니다:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." +#: ../../source/how-to-upgrade-to-flower-next.rst:264 +msgid "" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" msgstr "" -"ClientMessage는 하나의 결과 메시지를 저장하는 데 사용되는 컨테이너입니다." +"CLI에서 |flower_simulation_link|_를 실행하고 Python 스크립트를 실행하는 대신 코드에서 " +"``server_app`` / ``client_app`` 개체를 가리키세요. 다음은 예제입니다(``server_app`` 및 " +"``client_app`` 객체가 ``sim.py`` 모듈에 있다고 가정):" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Code `\\ \\(value\\)" -msgstr ":py:obj:`Code `\\ \\(value\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:281 +msgid "" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" +msgstr "" +"|startsim_link|_에서 ``client_resources`` 인수를 설정하는 대신 ``--backend-config`` " +"명령줄 인수를 사용하여 각 |clientapp_link|_에 대한 기본 리소스를 설정하세요. 다음은 예시입니다:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." -msgstr "클라이언트 상태 코드." +#: ../../source/how-to-upgrade-to-flower-next.rst:305 +msgid "Simulation in a Notebook" +msgstr "Notebook에서 시뮬레이션" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:307 msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" +msgstr "notebook에서 |startsim_link|_ 대신 |runsim_link|_를 실행하세요. 
다음은 예시입니다:" + +#: ../../source/how-to-upgrade-to-flower-next.rst:351 +msgid "" +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." msgstr "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +"일부 공식 ``Flower 코드 예제 `_는 이미 플라워 넥스트에 " +"업데이트되어 있으므로 플라워 넥스트 API를 사용하는 데 참고할 수 있습니다. 더 궁금한 점이 있다면 ``플라워 슬랙 " +"`_에 가입하고 ``#questions`` 채널을 이용하세요. 또한, " +"``Flower Discuss `_에 참여하여 질문에 대한 답변을 확인하거나 다른" +" 사람들과 Flower Next로의 이동에 대해 공유하고 배울 수 있습니다." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "Configs record." -msgstr "레코드를 설정합니다." +#: ../../source/how-to-upgrade-to-flower-next.rst:358 +msgid "Important" +msgstr "중요" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Context `\\ \\(state\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +#: ../../source/how-to-upgrade-to-flower-next.rst:360 +msgid "" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" +msgstr "" +"Flower Next는 빠른 속도로 지속적으로 개선되고 있으므로 이 가이드는 주기적으로 업데이트될 예정입니다. 피드백이 있으면 " +"언제든지 공유해 주세요!" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -msgid "State of your run." -msgstr "실행 상태." +#: ../../source/how-to-upgrade-to-flower-next.rst:366 +msgid "Happy migrating! 🚀" +msgstr "행복한 마이그레이션! 
🚀" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" -msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" +msgstr "기본 제공 모드 사용" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." -msgstr "클라이언트에서 서버로 연결 해제 메시지를 보냅니다." +#: ../../source/how-to-use-built-in-mods.rst:4 +msgid "" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" +msgstr "**참고: 이 튜토리얼은 실험적인 기능을 다룹니다. 기능 및 인터페이스는 향후 버전에서 변경될 수 있습니다.**" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:7 msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, config\\)" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, config\\)" +"이 튜토리얼에서는 내장 모드를 활용하여 ``ClientApp``의 동작을 보강하는 방법을 배우겠습니다. " +"Mods(Modifiers라고도 함)를 사용하면 ``ClientApp``에서 작업이 처리되기 전과 후에 작업을 수행할 수 있습니다." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." -msgstr "클라이언트에 대한 지침을 평가합니다." +#: ../../source/how-to-use-built-in-mods.rst:12 +msgid "What are Mods?" +msgstr "Mods란 무엇인가요?" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:14 msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. 
The signature for a ``Mod`` is as follows:" msgstr "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"Mod는 ``ClientApp``을 감싸는 콜러블입니다. 들어오는 ``Message``와 그 결과로 나가는 ``Message``를 " +"조작하거나 검사할 수 있습니다. ``Mod``의 시그니처는 다음과 같습니다:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." -msgstr "클라이언트의 응답을 평가합니다." +#: ../../source/how-to-use-built-in-mods.rst:23 +msgid "A typical mod function might look something like this:" +msgstr "일반적인 mod 함수는 다음과 같은 모습일 수 있습니다:" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`EventType `\\ \\(value\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "Using Mods" +msgstr "Mods 사용" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." -msgstr "원격 분석 이벤트의 유형." +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" +msgstr "``ClientApp``에서 mods를 사용하려면 다음 단계를 따르세요:" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" -msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-use-built-in-mods.rst:41 +msgid "1. Import the required mods" +msgstr "1. 필요한 mods를 가져옵니다" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." -msgstr "고객을 위한 맞춤 지침." +#: ../../source/how-to-use-built-in-mods.rst:43 +msgid "First, import the built-in mod you intend to use:" +msgstr "먼저 사용하려는 기본 제공 mod를 가져옵니다:" + +#: ../../source/how-to-use-built-in-mods.rst:51 +msgid "2. Define your client function" +msgstr "2. 
클라이언트 기능 정의" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:53 msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" +msgstr "mod(s)로 래핑할 클라이언트 함수('``client_fn``)를 정의합니다:" + +#: ../../source/how-to-use-built-in-mods.rst:62 +msgid "3. Create the ``ClientApp`` with mods" +msgstr "3. mods로 ``ClientApp``을 생성합니다" + +#: ../../source/how-to-use-built-in-mods.rst:64 +msgid "" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. The order in which you provide the mods matters:" msgstr "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +"``ClientApp``을 생성하고 mods를 ``mods`` argument에 목록으로 전달합니다. mods를 제공하는 순서가 " +"중요합니다:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." -msgstr "클라이언트의 적합성 응답." +#: ../../source/how-to-use-built-in-mods.rst:78 +msgid "Order of execution" +msgstr "실행 순서" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" -msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +#: ../../source/how-to-use-built-in-mods.rst:80 +msgid "" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" +msgstr "``ClientApp``이 실행되면 목록에 제공된 순서대로 모드가 실행됩니다:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of -msgid "A dataclass that stores information about an error that occurred." -msgstr "발생한 오류에 대한 정보를 저장하는 데이터 클래스입니다." 
+#: ../../source/how-to-use-built-in-mods.rst:83 +msgid "``example_mod_1`` (outermost mod)" +msgstr "``example_mod_1``(가장 바깥쪽 mod)" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:84 +msgid "``example_mod_2`` (next mod)" +msgstr "``example_mod_2`` (다음 mod)" + +#: ../../source/how-to-use-built-in-mods.rst:85 msgid "" -":py:obj:`GetParametersIns `\\ \\(config\\)" -msgstr "" -":py:obj:`GetParametersIns `\\ \\(config\\)" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" +msgstr "Message handler(들어오는 ``Message``를 처리하고 나가는 ``Message``를 반환하는 핵심 함수)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." -msgstr "클라이언트에 대한 매개변수 요청입니다." +#: ../../source/how-to-use-built-in-mods.rst:87 +msgid "``example_mod_2`` (on the way back)" +msgstr "``example_mod_2``(돌아가는 방법)" + +#: ../../source/how-to-use-built-in-mods.rst:88 +msgid "``example_mod_1`` (outermost mod on the way back)" +msgstr "``example_mod_1``(돌아가는 방법에 가장 바깥쪽 모드)" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:90 msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." msgstr "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +"각 mod는 다음 mod로 전달하기 전에 들어오는 ``Message``를 검사하고 수정할 기회가 있으며, 스택 위로 반환하기 전에 " +"나가는 ``Message``도 마찬가지로 검사하고 수정할 수 있습니다." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." -msgstr "매개변수 반환 요청 시 응답합니다." 
- -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:97 msgid "" -":py:obj:`GetPropertiesIns `\\ \\(config\\)" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." msgstr "" -":py:obj:`GetPropertiesIns `\\ \\(config\\)" +"이 가이드를 따라 mods를 효과적으로 사용하여 ``ClientApp``의 기능을 향상시키는 방법을 배웠습니다. mods 순서는 " +"매우 중요하며 입력과 출력이 처리되는 방식에 영향을 미친다는 점을 기억하세요." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." -msgstr "클라이언트에 대한 속성 요청." +#: ../../source/how-to-use-built-in-mods.rst:101 +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +msgstr "Mods를 통해 더욱 강력하고 유연한 ``ClientApp``을 구축해 보세요!" + +#: ../../source/how-to-use-differential-privacy.rst:2 +msgid "Use Differential Privacy" +msgstr "차분 개인정보 보호 사용" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:4 msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." -msgstr "클라이언트의 속성 응답을 확인합니다." +"이 가이드에서는 Flower 프레임워크에서 차분 개인정보 보호 기능을 활용하는 방법을 설명합니다. 차분 개인정보 보호에 대해 아직 " +"익숙하지 않은 경우 :doc:`explanation-differential-privacy`를 참조하세요." 
-#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:10 msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." msgstr "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +"Flower의 차분 개인정보 보호는 현재 프리뷰 단계에 있습니다. 민감한 데이터가 있는 프로덕션 환경에서 이러한 기능을 사용할 " +"계획이라면 언제든지 문의하여 요구 사항을 논의하고 이러한 기능을 가장 잘 사용하는 방법에 대한 안내를 받으세요." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -msgid "State of your application from the viewpoint of the entity using it." -msgstr "애플리케이션을 사용하는 엔티티의 관점에서 애플리케이션의 상태입니다." +#: ../../source/how-to-use-differential-privacy.rst:17 +#, fuzzy +msgid "" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." +msgstr "" +"이 접근 방식은 업데이트 클리핑과 집계된 모델에 노이즈 추가라는 두 가지 단계로 구성됩니다. 클리핑 단계의 경우, Flower " +"프레임워크는 클리핑을 서버 측에서 수행할지 클라이언트 측에서 수행할지 결정할 수 있도록 했습니다." -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageType `\\ \\(\\)" -msgstr ":py:obj:`MessageType `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -msgid "Message type." -msgstr "메시지 타입." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -msgid "Legacy message type." -msgstr "레거시 메시지 타입." 
- -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:21 msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, message\\_id\\, " -"src\\_node\\_id\\, ...\\)" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." msgstr "" -":py:obj:`Metadata `\\ \\(run\\_id\\, message\\_id\\, " -"src\\_node\\_id\\, ...\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of -msgid "A dataclass holding metadata associated with the current message." -msgstr "현재 메시지와 관련된 메타데이터를 보유한 데이터 클래스입니다." +"**Server-side Clipping**: 이 방식은 서버가 모든 클라이언트의 업데이트에 대해 균일한 클리핑을 적용하고 클리핑 " +"값에 대한 통신 오버헤드를 줄일 수 있다는 장점이 있습니다. 하지만 모든 클라이언트에 대해 클리핑 작업을 수행해야 하기 때문에 " +"서버의 계산 부하가 증가한다는 단점도 있습니다." -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:26 msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." msgstr "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "Metrics record." -msgstr "메트릭 기록." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`NDArray `\\" -msgstr ":py:obj:`NDArray `\\" +"**Client-side Clipping**: 이 방식은 서버의 계산 오버헤드를 줄일 수 있다는 장점이 있습니다. 하지만 서버가 " +"클리핑 프로세스에 대한 통제력이 떨어지기 때문에 centralized 제어가 부족하다는 단점도 있습니다." 
-#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, :py:class:" -"`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -msgstr "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, :py:class:" -"`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "Server-side Clipping" +msgstr "서버 측 클리핑" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:33 +#, fuzzy msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." msgstr "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +"서버 측 클리핑이 있는 중앙 DP의 경우, 실제 :code:`Strategy` 인스턴스를 감싸는 래퍼 역할을 하는 두 개의 " +":code:`Strategy` 클래스가 있습니다(예: :code:`FedAvg`). 두 개의 래퍼 클래스는 고정 및 적응형 클리핑을" +" 위한 :code:`DifferentialPrivacyServerSideFixedClipping`과 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping`입니다." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." -msgstr "모델 매개변수." +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "server side clipping" +msgstr "서버 측 클리핑" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:43 +#, fuzzy msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. 
The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " +"corresponding input parameters." msgstr "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"아래 코드 샘플은 :code:`FedAvg` 전략이 " +":code:`DifferentialPrivacyServerSideFixedClipping` 래퍼 클래스를 사용하여 서버 측 고정 " +"클리핑을 사용할 수 있도록 합니다. 해당 입력 매개변수를 조정하여 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping`과 동일한 접근 방식을 사용할 수 " +"있습니다." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -msgid "Parameters record." -msgstr "매개변수 기록." +#: ../../source/how-to-use-differential-privacy.rst:64 +msgid "Client-side Clipping" +msgstr "클라이언트 측 클리핑" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" -msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +#: ../../source/how-to-use-differential-privacy.rst:66 +#, fuzzy +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +"``Mods`` to perform the clipping. Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." +msgstr "" +"클라이언트 측 클리핑이 있는 중앙 DP의 경우 서버는 각 라운드마다 선택한 클라이언트에 클리핑 값을 보냅니다. 클라이언트는 기존 " +"Flower :code:`Mods`를 사용하여 클리핑을 수행할 수 있습니다. 고정 및 적응형 클라이언트 측 클리핑에는 두 가지 " +"모드를 사용할 수 있습니다: :code:`fixedclipping_mod` 및 :code:`adaptiveclipping_mod`와" +" 해당 서버 측 래퍼 :code:`DifferentialPrivacyClientSideFixedClipping` 및 " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`이 있습니다." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." -msgstr "서버에서 클라이언트로 메시지를 다시 연결합니다." 
+#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "client side clipping" +msgstr "클라이언트 측 클리핑" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:78 +#, fuzzy msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -msgid "RecordSet stores groups of parameters, metrics and configs." -msgstr "RecordSet은 매개변수, 메트릭 및 설정 그룹을 저장합니다." +"아래 코드 샘플은 :code:`FedAvg` 전략이 클라이언트 측 고정 클리핑과 함께 차분 프라이버시를 사용할 수 있도록 " +":code:`DifferentialPrivacyClientSideFixedClipping` 래퍼 클래스와 클라이언트에서 " +":code:`fixedclipping_mod`를 모두 사용하도록 합니다:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:97 +#, fuzzy msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +"서버 측 전략 래퍼 외에도 클라이언트 측 클리핑을 수행하려면 :code:`ClientApp`이 일치하는 " +":code:`fixedclipping_mod`를 구성해야 합니다:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." +#: ../../source/how-to-use-differential-privacy.rst:115 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. 
The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." msgstr "" -"ServerMessage는 하나의 instruction 메시지를 저장하는 데 사용되는 컨테이너입니" -"다." - -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" -msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" +"로컬 차분 프라이버시(DP)를 활용하고 클라이언트 모델 파라미터를 서버로 전송하기 전에 노이즈를 추가하려면 `LocalDpMod`를" +" 사용하면 됩니다. 클리핑 norm 값, 감도, 엡실론, 델타 등의 하이퍼파라미터를 설정해야 합니다." -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." -msgstr "클라이언트 상태." +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" +msgstr "로컬 DP mod" +#: ../../source/how-to-use-differential-privacy.rst:125 +#, fuzzy +msgid "Below is a code example that shows how to use ``LocalDpMod``:" +msgstr "다음은 :code:`LocalDpMod`를 사용하는 방법을 보여주는 코드 예시입니다:" -#: ../../source/ref-api/flwr.common.Array.rst:2 -msgid "Array" -msgstr "배열" -#: flwr.common.record.parametersrecord.Array:3 of +#: ../../source/how-to-use-differential-privacy.rst:140 msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." msgstr "" -"배열형 또는 텐서형 객체의 직렬화된 데이터와 그에 대한 일부 메타데이터를 포함" -"하는 데이터 클래스입니다." +"여러 개의 수정자를 사용할 때는 수정자, 특히 매개변수를 수정하는 수정자의 순서가 중요하다는 점에 유의하세요. 일반적으로 차분 " +"프라이버시(DP) 수정자는 매개변수에서 가장 마지막에 작동해야 합니다." -#: flwr.common.record.parametersrecord.Array:6 of -msgid "" -"A string representing the data type of the serialised object (e.g. `np." 
-"float32`)" -msgstr "직렬화된 객체의 데이터 유형을 나타내는 문자열(예: `np.float32`)" +#: ../../source/how-to-use-differential-privacy.rst:145 +msgid "Local Training using Privacy Engines" +msgstr "Privacy Engines을 사용한 로컬 훈련" -#: flwr.common.record.parametersrecord.Array:8 of +#: ../../source/how-to-use-differential-privacy.rst:147 msgid "" -"A list representing the shape of the unserialized array-like object. This is " -"used to deserialize the data (depending on the serialization method) or " -"simply as a metadata field." +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." msgstr "" -"직렬화되지 않은 배열과 같은 객체의 모양을 나타내는 목록입니다. 직렬화 방법에 " -"따라 데이터를 역직렬화하는 데 사용되거나 단순히 메타데이터 필드로 사용됩니다." +"클라이언트 측에서 로컬 모델을 훈련하는 동안 데이터 인스턴스 수준의 개인 정보 보호를 보장하려면 Opacus 및 TensorFlow" +" Privacy와 같은 개인 정보 보호 엔진을 활용하는 것을 고려하세요. 이러한 엔진과 함께 Flower를 사용하는 예제는 " +"Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)를 참조하세요." -#: flwr.common.record.parametersrecord.Array:12 of +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "전략 사용하기" + +#: ../../source/how-to-use-strategies.rst:4 +#, fuzzy msgid "" -"A string indicating the type of serialisation mechanism used to generate the " -"bytes in `data` from an array-like or tensor-like object." +"Flower allows full customization of the learning process through the " +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." msgstr "" -"배열형 또는 텐서형 객체에서 `데이터`의 바이트를 생성하는 데 사용되는 직렬화 " -"메커니즘의 유형을 나타내는 문자열입니다." - -#: flwr.common.record.parametersrecord.Array:15 of -msgid "A buffer of bytes containing the data." -msgstr "데이터를 포함하는 바이트 버퍼입니다." 
- -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -msgid ":py:obj:`numpy `\\ \\(\\)" -msgstr ":py:obj:`numpy `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of -msgid "Return the array as a NumPy array." -msgstr "배열을 NumPy 배열로 반환합니다." - -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`dtype `\\" -msgstr ":py:obj:`dtype `\\" +"Flower는 :code:`Strategy` abstraction를 통해 학습 과정을 완전히 사용자 정의할 수 있습니다. 핵심 " +"프레임워크에는 여러 가지 기본 제공 전략이 제공됩니다." -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`shape `\\" -msgstr ":py:obj:`shape `\\" +#: ../../source/how-to-use-strategies.rst:7 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "서버 측에서 Flower가 학습 과정을 조율하는 방식을 사용자 지정하는 방법에는 세 가지가 있습니다:" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`stype `\\" -msgstr ":py:obj:`stype `\\" +#: ../../source/how-to-use-strategies.rst:10 +#, fuzzy +msgid "Use an existing strategy, for example, ``FedAvg``" +msgstr "기존 전략(예: :code:`FedAvg`)을 사용합니다" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`data `\\" -msgstr ":py:obj:`data `\\" +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 +msgid "Customize an existing strategy with callback functions" +msgstr "콜백 함수로 기존 전략 사용자 지정" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 -msgid "ClientMessage" -msgstr "클라이언트 메시지" +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 +msgid "Implement a novel strategy" +msgstr "새로운 전략 구현" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`evaluate_res `\\" -msgstr ":py:obj:`evaluate_res `\\" +#: ../../source/how-to-use-strategies.rst:15 +msgid "Use an existing strategy" +msgstr "기존 전략 사용" -#: 
../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`fit_res `\\" -msgstr ":py:obj:`fit_res `\\" +#: ../../source/how-to-use-strategies.rst:17 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" +msgstr "Flower에는 여러 가지 인기 있는 연합 학습 전략이 기본으로 제공됩니다. 기본 제공 전략은 다음과 같이 인스턴스화할 수 있습니다:" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-strategies.rst:27 +#, fuzzy msgid "" -":py:obj:`get_parameters_res `\\" +"This creates a strategy with all parameters left at their default values " +"and passes it to the ``start_server`` function. It is usually recommended" +" to adjust a few parameters during instantiation:" msgstr "" -":py:obj:`get_parameters_res `\\" +"이렇게 하면 모든 매개변수가 기본값으로 유지된 전략이 생성되어 :code:`start_server` 함수에 전달됩니다. 일반적으로 " +"인스턴스화 중에 몇 가지 매개변수를 조정하는 것이 좋습니다:" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/how-to-use-strategies.rst:45 msgid "" -":py:obj:`get_properties_res `\\" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." msgstr "" -":py:obj:`get_properties_res `\\" - -#: ../../source/ref-api/flwr.common.Code.rst:2 -msgid "Code" -msgstr "코드" - -#: flwr.common.typing.Code:1 of -msgid "Bases: :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`~enum.Enum`" +"기존 전략은 동작을 사용자 지정하는 여러 가지 방법을 제공합니다. 콜백 함수를 사용하면 전략이 실행 중에 사용자가 제공한 코드를 " +"호출할 수 있습니다." 
-#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`OK `\\" -msgstr ":py:obj:`OK `\\" +#: ../../source/how-to-use-strategies.rst:49 +msgid "Configuring client fit and client evaluate" +msgstr "클라이언트 적합성 및 클라이언트 평가 구성" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-strategies.rst:51 +#, fuzzy msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED `\\" -msgstr "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED `\\" +"The server can pass new configuration values to the client each round by " +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." +msgstr "" +"서버는 매 라운드마다 새로운 설정 값을 클라이언트에 전달하기 위해 :code:`on_fit_config_fn`에 함수를 제공할 수 " +"있습니다. 제공된 함수는 전략에 의해 호출되며 클라이언트에 전송될 구성 키 값 쌍의 dictionary를 반환해야 합니다. 연합 " +"학습의 각 라운드 동안 임의의 구성 값 dictionary인 :code:`client.fit` 및 " +":code:`client.evaluate` 함수를 반환해야 합니다." + +#: ../../source/how-to-use-strategies.rst:84 +#, fuzzy +msgid "" +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." +msgstr "" +":code:`on_fit_config_fn`은 서버에서 클라이언트로 임의의 구성 값을 전달하고, 예를 들어 학습 속도를 조정하기 " +"위해 매 라운드마다 이 값을 잠재적으로 변경하는 데 사용할 수 있습니다. 클라이언트는 자체 :code:`client.fit()` " +"함수에서 :code:`on_fit_config_fn`이 반환한 dictionary를 받습니다." 
-#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-strategies.rst:89 +#, fuzzy msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED `\\" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" msgstr "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED `\\" +":code:`on_fit_config_fn`과 유사하게, :code:`client.evaluate()`로 전송되는 구성을 사용자 " +"지정하는 :code:`on_evaluate_config_fn`도 있습니다" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +#: ../../source/how-to-use-strategies.rst:93 +msgid "Configuring server-side evaluation" +msgstr "서버 측 평가 구성" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../source/how-to-use-strategies.rst:95 +#, fuzzy +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to ``evaluate_fn``." +msgstr "서버 측 평가는 :code:`evaluate_fn`에 평가 함수를 전달하여 활성화할 수 있습니다." + +#: ../../source/how-to-use-strategies.rst:101 msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED `\\" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." msgstr "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED `\\" +"완전한 사용자 지정 전략을 작성하는 것은 조금 더 복잡하지만 유연성이 가장 뛰어납니다. 자세한 내용은 `Implementing " +"Strategies `_ 가이드를 참조하세요." 
-#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 -msgid "ConfigsRecord" -msgstr "컨피그 레코드" +#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "튜토리얼" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" -"`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` | :py:class:" -"`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ [:py:class:`int`] | :" -"py:class:`~typing.List`\\ [:py:class:`float`] | :py:class:`~typing.List`\\ [:" -"py:class:`str`] | :py:class:`~typing.List`\\ [:py:class:`bytes`] | :py:class:" -"`~typing.List`\\ [:py:class:`bool`]]" -msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" -", :py:class:`int` | :py:class:`float` | :py:class:`str` | :py:class:`bytes` |" -" :py:class:`bool` | :py:class:`~typing.List`\\ [:py:class:`int`] | " -":py:class:`~typing.List`\\ [:py:class:`float`] | :py:class:`~typing.List`\\ " -"[:py:class:`str`] | :py:class:`~typing.List`\\ [:py:class:`bytes`] | " -":py:class:`~typing.List`\\ [:py:class:`bool`]]" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "빠른 시작 튜토리얼" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Remove all items from R." -msgstr "R에서 모든 항목을 제거합니다." 
+#: ../../source/index.rst:81 ../../source/index.rst:85 +msgid "How-to guides" +msgstr "사용 방법 가이드" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/index.rst:106 +msgid "Legacy example guides" +msgstr "레거시 예제 가이드" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Return number of Bytes stored in this object." -msgstr "이 객체에 저장된 바이트 수를 반환합니다." +#: ../../source/index.rst:114 ../../source/index.rst:119 +msgid "Explanations" +msgstr "설명" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: None:-1 +msgid "API reference" +msgstr "API 참조" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -msgid "d defaults to None." -msgstr "d는 기본값이 None입니다." 
+#: ../../source/index.rst:145 +msgid "Reference docs" +msgstr "참조 문서" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +#: ../../source/index.rst:160 +msgid "Contributor tutorials" +msgstr "기여자 튜토리얼" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +#: ../../source/index.rst:167 +msgid "Contributor how-to guides" +msgstr "기여자 사용법 가이드" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/index.rst:179 +msgid "Contributor explanations" +msgstr "기여자 설명" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -msgid "" -"If key is not found, d is returned if given, otherwise KeyError is raised." -msgstr "키를 찾을 수 없으면 주어진 경우 d가 반환되고, 그렇지 않으면 KeyError가 " -"발생합니다." +#: ../../source/index.rst:185 +msgid "Contributor references" +msgstr "기여자 참조" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/index.rst:-1 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" -")" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -msgid "Update R from dict/iterable E and F." -msgstr "dict/iterable E 및 F에서 R을 업데이트합니다." +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." +msgstr "연합 학습을 위한 Python 개발을 쉽게 할 수 있는 주요 Flower 프레임워크의 설명서를 확인하세요." 
-#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" +msgstr "플라워 프레임워크 문서" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -msgid "This function counts booleans as occupying 1 Byte." -msgstr "이 함수는 booleans을 1바이트를 차지하는 것으로 계산합니다." +#: ../../source/index.rst:7 +msgid "" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." +msgstr "Flower 문서에 오신 것을 환영합니다. Flower `_는 편한 연합 학습 프레임워크입니다." -#: ../../source/ref-api/flwr.common.Context.rst:2 -msgid "Context" -msgstr "컨텍스트" +#: ../../source/index.rst:11 +msgid "Join the Flower Community" +msgstr "Flower 커뮤니티 가입하기" -#: flwr.common.context.Context:3 of +#: ../../source/index.rst:13 +msgid "" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." +msgstr "Flower 커뮤니티는 연구원, 엔지니어, 학생, 전문가, 학자 및 기타 애호가들로 구성된 편한 그룹으로 빠르게 성장하고 있습니다." + +#: ../../source/index.rst:16 +msgid "Join us on Slack" +msgstr "Slack에 가입하세요" + +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "Flower 프레임워크" + +#: ../../source/index.rst:25 msgid "" -"Holds records added by the entity in a given run and that will stay local. " -"This means that the data it holds will never leave the system it's running " -"from. This can be used as an intermediate storage or scratchpad when " -"executing mods. It can also be used as a memory to access at different " -"points during the lifecycle of this entity (e.g. across multiple rounds)" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." msgstr "" -"특정 실행에서 엔티티가 추가한 레코드를 보유하며 로컬에 유지됩니다. 
즉, " -"저장된 데이터는 실행 중인 시스템을 벗어나지 않습니다. 모드를 실행할 때 중간 " -"저장소나 스크래치 패드로 사용할 수 있습니다. 또한 이 엔티티의 수명 주기 동안 " -"다른 시점에서 액세스하기 위한 메모리로도 사용할 수 있습니다(예: 여러 " -"라운드에 걸쳐)" +"이 사용자 가이드는 Flower를 사용해 기존 머신 러닝 워크로드를 연합된 환경으로 가져오고자 하는 연구자와 개발자를 대상으로 " +"합니다. Flower의 설계 목표 중 하나는 이를 간단하게 만드는 것이었습니다. 자세히 알아보려면 계속 읽어보세요." -#: ../../source/ref-api/flwr.common.Context.rst:28::1 -msgid ":py:obj:`state `\\" -msgstr ":py:obj:`state `\\" +#: ../../source/index.rst:30 +msgid "Tutorials" +msgstr "튜토리얼" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -msgid "DisconnectRes" -msgstr "연결 해제" +#: ../../source/index.rst:32 +msgid "" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." +msgstr "학습 중심의 연합 학습 튜토리얼 시리즈로, 시작하기에 가장 좋은 곳입니다." -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +#: ../../source/index.rst:62 +#, fuzzy +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +msgstr "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`🤗 Transformers" +" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " +"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " +":doc:`Android ` | :doc:`iOS `" + +#: ../../source/index.rst:70 +msgid "We also made video tutorials for PyTorch:" +msgstr "파이토치용 동영상 튜토리얼도 만들었습니다:" -#: ../../source/ref-api/flwr.common.Error.rst:2 -msgid "Error" -msgstr "오류" +#: ../../source/index.rst:75 +msgid "And TensorFlow:" +msgstr "그리고 TensorFlow도:" -#: flwr.common.message.Error:3 of -msgid "An identifier for the error." -msgstr "오류 식별자입니다." +#: ../../source/index.rst:83 +msgid "" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." 
+msgstr "문제 중심의 방법 가이드는 특정 목표를 달성하는 방법을 단계별로 보여줍니다." -#: flwr.common.message.Error:5 of -msgid "A reason for why the error arose (e.g. an exception stack-trace)" -msgstr "오류가 발생한 이유(예: 예외 스택 추적)" +#: ../../source/index.rst:116 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." +msgstr "이해 중심의 개념 가이드에서는 Flower와 협업 AI의 주요 주제와 기본 아이디어를 설명하고 토론합니다." -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" +#: ../../source/index.rst:128 +msgid "References" +msgstr "참조" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -msgid "Error code." -msgstr "오류 코드." +#: ../../source/index.rst:130 +msgid "Information-oriented API reference and other reference material." +msgstr "정보 지향 API 참조 및 기타 참고 자료." -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +#: ../../source/index.rst:139::1 +msgid ":py:obj:`flwr `\\" +msgstr ":py:obj:`flwr `\\" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -msgid "Reason reported about the error." -msgstr "오류에 대해 보고된 사유입니다." +#: ../../source/index.rst:139::1 flwr:1 of +msgid "Flower main package." +msgstr "Flower 메인 패키지." -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 -msgid "EvaluateIns" -msgstr "평가" +#: ../../source/index.rst:155 +msgid "Contributor docs" +msgstr "기여자 문서" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +#: ../../source/index.rst:157 +msgid "" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." +msgstr "Flower 커뮤니티는 여러분의 기여를 환영합니다. 다음 문서는 그 과정에서 도움을 드리기 위한 문서입니다." 
-#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" +msgstr "Flower CLI 참조" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -msgid "EvaluateRes" -msgstr "EvaluateRes" +#: ../../source/ref-api-cli.rst:7 +#, fuzzy +msgid "flwr CLI" +msgstr "Flower 클라이언트." -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: ../../flwr:1 +#, fuzzy +msgid "flwr is the Flower command line interface." +msgstr "Flower ClientProxy 인스턴스 등록 해제." -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`loss `\\" -msgstr ":py:obj:`loss `\\" +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Options" +msgstr "해결법" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" +#: ../../flwr:1 +#, fuzzy +msgid "Install completion for the current shell." +msgstr "현재 실행에 대한 식별자입니다." -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" +#: ../../flwr:1 +msgid "" +"Show completion for the current shell, to copy it or customize the " +"installation." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:2 -msgid "EventType" -msgstr "이벤트 타입" +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." +msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +#: ../../flwr build:1 +msgid "" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. 
For example:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." +msgstr "" + +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" +msgstr "" + +#: ../../flwr install:1 +#, fuzzy +msgid "Install a Flower App Bundle." +msgstr "Flower 설치" + +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" +msgstr "" + +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" +msgstr "" + +#: ../../flwr install:1 msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" +"This will install ``target_project`` to ``./docs/flwr/``. By default, " +"``flwr-dir`` is equal to:" msgstr "" -":py:obj:`encode `\\ \\(\\[encoding\\, errors\\]" -"\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -msgid "Encode the string using the codec registered for encoding." -msgstr "인코딩용으로 등록된 코덱을 사용하여 문자열을 인코딩합니다." +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" +msgstr "" + +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" +msgstr "" + +#: ../../flwr install:1 +msgid "The desired install path." +msgstr "" + +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Arguments" +msgstr "빌드 전달인자" + +#: ../../flwr install:1 log:1 new:1 run:1 +#, fuzzy +msgid "Optional argument" +msgstr "선택적 개선 사항" + +#: ../../flwr install:1 +msgid "The source FAB file to install." +msgstr "" + +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." 
+msgstr "" + +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" + +#: ../../flwr log run +msgid "default" +msgstr "" + +#: ../../flwr log:1 +#, fuzzy +msgid "``True``" +msgstr "``DISTRO``" + +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "빌드 전달인자" + +#: ../../flwr log:1 +#, fuzzy +msgid "The Flower run ID to query" +msgstr "Flower 커뮤니티 가입하기" + +#: ../../flwr log:1 +msgid "Path of the Flower project to run" +msgstr "" + +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" +msgstr "" + +#: ../../flwr new:1 +#, fuzzy +msgid "Create new Flower App." +msgstr "Flower 서버를 실행하세요." + +#: ../../flwr new:1 +msgid "The ML framework to use" +msgstr "" + +#: ../../flwr new +#, fuzzy +msgid "options" +msgstr "해결법" + +#: ../../flwr new:1 msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" msgstr "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." -msgstr "이전 하위 문자열이 모두 새 하위 문자열로 바뀐 사본을 반환합니다." +#: ../../flwr new:1 +msgid "The Flower username of the author" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr new:1 +#, fuzzy +msgid "The name of the Flower App" +msgstr "Flower 기본 이미지의 태그." + +#: ../../flwr run:1 +#, fuzzy +msgid "Run Flower App." +msgstr "Flower 서버를 실행하세요." 
+ +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" +msgstr "" + +#: ../../flwr run:1 msgid "" -":py:obj:`split `\\ \\(\\[sep\\, maxsplit\\]\\)" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" msgstr "" -":py:obj:`split `\\ \\(\\[sep\\, maxsplit\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +#: ../../flwr run:1 msgid "" -"Return a list of the substrings in the string, using sep as the separator " -"string." -msgstr "sep를 구분 문자열로 사용하여 문자열의 하위 문자열 목록을 반환합니다." +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." +msgstr "" + +#: ../../flwr run:1 +msgid "" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." +msgstr "" + +#: ../../flwr run:1 +#, fuzzy +msgid "``False``" +msgstr "``flwr/base``" + +#: ../../flwr run:1 +#, fuzzy +msgid "Path of the Flower App to run." +msgstr "Flower 기본 이미지의 태그." + +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." +msgstr "" + +#: ../../source/ref-api-cli.rst:16 +msgid "flower-simulation" +msgstr "flower 시뮬레이션" + +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" +msgstr "flower 초연결" + +#: ../../source/ref-api-cli.rst:36 +#, fuzzy +msgid "flower-supernode" +msgstr "Flower SuperNode" + +#: ../../source/ref-api-cli.rst:46 +msgid "flower-server-app" +msgstr "flower 서버 프로그램" + +#: ../../source/ref-api-cli.rst:50 +msgid "" +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." 
+msgstr "" + +#: ../../source/ref-api-cli.rst:64 +#, fuzzy +msgid "flower-superexec" +msgstr "flower 초연결" + +#: ../../source/ref-api/flwr.rst:2 +msgid "flwr" +msgstr "flwr" + +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" +msgstr "Modules" + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.client `\\" +msgstr ":py:obj:`flwr.client `\\" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." +msgstr "Flower 클라이언트." + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.common `\\" +msgstr ":py:obj:`flwr.common `\\" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "서버와 클라이언트 간에 공유되는 공통 구성 요소입니다." + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.server `\\" +msgstr ":py:obj:`flwr.server `\\" + +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." +msgstr "Flower 서버." + +#: ../../source/ref-api/flwr.rst:35::1 +#, fuzzy +msgid ":py:obj:`flwr.simulation `\\" +msgstr ":py:obj:`flwr.simulation `\\" + +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +msgid "Flower simulation." +msgstr "Flower 시뮬레이션." 
+ +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "클라이언트" + +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +msgid "Functions" +msgstr "함수" + +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." +msgstr "Flower 서버에 연결되는 Flower 클라이언트 노드를 시작합니다." + +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" +msgstr "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" + +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." +msgstr "gRPC 서버에 연결되는 Flower NumPyClient를 시작합니다." + +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" +msgstr "클래스" + +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`Client `\\ \\(\\)" +msgstr ":py:obj:`Client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." +msgstr "Flower 클라이언트를 위한 추상 베이스 클래스입니다." 
+ +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" +msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" + +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of +msgid "Flower ClientApp." +msgstr "Flower ClientApp." + +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" +msgstr ":py:obj:`NumPyClient `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." +msgstr "NumPy를 사용하는 Flower 클라이언트를 위한 추상 베이스 클래스입니다." + +#: ../../source/ref-api/flwr.client.rst:50::1 +#, fuzzy +msgid ":py:obj:`flwr.client.mod `\\" +msgstr ":py:obj:`flwr.client.mod `\\" + +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of +msgid "Flower Built-in Mods." +msgstr "Flower 내장 모드." + +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" +msgstr "Bases: :py:class:`~abc.ABC`" + +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: 
../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" +msgstr "메소드" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" +msgstr ":py:obj:`evaluate `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 평가합니다." 
+ +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" +msgstr ":py:obj:`fit `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." +msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 매개변수를 구체화합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +msgid "Get the run context from this client." +msgstr "이 클라이언트에서 실행 컨텍스트를 가져옵니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr ":py:obj:`get_parameters `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." +msgstr "현재 로컬 모델 파라미터를 반환합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgstr ":py:obj:`get_properties `\\ \\(ins\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." +msgstr "클라이언트의 속성 집합을 반환합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" +msgstr ":py:obj:`set_context `\\ \\(context\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." 
+msgstr "이 클라이언트에 실행 컨텍스트를 적용합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." +msgstr "클라이언트(자체)를 반환합니다." + +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 
+msgid "Attributes" +msgstr "속성" + +#: flwr.client.Client.context:1::1 of +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" + +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." +msgstr "" + +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" +msgstr "파라미터" + +#: flwr.client.client.Client.evaluate:3 of +msgid "" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." +msgstr "서버에서 받은 (전역) 모델 파라미터와 로컬 평가 프로세스를 사용자 지정하는 데 사용되는 구성 값 사전이 포함된 평가 지침입니다." 
+ +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Returns" +msgstr "반환" + +#: flwr.client.client.Client.evaluate:8 of +msgid "" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." +msgstr "로컬 데이터 세트의 손실 및 평가에 사용된 로컬 데이터 예제 수와 같은 기타 세부 정보가 포함된 평가 결과입니다." 
+ +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" +msgstr "반환 타입" + +#: flwr.client.client.Client.fit:3 of +msgid "" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." +msgstr "서버에서 받은 (전역) 모델 파라미터와 로컬 학습 프로세스를 사용자 지정하는 데 사용되는 구성 값 사전이 포함된 학습 지침입니다." + +#: flwr.client.client.Client.fit:8 of +msgid "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." +msgstr "업데이트된 매개변수와 훈련에 사용된 로컬 훈련 예제 수와 같은 기타 세부 정보가 포함된 훈련 결과입니다." 
+ +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." +msgstr "구성 값 dictionary이 포함된 서버에서 받은 매개변수 가져오기 명령어입니다." + +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." +msgstr "현재 로컬 모델 파라미터입니다." + +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." +msgstr "구성 값 dictionary이 포함된 서버로부터 받은 속성 가져오기 명령입니다." + +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." +msgstr "현재 클라이언트 속성입니다." + +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" +msgstr "클라이언트앱" + +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: 
flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" +msgstr "Bases: :py:class:`object`" + +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" +msgstr "예시" + +#: flwr.client.client_app.ClientApp:5 of +msgid "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" +msgstr "" +"일반적인 `Client` 구현의 이름이 `FlowerClient`라고 가정하면, 다음과 같이 `ClientApp`으로 래핑할 수 " +"있습니다:" + +#: flwr.client.client_app.ClientApp:16 of +msgid "" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" +msgstr "위의 코드가 'client'라는 Python 모듈에 있는 경우 다음과 같이 시작할 수 있습니다:" + +#: flwr.client.client_app.ClientApp:21 of +msgid "" +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." 
+msgstr "" +"이 `client:app` 예제에서 `client`는 이전 코드가 있는 Python 모듈 `client.py`를 가리키고 " +"`app`는 `ClientApp` 유형의 객체를 가리키는 전역 속성 `app`을 가리킵니다." + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr ":py:obj:`evaluate `\\ \\(\\)" + +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." +msgstr "클라이언트 앱에 평가함수를 등록하는 데코레이터를 반환합니다." + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" +msgstr ":py:obj:`query `\\ \\(\\)" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." +msgstr "클라이언트 앱에 query fn을 등록하는 데코레이터를 반환합니다." + +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`train `\\ \\(\\)" +msgstr ":py:obj:`train `\\ \\(\\)" + +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." +msgstr "클라이언트 앱에 train fn을 등록하는 데코레이터를 반환합니다." + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" +msgstr "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +msgstr ":py:obj:`fit `\\ \\(parameters\\, config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "로컬로 보유한 데이터 세트를 사용하여 제공된 파라미터를 학습합니다." 
+ +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" +msgstr "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_properties `\\ " +"\\(config\\)" +msgstr "" +":py:obj:`get_properties `\\ " +"\\(config\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." +msgstr "클라이언트의 속성 집합을 반환합니다." + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`set_context `\\ " +"\\(context\\)" +msgstr "" +":py:obj:`set_context `\\ " +"\\(context\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." +msgstr "객체를 클라이언트 유형으로 변환하고 반환합니다." + +#: flwr.client.NumPyClient.context:1::1 of +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" + +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." +msgstr "현재(전역) 모델 매개변수입니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +msgid "" +"Configuration parameters which allow the server to influence evaluation " +"on the client. 
It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." +msgstr "" +"서버가 클라이언트의 평가에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 평가에 사용되는 예제 수에 영향을 주기 위해 서버에서" +" 클라이언트로 임의의 값을 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." +msgstr "" +"* **loss** (*float*) - 로컬 데이터 세트에서 모델의 평가 손실입니다. * **num_examples** " +"(*int*) -- 평가에 사용된 예제 수입니다. * **metrics** (*Dict[str, Scalar]*) -- 임의의 " +"문자열 키를 부울, 바이트, float, int 또는 str 유형의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 " +"다시 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." +msgstr "**loss** (*float*) -- 로컬 데이터 세트에서 모델의 평가 손실입니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +msgstr "**num_examples** (*int*) - 평가에 사용된 예제 수입니다." + +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of +msgid "" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." +msgstr "" +"**metrics** (*Dict[str, Scalar]*) - 임의의 문자열 키를 bool, bytes, float, int 또는" +" str 타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 다시 전달하는 데 사용할 수 있습니다." 
+ +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +msgid "" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." +msgstr "" +"이전 반환 유형 형식(int, float, float)과 확장 형식(int, float, float, Dict[str, " +"Scalar])은 Flower 0.19부터 더 이상 사용되지 않으며 제거되었습니다." + +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." +msgstr "" +"서버가 클라이언트의 훈련에 영향을 줄 수 있는 구성 매개변수입니다. 예를 들어 (로컬) 트레이닝 에포크 수를 설정하는 등 서버에서 " +"클라이언트로 임의의 값을 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." +msgstr "" +"* **parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다. * **num_examples** " +"(*int*) -- 학습에 사용된 예제 수입니다. * **metrics** (*Dict[str, Scalar]*) - 임의의 문자열" +" 키를 bool, bytes, float, int,또는 str 타입의 값에 매핑하는 dictionary입니다. 임의의 값을 서버에 " +"다시 전달하는 데 사용할 수 있습니다." + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "**parameters** (*NDArrays*) - 로컬로 업데이트된 모델 파라미터입니다." + +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." +msgstr "**num_examples** (*int*) - 트레이닝에 사용된 예제 수입니다." + +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +msgid "" +"Configuration parameters requested by the server. 
This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." +msgstr "" +"서버에서 요청한 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 매개변수가 필요한지 클라이언트에게 알려주는 데 사용할 수 " +"있습니다." + +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "**parameters** -- 로컬 모델 파라미터를 NumPy 배열 목록으로 표시합니다." + +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." +msgstr "" +"서버에서 요청하는 구성 매개변수입니다. 이는 일부 스칼라 속성과 함께 어떤 속성이 필요한지 클라이언트에게 알려주는 데 사용할 수 " +"있습니다." + +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." +msgstr "" +"**properties** -- 임의의 문자열 키를 bool, bytes, float, int 또는 str 타입의 값에 매핑하는 " +"dictionary입니다. 임의의 속성 값을 서버에 다시 전달하는 데 사용할 수 있습니다." + +#: ../../source/ref-api/flwr.client.mod.rst:2 +msgid "mod" +msgstr "mod" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of +msgid "Client-side adaptive clipping modifier." +msgstr "클라이언트 측 적응형 클리핑 수정자." 
+ +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of +msgid "Client-side fixed clipping modifier." +msgstr "클라이언트 측 고정 클리핑 수정자." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +msgstr ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." +msgstr "." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +msgid "Message size mod." +msgstr "메시지 크기 수정." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +msgid "Parameters size mod." +msgstr "매개변수 크기 mod." + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" +msgstr "" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." +msgstr "SecAgg 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." 
+ +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" +msgstr "" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +msgid "" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." +msgstr "SecAgg+ 프로토콜에 따라 수신 메시지를 처리하고 결과를 반환합니다." + +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +msgid "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" +msgstr "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of +msgid "Modifier for local differential privacy." +msgstr "로컬 차분 프라이버시를 위한 수정자." + +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +msgid "LocalDpMod" +msgstr "LocalDpMod" + +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +msgid "" +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." +msgstr "이 모드는 클라이언트 모델 업데이트를 클립하고 서버로 보내기 전에 파라미터에 노이즈를 추가합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." +msgstr "이 함수는 `MessageType.TRAIN` 유형의 메시지에 대해 작동합니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." +msgstr "클리핑 기준값입니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." +msgstr "클라이언트 모델의 민감도입니다." 
+ +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +msgid "" +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." +msgstr "개인정보 보호 예산. 엡실론 값이 작을수록 개인정보 보호 수준이 높음을 나타냅니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +msgid "" +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." +msgstr "" +"실패 확률입니다. 프라이버시 메커니즘이 원하는 수준의 프라이버시를 제공하지 못할 확률입니다. 델타 값이 작을수록 프라이버시가 더 " +"엄격하게 보장된다는 의미입니다." + +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" +msgstr "로컬 DP 모드의 인스턴스를 생성하고 클라이언트 측 모드에 추가합니다:" + +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" +msgstr "adaptiveclipping\\_mod" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." +msgstr "이 모드는 서버 측 전략 래퍼인 차분 프라이버시 클라이언트 측 적응형 클리핑과 함께 사용해야 합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +msgid "The wrapper sends the clipping_norm value to the client." +msgstr "래퍼는 클라이언트에 clipping_norm 값을 전송합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." +msgstr "이 모드는 클라이언트 모델 업데이트를 서버로 보내기 전에 클립합니다." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +msgid "" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." +msgstr "또한 새 클리핑 값을 계산하기 위해 서버로 KEY_NORM_BIT을 전송합니다." 
+ +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "Notes" +msgstr "참고" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." +msgstr "여러 개를 사용할 때는 모드의 순서를 고려하세요." + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +msgstr "일반적으로 adaptiveclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니다." + +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +msgid "fixedclipping\\_mod" +msgstr "fixedclipping\\_mod" + +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +msgid "" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." +msgstr "이 모드는 서버 측 전략 래퍼인 DifferentialPrivacyClientSideFixedClipping과 함께 사용해야 합니다." + +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." +msgstr "일반적으로 fixedclipping_mod는 매개변수에서 가장 마지막으로 작동해야 합니다." + +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" +msgstr "make\\_ffn" + +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" +msgstr "message\\_size\\_mod" + +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." +msgstr "이 모드는 전송되는 메시지의 크기를 바이트 단위로 기록합니다." 
+ +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +msgid "parameters\\_size\\_mod" +msgstr "parameters\\_size\\_mod" + +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +msgid "" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." +msgstr "이 모드는 메시지에서 전송된 매개변수의 수와 그 크기를 바이트 단위로 기록합니다." + +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" +msgstr "secagg\\_mod" + +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +msgid "secaggplus\\_mod" +msgstr "secaggplus\\_mod" + +#: ../../source/ref-api/flwr.client.start_client.rst:2 +msgid "start\\_client" +msgstr "start\\_client" + +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." +msgstr "" +"서버의 IPv4 또는 IPv6 주소입니다. Flower 서버가 포트 8080의 동일한 컴퓨터에서 실행되는 경우 `서버_주소`는 " +"`\"[::]:8080\"`이 됩니다." + +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" +msgstr "클라이언트를 인스턴스화하는 호출 가능 항목입니다. (기본값: None)" + +#: flwr.client.app.start_client:9 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "추상 베이스 클래스 `flwr.client.Client`의 구현(기본값: None)" + +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." +msgstr "" +"Flower 서버와 교환할 수 있는 gRPC 메시지의 최대 길이입니다. 기본값은 대부분의 모델에 충분합니다. 
매우 큰 모델을 " +"훈련하는 사용자는 이 값을 늘려야 할 수도 있습니다. Flower 서버는 동일한 값으로 시작해야 " +"하며(`flwr.server.start_server` 참조), 그렇지 않으면 증가된 제한을 알지 못해 더 큰 메시지를 차단합니다." + +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." +msgstr "" +"바이트 문자열 또는 경로 문자열로 PEM 인코딩된 루트 인증서. 제공하면 인증서를 사용하여 SSL이 활성화된 Flower 서버에 " +"보안 연결이 설정됩니다." + +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +msgid "" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." +msgstr "" +"True일 경우 안전하지 않은 gRPC 연결을 시작합니다. root_certificates`가 None인 경우 시스템 인증서를 " +"사용하여 False일 때 HTTPS 연결을 활성화합니다." + +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of +msgid "" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" +msgstr "" +"전송 계층을 구성합니다. 허용되는 값입니다: - 'grpc-bidi': gRPC, 양방향 스트리밍 - 'grpc-rere': " +"gRPC, 요청-응답(실험적) - 'rest': HTTP(실험적)" + +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "" +"연결 오류 발생 시 클라이언트가 서버 연결을 포기하기 전에 시도하는 최대 횟수입니다. None으로 설정하면 시도 횟수에 제한이 " +"없습니다." + +#: flwr.client.app.start_client:35 of +msgid "" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." +msgstr "" +"연결 오류 발생 시 클라이언트가 서버에 대한 연결을 시도하지 않는 최대 기간입니다. None으로 설정하면 총 시간에는 제한이 " +"없습니다." 
+ +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" +msgstr "안전하지 않은 서버 연결로 gRPC 클라이언트 시작하기:" + +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" +msgstr "시스템 인증서를 사용하여 SSL 사용 gRPC 클라이언트를 시작합니다:" + +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" +msgstr "제공된 인증서를 사용하여 SSL 지원 gRPC 클라이언트를 시작합니다:" + +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +msgid "start\\_numpy\\_client" +msgstr "start\\_numpy\\_client" + +#: flwr.client.app.start_numpy_client:5 of +msgid "" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." +msgstr "" +"이 함수는 1.7.0부터 더 이상 사용되지 않습니다. 대신 :code:`flwr.client.start_client`를 사용하고 " +"먼저 :code:`to_client()` 메서드를 실행하여 :code:`NumPyClient`를 " +":code:`flwr.client.Client` 유형으로 변환합니다." + +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +msgstr "추상 베이스 클래스 `flwr.client.NumPyClient`의 구현입니다." + +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "공통" + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +msgid "Create Array from NumPy ndarray." +msgstr "NumPy에서 배열을 만듭니다." 
+
+#: ../../source/ref-api/flwr.common.rst:30::1
+msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)"
+msgstr ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)"
+
+#: ../../source/ref-api/flwr.common.rst:30::1
+#: flwr.common.parameter.bytes_to_ndarray:1 of
+msgid "Deserialize NumPy ndarray from bytes."
+msgstr "바이트에서 NumPy 배열을 역직렬화합니다."
+
+#: ../../source/ref-api/flwr.common.rst:30::1
+msgid ""
+":py:obj:`configure `\\ \\(identifier\\[\\, "
+"filename\\, host\\]\\)"
+msgstr ""
+":py:obj:`configure `\\ \\(identifier\\[\\, "
+"filename\\, host\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:30::1
+#: flwr.common.logger.configure:1 of
+msgid "Configure logging to file and/or remote log server."
+msgstr "파일 및/또는 원격 로그 서버에 로깅을 구성합니다."
+
+#: ../../source/ref-api/flwr.common.rst:30::1
+msgid ""
+":py:obj:`event `\\ \\(event\\_type\\[\\, "
+"event\\_details\\]\\)"
+msgstr ""
+":py:obj:`event `\\ \\(event\\_type\\[\\, "
+"event\\_details\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:30::1
+#: flwr.common.telemetry.event:1 of
+msgid "Submit create_event to ThreadPoolExecutor to avoid blocking."
+msgstr "차단을 피하기 위해 create_event를 ThreadPoolExecutor에 제출합니다."
+
+#: ../../source/ref-api/flwr.common.rst:30::1
+msgid ""
+":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, "
+"\\*\\*kwargs\\)"
+msgstr ""
+":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, "
+"\\*\\*kwargs\\)"
+
+#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1
+#: of
+msgid "Log 'msg % args' with the integer severity 'level'."
+msgstr "정수 심각도 'level'과 함께 'msg % args'를 기록합니다."
+
+#: ../../source/ref-api/flwr.common.rst:30::1
+msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)"
+msgstr ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)"
+
+#: ../../source/ref-api/flwr.common.rst:30::1
+#: flwr.common.parameter.ndarray_to_bytes:1 of
+msgid "Serialize NumPy ndarray to bytes."
+msgstr "NumPy 배열을 바이트열로 직렬화합니다."
+ +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" +msgstr "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "NumPy 배열을 매개변수 객체로 변환합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "표준 시간대를 UTC로 설정하여 time.time()에서 날짜 시간을 생성합니다." + +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" +msgstr "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" + +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "매개변수 객체를 NumPy 배열로 변환합니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." +msgstr "배열 유형." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +msgstr "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." 
+msgstr "ClientMessage는 하나의 결과 메시지를 저장하는 데 사용되는 컨테이너입니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Code `\\ \\(value\\)" +msgstr ":py:obj:`Code `\\ \\(value\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "클라이언트 상태 코드." + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid "" +"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" +" [:py:class:`float`]]" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "Configs record." +msgstr "레코드를 설정합니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid "" +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of +#, fuzzy +msgid "Context of your run." +msgstr "실행 상태." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." +msgstr "클라이언트에서 서버로 연결 해제 메시지를 보냅니다." 
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)"
+msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.message.Error:1 of
+msgid "A dataclass that stores information about an error that occurred."
+msgstr "발생한 오류에 대한 정보를 저장하는 데이터 클래스입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`EvaluateIns `\\ \\(parameters\\, "
+"config\\)"
+msgstr ""
+":py:obj:`EvaluateIns `\\ \\(parameters\\, "
+"config\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.EvaluateIns:1 of
+msgid "Evaluate instructions for a client."
+msgstr "클라이언트에 대한 평가 지침입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, "
+"num\\_examples\\, metrics\\)"
+msgstr ""
+":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, "
+"num\\_examples\\, metrics\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.EvaluateRes:1 of
+msgid "Evaluate response from a client."
+msgstr "클라이언트의 평가 응답입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`EventType `\\ \\(value\\)"
+msgstr ":py:obj:`EventType `\\ \\(value\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.telemetry.EventType:1 of
+msgid "Types of telemetry events."
+msgstr "원격 분석 이벤트의 유형."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)"
+msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.FitIns:1 of
+msgid "Fit instructions for a client."
+msgstr "클라이언트를 위한 fit 지침입니다."
+ +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" +msgstr "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "클라이언트의 적합성 응답." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgstr ":py:obj:`GetParametersIns `\\ \\(config\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "클라이언트에 대한 매개변수 요청입니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" +msgstr "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "매개변수 반환 요청 시 응답합니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +msgstr ":py:obj:`GetPropertiesIns `\\ \\(config\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "클라이언트에 대한 속성 요청." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" +msgstr "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "클라이언트의 속성 응답을 확인합니다." 
+ +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" +msgstr "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." +msgstr "애플리케이션을 사용하는 엔티티의 관점에서 애플리케이션의 상태입니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr ":py:obj:`MessageType `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." +msgstr "메시지 타입." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." +msgstr "레거시 메시지 타입." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." +msgstr "현재 메시지와 관련된 메타데이터를 보유한 데이터 클래스입니다." + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`Metrics `\\" +msgstr ":py:obj:`metrics `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy +msgid "Metrics recod." +msgstr "메트릭 기록." 
+ +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArray `\\" +msgstr ":py:obj:`NDArray `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +msgstr "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid ":py:obj:`NDArrays `\\" +msgstr ":py:obj:`NDArray `\\" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#, fuzzy +msgid "" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" +msgstr "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" +msgstr "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "모델 매개변수." + +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" + +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "Parameters record." +msgstr "매개변수 기록." 
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#, fuzzy
+msgid ":py:obj:`Properties `\\"
+msgstr ":py:obj:`properties `\\"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)"
+msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.ReconnectIns:1 of
+msgid "ReconnectIns message from server to client."
+msgstr "서버에서 클라이언트로 보내는 ReconnectIns 메시지입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`RecordSet `\\ "
+"\\(\\[parameters\\_records\\, ...\\]\\)"
+msgstr ""
+":py:obj:`RecordSet `\\ "
+"\\(\\[parameters\\_records\\, ...\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.record.recordset.RecordSet:1 of
+msgid "RecordSet stores groups of parameters, metrics and configs."
+msgstr "RecordSet은 매개변수, 메트릭 및 설정 그룹을 저장합니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ""
+":py:obj:`ServerMessage `\\ "
+"\\(\\[get\\_properties\\_ins\\, ...\\]\\)"
+msgstr ""
+":py:obj:`ServerMessage `\\ "
+"\\(\\[get\\_properties\\_ins\\, ...\\]\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.ServerMessage:1 of
+msgid "ServerMessage is a container used to hold one instruction message."
+msgstr "ServerMessage는 하나의 instruction 메시지를 저장하는 데 사용되는 컨테이너입니다."
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+msgid ":py:obj:`Status `\\ \\(code\\, message\\)"
+msgstr ":py:obj:`Status `\\ \\(code\\, message\\)"
+
+#: ../../source/ref-api/flwr.common.rst:68::1
+#: flwr.common.typing.Status:1 of
+msgid "Client status."
+msgstr "클라이언트 상태."
+
+#: ../../source/ref-api/flwr.common.Array.rst:2
+msgid "Array"
+msgstr "배열"
+
+#: flwr.common.record.parametersrecord.Array:3 of
+msgid ""
+"A dataclass containing serialized data from an array-like or tensor-like "
+"object along with some metadata about it."
+msgstr "배열형 또는 텐서형 객체의 직렬화된 데이터와 그에 대한 일부 메타데이터를 포함하는 데이터 클래스입니다."
+ +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "직렬화된 객체의 데이터 유형을 나타내는 문자열(예: `np.float32`)" + +#: flwr.common.record.parametersrecord.Array:8 of +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "" +"직렬화되지 않은 배열과 같은 객체의 모양을 나타내는 목록입니다. 직렬화 방법에 따라 데이터를 역직렬화하는 데 사용되거나 단순히 " +"메타데이터 필드로 사용됩니다." + +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "배열형 또는 텐서형 객체에서 `데이터`의 바이트를 생성하는 데 사용되는 직렬화 메커니즘의 유형을 나타내는 문자열입니다." + +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." +msgstr "데이터를 포함하는 바이트 버퍼입니다." + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr ":py:obj:`numpy `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +msgid "Return the array as a NumPy array." +msgstr "배열을 NumPy 배열로 반환합니다." 
+ +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" +msgstr ":py:obj:`dtype `\\" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`shape `\\" +msgstr ":py:obj:`shape `\\" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`stype `\\" +msgstr ":py:obj:`stype `\\" + +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" +msgstr ":py:obj:`data `\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +msgid "ClientMessage" +msgstr "클라이언트 메시지" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" +msgstr ":py:obj:`evaluate_res `\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" +msgstr ":py:obj:`fit_res `\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_res " +"`\\" +msgstr "" +":py:obj:`get_parameters_res " +"`\\" + +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_res " +"`\\" +msgstr "" +":py:obj:`get_properties_res " +"`\\" + +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" +msgstr "코드" + +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`~enum.Enum`" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" +msgstr ":py:obj:`OK `\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" + +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" + +#: 
../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" + +#: ../../source/ref-api/flwr.common.Config.rst:2 +#, fuzzy +msgid "Config" +msgstr "구성" + +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +msgid "ConfigsRecord" +msgstr "컨피그 레코드" + +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#, fuzzy +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " +"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " +":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" +" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" + +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." 
+msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" +msgstr "" + +#: flwr.common.record.configsrecord.ConfigsRecord:42 of +msgid "" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" +msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of +msgid "Return number of Bytes stored in this object." 
+msgstr "이 객체에 저장된 바이트 수를 반환합니다." + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "키를 찾을 수 없으면 주어진 경우 d가 반환되고, 그렇지 않으면 KeyError가 발생합니다." + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." 
+msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" + +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of +msgid "" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" +msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" + +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." +msgstr "이 함수는 booleans을 1바이트를 차지하는 것으로 계산합니다." + +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" +msgstr "컨텍스트" + +#: flwr.common.context.Context:3 of +#, fuzzy +msgid "The ID that identifies the node." +msgstr "오류 식별자입니다." + +#: flwr.common.context.Context:5 of +msgid "" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." +msgstr "" + +#: flwr.common.context.Context:8 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "" +"특정 실행에서 엔티티가 추가한 레코드를 보유하며 로컬에 유지됩니다. 즉, 저장된 데이터는 실행 중인 시스템을 벗어나지 않습니다. " +"모드를 실행할 때 중간 저장소나 스크래치 패드로 사용할 수 있습니다. 
또한 이 엔티티의 수명 주기 동안 다른 시점에서 액세스하기 " +"위한 메모리로도 사용할 수 있습니다(예: 여러 라운드에 걸쳐)" + +#: flwr.common.context.Context:15 of +msgid "" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. across multiple rounds)" +msgstr "" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr ":py:obj:`src_node_id `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`node_config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`state `\\" +msgstr ":py:obj:`state `\\" + +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" +msgstr "연결 해제" + +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" + +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" +msgstr "오류" + +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." +msgstr "오류 식별자입니다." + +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "오류가 발생한 이유(예: 예외 스택 추적)" + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" + +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." +msgstr "오류 코드." + +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" + +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." +msgstr "오류에 대해 보고된 사유입니다." 
+ +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +msgid "EvaluateIns" +msgstr "평가" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" +msgstr "EvaluateRes" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" +msgstr ":py:obj:`loss `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" + +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" + +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" +msgstr "이벤트 타입" + +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" +msgstr "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." +msgstr "인코딩용으로 등록된 코덱을 사용하여 문자열을 인코딩합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" +msgstr "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." 
+msgstr "이전 하위 문자열이 모두 새 하위 문자열로 바뀐 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." +msgstr "sep를 구분 문자열로 사용하여 문자열의 하위 문자열 목록을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr ":py:obj:`join `\\ \\(iterable\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." +msgstr "원하는 수의 문자열을 연결합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr ":py:obj:`capitalize `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." +msgstr "대문자로 된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr ":py:obj:`casefold `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." +msgstr "대소문자 구분 없는 비교에 적합한 문자열을 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`title `\\ \\(\\)" +msgstr ":py:obj:`title `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." +msgstr "각 단어의 제목이 대소문자로 구분된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." +msgstr "길이 너비의 가운데 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." +msgstr "문자열 S[start:end]에서 하위 문자열 sub이 겹치지 않는 횟수를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" +msgstr "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." +msgstr "모든 탭 문자가 공백을 사용하여 확장된 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." 
+msgstr "하위 문자열 sub이 발견되는 S에서 하위가 S[start:end] 내에 포함되는 가장 낮은 인덱스를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`partition `\\ \\(sep\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." +msgstr "지정된 구분 기호를 사용하여 문자열을 세 부분으로 분할합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." +msgstr "왼쪽으로 정렬된 길이의 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr ":py:obj:`lower `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." +msgstr "소문자로 변환된 문자열 사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." +msgstr "선행 공백이 제거된 문자열의 복사본을 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." +msgstr "부분 문자열 sub이 발견되는 곳에서 sub이 S[start:end] 내에 포함되도록 S에서 가장 높은 인덱스를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." +msgstr "길이 너비의 오른쪽 정렬된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." +msgstr "후행 공백이 제거된 문자열의 복사본을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" +msgstr "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." +msgstr "문자열의 줄 목록을 줄 경계에서 구분하여 반환합니다." 
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)"
+msgstr ":py:obj:`strip `\\ \\(\\[chars\\]\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: flwr.common.EventType.strip:1 of
+msgid "Return a copy of the string with leading and trailing whitespace removed."
+msgstr "선행 및 후행 공백이 제거된 문자열 사본을 반환합니다."
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ":py:obj:`swapcase `\\ \\(\\)"
+msgstr ":py:obj:`swapcase `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: flwr.common.EventType.swapcase:1 of
+msgid ""
+"Convert uppercase characters to lowercase and lowercase characters to "
+"uppercase."
+msgstr "대문자를 소문자로, 소문자를 대문자로 변환합니다."
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)"
+msgstr ":py:obj:`translate `\\ \\(table\\, \\/\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: flwr.common.EventType.translate:1 of
+msgid "Replace each character in the string using the given translation table."
+msgstr "주어진 번역 테이블을 사용하여 문자열의 각 문자를 바꿉니다."
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ":py:obj:`upper `\\ \\(\\)"
+msgstr ":py:obj:`upper `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+#: flwr.common.EventType.upper:1 of
+msgid "Return a copy of the string converted to uppercase."
+msgstr "대문자로 변환된 문자열 사본을 반환합니다."
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid ""
+":py:obj:`startswith `\\ \\(prefix\\[\\,"
+" start\\[\\, end\\]\\]\\)"
+msgstr ""
+":py:obj:`startswith `\\ \\(prefix\\[\\,"
+" start\\[\\, end\\]\\]\\)"
+
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1
+msgid "Return True if S starts with the specified prefix, False otherwise."
+msgstr "S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 반환합니다."
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr "" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." +msgstr "S가 지정된 접미사로 끝나면 True를 반환하고 그렇지 않으면 False을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" +msgstr "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." +msgstr "주어진 접두사 문자열이 있는 경우 제거된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" +msgstr "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." +msgstr "주어진 접미사 문자열이 있는 경우 제거된 문자열을 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr ":py:obj:`isascii `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." +msgstr "문자열의 모든 문자가 ASCII인 경우 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr ":py:obj:`islower `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." +msgstr "문자열이 소문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr ":py:obj:`isupper `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." +msgstr "문자열이 대문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr ":py:obj:`istitle `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." +msgstr "문자열이 제목 대/소문자가 구분된 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isspace `\\ \\(\\)" +msgstr ":py:obj:`isspace `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." +msgstr "문자열이 공백 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr ":py:obj:`isdecimal `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." +msgstr "문자열이 10진수 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr ":py:obj:`isdigit `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." +msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr ":py:obj:`isnumeric `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." +msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr ":py:obj:`isalpha `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." +msgstr "문자열이 알파벳 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr ":py:obj:`isalnum `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." +msgstr "문자열이 영-숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr ":py:obj:`isidentifier `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." +msgstr "문자열이 유효한 파이썬 식별자인 경우 True를 반환하고, 그렇지 않으면 False를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr ":py:obj:`isprintable `\\ \\(\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." +msgstr "문자열을 인쇄할 수 있으면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+ +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr ":py:obj:`zfill `\\ \\(width\\, \\/\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of +msgid "" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." +msgstr "숫자 문자열을 왼쪽에 0으로 채워서 지정된 너비의 필드를 채웁니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." +msgstr "args와 kwarg의 치환을 사용하여 형식이 지정된 S를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr ":py:obj:`format_map `\\ \\(mapping\\)" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." +msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다." + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`maketrans `\\" +msgstr ":py:obj:`maketrans `\\" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." +msgstr "str.translate()에 사용할 수 있는 번역 테이블을 반환합니다." 
+ +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" +msgstr ":py:obj:`PING `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" +msgstr ":py:obj:`START_CLIENT_ENTER `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" +msgstr ":py:obj:`START_CLIENT_LEAVE `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" +msgstr ":py:obj:`START_SERVER_ENTER `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr ":py:obj:`START_SERVER_LEAVE `\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +#, fuzzy +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " 
+"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" + +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" + +#: flwr.common.EventType.capitalize:3 of +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." +msgstr "보다 구체적으로, 첫 번째 문자는 대문자로, 나머지는 소문자로 만듭니다." + +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." +msgstr "패딩은 지정된 채우기 문자를 사용하여 수행됩니다(기본값은 공백)." + +#: flwr.common.EventType.count:1 of +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." +msgstr "" +"문자열 S[start:end]에서 부분 문자열 sub의 겹치지 않는 횟수를 반환합니다. 선택적 인자 start와 end는 슬라이스" +" 표기법과 같이 해석됩니다." 
+
+#: flwr.common.EventType.encode:3 of
+msgid "encoding"
+msgstr "인코딩"
+
+#: flwr.common.EventType.encode:4 of
+msgid "The encoding in which to encode the string."
+msgstr "문자열을 인코딩하는 데 사용할 인코딩입니다."
+
+#: flwr.common.EventType.encode:9 of
+msgid "errors"
+msgstr "오류"
+
+#: flwr.common.EventType.encode:6 of
+msgid ""
+"The error handling scheme to use for encoding errors. The default is "
+"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other "
+"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well "
+"as any other name registered with codecs.register_error that can handle "
+"UnicodeEncodeErrors."
+msgstr ""
+"인코딩 오류에 사용할 오류 처리 방식입니다. 기본값은 'strict'로, 인코딩 오류가 발생하면 UnicodeEncodeError를"
+" 발생시킵니다. 다른 가능한 값으로는 'ignore', 'replace', 'xmlcharrefreplace', 그리고 "
+"UnicodeEncodeError를 처리할 수 있는 codecs.register_error에 등록된 다른 이름도 사용할 수 "
+"있습니다."
+
+#: flwr.common.EventType.endswith:1 of
+msgid ""
+"Return True if S ends with the specified suffix, False otherwise. With "
+"optional start, test S beginning at that position. With optional end, "
+"stop comparing S at that position. suffix can also be a tuple of strings "
+"to try."
+msgstr ""
+"S가 지정된 접미사로 끝나면 True를 반환하고, 그렇지 않으면 False를 반환합니다. 시작 옵션을 사용하면 해당 위치부터 S를 "
+"테스트합니다. end 옵션을 사용하면 해당 위치에서 S 비교를 중지합니다. 접미사는 시도할 문자열의 튜플일 수도 있습니다."
+
+#: flwr.common.EventType.expandtabs:3 of
+msgid "If tabsize is not given, a tab size of 8 characters is assumed."
+msgstr "탭 크기를 지정하지 않으면 크기가 8로 지정됩니다."
+
+#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of
+msgid ""
+"Return the lowest index in S where substring sub is found, such that sub "
+"is contained within S[start:end]. Optional arguments start and end are "
+"interpreted as in slice notation."
+msgstr ""
+"부분 문자열 sub가 발견되는 곳의 가장 낮은 인덱스를 반환하며, sub는 S[start:end] 내에 포함되어야 합니다. 선택적"
+" 인자 start와 end는 슬라이스 표기법과 같이 해석됩니다."
+
+#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of
+msgid "Return -1 on failure."
+msgstr "실패 시 -1을 반환합니다."
+ +#: flwr.common.EventType.format:1 of +msgid "" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." +msgstr "args와 kwargs의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 중괄호('{' 및 '}')로 식별됩니다." + +#: flwr.common.EventType.format_map:1 of +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." +msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 중괄호('{' 및 '}')로 식별됩니다." + +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." +msgstr "부분 문자열을 찾을 수 없을 때 ValueError를 발생시킵니다." + +#: flwr.common.EventType.isalnum:3 of +msgid "" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." +msgstr "문자열의 모든 문자가 영숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 영-숫자입니다." + +#: flwr.common.EventType.isalpha:3 of +msgid "" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." +msgstr "문자열의 모든 문자가 알파벳이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 알파벳입니다." + +#: flwr.common.EventType.isascii:3 of +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." +msgstr "ASCII 문자는 U+0000-U+007F 범위의 코드 포인트가 있습니다. 빈 문자열도 ASCII입니다." + +#: flwr.common.EventType.isdecimal:3 of +msgid "" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." +msgstr "문자열의 모든 문자가 10진수이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 10진수 문자열입니다." + +#: flwr.common.EventType.isdigit:3 of +msgid "" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." +msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 숫자 문자열입니다." 
+ +#: flwr.common.EventType.isidentifier:3 of +msgid "" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." +msgstr "" +"keyword.iskeyword(s)를 호출하여 문자열 s가 \"def\" 또는 \"class\"와 같은 예약 식별자인지 " +"테스트합니다." + +#: flwr.common.EventType.islower:3 of +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." +msgstr "문자열이 모두 소문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 소문자입니다." + +#: flwr.common.EventType.isnumeric:3 of +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." +msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 숫자입니다." + +#: flwr.common.EventType.isprintable:3 of +msgid "" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." +msgstr "문자열은 repr()에서 모든 문자가 인쇄 가능한 것으로 간주되거나 비어 있는 경우 인쇄할 수 있습니다." + +#: flwr.common.EventType.isspace:3 of +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." +msgstr "문자열의 모든 문자가 공백이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 공백입니다." + +#: flwr.common.EventType.istitle:3 of +msgid "" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." +msgstr "제목 대/소문자 문자열에서 대문자와 제목 대문자는 대소문자만, 소문자는 대문자만 뒤에 올 수 있습니다." + +#: flwr.common.EventType.isupper:3 of +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." +msgstr "문자열의 모든 문자가 대문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 대문자입니다." + +#: flwr.common.EventType.join:3 of +msgid "" +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." +msgstr "메서드가 호출되는 문자열은 주어진 각 문자열 사이에 삽입됩니다. 결과는 새 문자열로 반환됩니다." 
+ +#: flwr.common.EventType.join:6 of +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +msgstr "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" + +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +msgid "If chars is given and not None, remove characters in chars instead." +msgstr "None이 아닌 문자가 지정되면 대신 문자열에서 문자를 제거합니다." + +#: flwr.common.EventType.maketrans:3 of +msgid "" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." +msgstr "" +"argument이 하나만 있는 경우, 유니코드 서수(정수) 또는 문자를 유니코드 서수, 문자열 또는 None에 매핑하는 " +"dictionary이어야 합니다. 그러면 문자 키가 서수로 변환됩니다. 인수가 두 개이면 길이가 같은 문자열이어야 하며, 결과 " +"dictionary에서 x의 각 문자는 y의 같은 위치에 있는 문자에 매핑됩니다. 세 번째 인수가 있으면 문자열이어야 하며, 그 " +"문자는 결과에서 None에 매핑됩니다." + +#: flwr.common.EventType.partition:3 of +msgid "" +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." +msgstr "" +"문자열에서 구분 기호를 검색합니다. 구분 기호가 발견되면 구분 기호 앞 부분, 구분 기호 자체, 구분 기호 뒤 부분을 포함하는 " +"3-tuple을 반환합니다." + +#: flwr.common.EventType.partition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." +msgstr "구분 기호를 찾을 수 없으면 원래 문자열과 빈 문자열 2개를 포함하는 3-튜플을 반환합니다." + +#: flwr.common.EventType.removeprefix:3 of +msgid "" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." 
+msgstr "문자열이 접두사 문자열로 시작하면 문자열[len(prefix):]을 반환합니다. 그렇지 않으면 원본 문자열의 복사본을 반환합니다." + +#: flwr.common.EventType.removesuffix:3 of +msgid "" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." +msgstr "" +"문자열이 접미사 문자열로 끝나고 해당 접미사가 비어 있지 않으면 문자열[:-len(suffix)]을 반환합니다. 그렇지 않으면 원본" +" 문자열의 복사본을 반환합니다." + +#: flwr.common.EventType.replace:5 of +msgid "count" +msgstr "카운트" + +#: flwr.common.EventType.replace:4 of +msgid "" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." +msgstr "대체할 최대 발생 횟수입니다. -1(기본값)은 모든 항목을 교체한다는 의미입니다." + +#: flwr.common.EventType.replace:7 of +msgid "" +"If the optional argument count is given, only the first count occurrences" +" are replaced." +msgstr "선택적 argument 개수를 지정하면 첫 번째 개수만 바뀝니다." + +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +msgid "" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." +msgstr "" +"부분 문자열 sub가 발견되는 곳의 가장 높은 인덱스를 반환하며, sub는 S[start:end] 내에 포함되어야 합니다. 선택적" +" 인자 start와 end는 슬라이스 표기법과 같이 해석됩니다." + +#: flwr.common.EventType.rpartition:3 of +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." +msgstr "" +"그러면 문자열에서 끝 부분부터 시작하여 구분 기호를 검색합니다. 구분 기호가 발견되면 구분 기호 앞 부분, 구분 기호 자체, 구분 " +"기호 뒤 부분을 포함하는 3-tuple을 반환합니다." + +#: flwr.common.EventType.rpartition:7 of +msgid "" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." +msgstr "구분 기호를 찾을 수 없는 경우 빈 문자열 2개와 원래 문자열을 포함하는 3-tuple을 반환합니다." 
+ +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" +msgstr "sep" + +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." +msgstr "문자열을 분할하는 데 사용되는 구분 기호입니다." + +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +msgid "" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." +msgstr "" +"None(기본값)으로 설정하면 모든 공백 문자(\\\\n \\\\r \\\\t \\\\f 및 공백 포함)를 분할하고 결과에서 빈 " +"문자열을 삭제합니다." + +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" +msgstr "maxsplit" + +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +msgid "" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." +msgstr "최대 분할 횟수(왼쪽부터 시작). -1(기본값)은 제한이 없음을 의미합니다." + +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." +msgstr "분할은 문자열 끝에서 시작하여 앞쪽으로 진행됩니다." + +#: flwr.common.EventType.split:13 of +msgid "" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." +msgstr "" +"참고로 str.split()은 주로 의도적으로 구분된 데이터에 유용합니다. 구두점이 포함된 자연 텍스트의 경우 정규식 모듈을 " +"사용하는 것이 좋습니다." + +#: flwr.common.EventType.splitlines:3 of +msgid "" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." +msgstr "줄 바꿈은 keepends가 주어지고 참이 아니면 결과 목록에 포함되지 않습니다." + +#: flwr.common.EventType.startswith:1 of +msgid "" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." 
+msgstr "" +"S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 반환합니다. 시작 옵션을 사용하면 해당 위치에서 " +"시작되는 S를 테스트합니다. 선택적 end를 사용하면 해당 위치에서 S 비교를 중지합니다. 접두사는 시도할 문자열의 튜플일 수도 " +"있습니다." + +#: flwr.common.EventType.title:3 of +msgid "" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." +msgstr "보다 구체적으로, 단어는 대문자로 시작하고 나머지 모든 대소문자는 소문자로 표기합니다." + +#: flwr.common.EventType.translate:5 of +msgid "table" +msgstr "table" + +#: flwr.common.EventType.translate:4 of +msgid "" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." +msgstr "유니코드 서수를 유니코드 서수, 문자열 또는 없음으로 매핑하는 번역 테이블이어야 합니다." + +#: flwr.common.EventType.translate:7 of +msgid "" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." +msgstr "" +"테이블은 사전이나 목록과 같이 __getitem__을 통해 조회/색인을 구현해야 합니다. 이 작업에서 LookupError가 " +"발생하면 문자는 그대로 유지됩니다. 없음으로 매핑된 문자는 삭제됩니다." + +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." +msgstr "문자열은 잘리지 않습니다." 
+ +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" +msgstr "FitIns" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" +msgstr "FitRes" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" + +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +msgid "GetParametersIns" +msgstr "GetParametersIns" + +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +msgid "GetParametersRes" +msgstr "GetParametersRes" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" +msgstr "GetPropertiesIns" + +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" +msgstr "GetPropertiesRes" + +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" + +#: 
../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" +msgstr ":py:obj:`properties `\\" + +#: ../../source/ref-api/flwr.common.Message.rst:2 +msgid "Message" +msgstr "Message" + +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." +msgstr "실행할 메시지에 대한 정보를 포함한 데이터 클래스입니다." + +#: flwr.common.message.Message:5 of +msgid "" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "다른 엔터티(예: 서버 측 로직이 클라이언트로 전송하거나 그 반대로 전송하는 등)가 전송했거나 전송할 레코드를 보유합니다." + +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." +msgstr "다른 메시지를 처리할 때 발생한 오류에 대한 정보를 캡처하는 데이터 클래스입니다." + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" +msgstr "" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." +msgstr "오류가 발생했음을 나타내는 답장 메시지를 작성합니다." + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" +msgstr "" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" + +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." +msgstr "지정된 콘텐츠와 TTL을 사용하여 이 메시지에 대한 답글을 작성합니다." 
+
+#: ../../source/ref-api/flwr.common.Message.rst:35::1
+msgid ":py:obj:`has_content `\\ \\(\\)"
+msgstr ":py:obj:`has_content `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.Message.rst:35::1
+#: flwr.common.message.Message.has_content:1 of
+msgid "Return True if message has content, else False."
+msgstr "메시지에 콘텐츠가 있으면 True를 반환하고, 그렇지 않으면 False를 반환합니다."
+
+#: ../../source/ref-api/flwr.common.Message.rst:35::1
+msgid ":py:obj:`has_error `\\ \\(\\)"
+msgstr ":py:obj:`has_error `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.Message.rst:35::1
+#: flwr.common.message.Message.has_error:1 of
+msgid "Return True if message has an error, else False."
+msgstr "메시지에 오류가 있으면 True를 반환하고, 그렇지 않으면 False를 반환합니다."
+
+#: flwr.common.Message.content:1::1 of
+msgid ":py:obj:`content `\\"
+msgstr ":py:obj:`content `\\"
+
+#: flwr.common.Message.content:1 flwr.common.Message.content:1::1
+#: of
+msgid "The content of this message."
+msgstr "이 메시지의 내용입니다."
+
+#: flwr.common.Message.content:1::1 of
+msgid ":py:obj:`error `\\"
+msgstr ":py:obj:`error `\\"
+
+#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of
+msgid "Error captured by this message."
+msgstr "이 메시지가 캡처한 오류입니다."
+
+#: flwr.common.Message.content:1::1 of
+msgid ":py:obj:`metadata `\\"
+msgstr ":py:obj:`metadata `\\"
+
+#: flwr.common.message.Message.create_error_reply:3 of
+msgid "The error that was encountered."
+msgstr "발생한 오류입니다."
+
+#: flwr.common.message.Message.create_error_reply:5
+#: flwr.common.message.Message.create_reply:9 of
+msgid ""
+"Time-to-live for this message in seconds. If unset, it will be set based "
+"on the remaining time for the received message before it expires. This "
+"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - "
+"msg.meta.created_at)"
+msgstr ""
+"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 전까지 남은 시간을 기준으로 설정됩니다. 
이는 다음과 " +"같은 공식을 따릅니다: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" + +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of +msgid "" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation:" +msgstr "" +"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 전까지 남은 시간을 기준으로 설정됩니다. 이는 다음 " +"공식을 따릅니다:" + +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" + +#: flwr.common.message.Message.create_reply:3 of +msgid "" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." +msgstr "" +"이 메서드는 이 메시지에 대한 응답으로 새로운 '메시지'를 생성합니다. 이 메시지에서 'run_id', 'src_node_id', " +"'dst_node_id', 'message_type'을 상속하고 'reply_to_message'를 이 메시지의 ID로 설정합니다." + +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." +msgstr "답장 메시지의 콘텐츠입니다." + +#: flwr.common.message.Message.create_reply:16 of +msgid "A new `Message` instance representing the reply." +msgstr "답장을 나타내는 새로운 `메시지` 인스턴스입니다." 
+ +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" +msgstr "MessageType" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" +msgstr ":py:obj:`EVALUATE `\\" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" +msgstr ":py:obj:`QUERY `\\" + +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" +msgstr ":py:obj:`TRAIN `\\" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" +msgstr "MessageTypeLegacy" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr ":py:obj:`GET_PARAMETERS `\\" + +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr ":py:obj:`GET_PROPERTIES `\\" + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." +msgstr "현재 실행에 대한 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." +msgstr "현재 메시지의 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." +msgstr "이 메시지를 보내는 노드의 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." +msgstr "이 메시지를 수신하는 노드의 식별자입니다." + +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." +msgstr "이 메시지가 회신하는 메시지의 식별자입니다." + +#: flwr.common.message.Metadata:13 of +msgid "" +"An identifier for grouping messages. 
In some settings, this is used as "
+"the FL round."
+msgstr "메시지를 그룹화하기 위한 식별자입니다. 일부 설정에서는 FL 라운드로 사용됩니다."
+
+#: flwr.common.message.Metadata:16 of
+msgid "Time-to-live for this message in seconds."
+msgstr "이 메시지의 유효 시간(초)입니다."
+
+#: flwr.common.Metadata.created_at:1::1
+#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of
+msgid "A string that encodes the action to be executed on the receiving end."
+msgstr "수신 측에서 실행할 작업을 인코딩하는 문자열입니다."
+
+#: flwr.common.Metadata.created_at:1::1 of
+msgid ":py:obj:`created_at `\\"
+msgstr ":py:obj:`created_at `\\"
+
+#: flwr.common.Metadata.created_at:1
+#: flwr.common.Metadata.created_at:1::1 of
+msgid "Unix timestamp when the message was created."
+msgstr "메시지가 생성된 때의 Unix timestamp입니다."
+
+#: flwr.common.Metadata.created_at:1::1 of
+msgid ":py:obj:`dst_node_id `\\"
+msgstr ":py:obj:`dst_node_id `\\"
+
+#: flwr.common.Metadata.created_at:1::1 of
+msgid ":py:obj:`group_id `\\"
+msgstr ":py:obj:`group_id `\\"
+
+#: flwr.common.Metadata.created_at:1::1
+#: flwr.common.Metadata.group_id:1 of
+msgid "An identifier for grouping messages."
+msgstr "메시지를 그룹화하기 위한 식별자입니다."
+
+#: flwr.common.Metadata.created_at:1::1 of
+msgid ":py:obj:`message_id `\\"
+msgstr ":py:obj:`message_id `\\"
+
+#: flwr.common.Metadata.created_at:1::1 of
+msgid ":py:obj:`message_type `\\"
+msgstr ":py:obj:`message_type `\\"
+
+#: flwr.common.Metadata.created_at:1::1 of
+msgid ":py:obj:`reply_to_message `\\"
+msgstr ":py:obj:`reply_to_message `\\"
+
+#: flwr.common.Metadata.created_at:1::1 of
+msgid ":py:obj:`run_id `\\"
+msgstr ":py:obj:`run_id `\\"
+
+#: flwr.common.Metadata.created_at:1::1 of
+msgid ":py:obj:`src_node_id `\\"
+msgstr ":py:obj:`src_node_id `\\"
+
+#: flwr.common.Metadata.created_at:1::1 of
+msgid ":py:obj:`ttl `\\"
+msgstr ":py:obj:`ttl `\\"
+
+#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1
+#: of
+msgid "Time-to-live for this message."
+msgstr "이 메시지의 유효 시간입니다."
+ +#: ../../source/ref-api/flwr.common.Metrics.rst:2 +#, fuzzy +msgid "Metrics" +msgstr "MetricsRecord" + +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" +msgstr "MetricsRecord" + +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#, fuzzy +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" +" [:py:class:`float`]]" + +#: flwr.common.record.metricsrecord.MetricsRecord:3 of +msgid "" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:12 of +msgid "" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:20 of +msgid "" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. 
A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:28 of +msgid "" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:39 of +msgid "" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." +msgstr "" + +#: flwr.common.record.metricsrecord.MetricsRecord:50 of +msgid "" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." 
+msgstr ""
+
+#: collections.abc.MutableMapping.clear:1::1 of
+msgid ":py:obj:`clear `\\ \\(\\)"
+msgstr ":py:obj:`clear `\\ \\(\\)"
+
+#: collections.abc.MutableMapping.clear:1::1 of
+msgid ":py:obj:`count_bytes `\\ \\(\\)"
+msgstr ":py:obj:`count_bytes `\\ \\(\\)"
+
+#: collections.abc.MutableMapping.clear:1::1 of
+#, fuzzy
+msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)"
+msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)"
+
+#: collections.abc.MutableMapping.clear:1::1 of
+msgid ":py:obj:`items `\\ \\(\\)"
+msgstr ":py:obj:`items `\\ \\(\\)"
+
+#: collections.abc.MutableMapping.clear:1::1 of
+msgid ":py:obj:`keys `\\ \\(\\)"
+msgstr ":py:obj:`keys `\\ \\(\\)"
+
+#: collections.abc.MutableMapping.clear:1::1 of
+msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)"
+msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)"
+
+#: collections.abc.MutableMapping.clear:1::1 of
+#, fuzzy
+msgid ":py:obj:`popitem `\\ \\(\\)"
+msgstr ":py:obj:`items `\\ \\(\\)"
+
+#: collections.abc.MutableMapping.clear:1::1 of
+#, fuzzy
+msgid ""
+":py:obj:`setdefault `\\ "
+"\\(k\\[\\,d\\]\\)"
+msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)"
+
+#: collections.abc.MutableMapping.clear:1::1 of
+msgid ""
+":py:obj:`update `\\ \\(\\[E\\, "
+"\\]\\*\\*F\\)"
+msgstr ""
+":py:obj:`update `\\ \\(\\[E\\, "
+"\\]\\*\\*F\\)"
+
+#: collections.abc.MutableMapping.clear:1::1 of
+msgid ":py:obj:`values `\\ \\(\\)"
+msgstr ":py:obj:`values `\\ \\(\\)"
+
+#: ../../source/ref-api/flwr.common.NDArray.rst:2
+msgid "NDArray"
+msgstr "NDArray"
+
+#: ../../source/ref-api/flwr.common.NDArrays.rst:2
+#, fuzzy
+msgid "NDArrays"
+msgstr "NDArray"
+
+#: ../../source/ref-api/flwr.common.Parameters.rst:29::1
+msgid ":py:obj:`tensors `\\"
+msgstr ":py:obj:`tensors `\\"
+
+#: ../../source/ref-api/flwr.common.Parameters.rst:29::1
+msgid ":py:obj:`tensor_type `\\"
+msgstr ":py:obj:`tensor_type `\\"
+
+#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2
+msgid "ParametersRecord"
+msgstr "ParametersRecord"
+
+#: 
flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" + +#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#, fuzzy +msgid "" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." +msgstr "" +"Arrays라는 이름의 데이터 클래스를 순서대로 저장합니다. 즉, OrderedDict[str, Array]로 항목을 보유합니다. " +"ParametersRecord 객체는 파이토치의 state_dict와 동등한 것으로 볼 수 있지만, 대신 직렬화된 텐서를 " +"보유합니다." + +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:12 of +msgid "" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:23 of +msgid "" +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). 
These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +#, fuzzy +msgid "Let's see some examples:" +msgstr "몇 가지 예를 살펴보겠습니다:" + +#: flwr.common.record.parametersrecord.ParametersRecord:50 of +msgid "" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:65 of +msgid "" +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:83 of +msgid "" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." 
+msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" +msgstr "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" + +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +msgid "" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "역직렬화에 필요한 직렬화된 객체의 메타데이터(예: NumPy 배열)에 해당하는 소량의 바이트도 이 카운팅에 포함될 수 있습니다." 
+ +#: ../../source/ref-api/flwr.common.Properties.rst:2 +#, fuzzy +msgid "Properties" +msgstr "GetPropertiesRes" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +msgid "ReconnectIns" +msgstr "ReconnectIns" + +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" +msgstr ":py:obj:`seconds `\\" + +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" +msgstr "RecordSet" + +#: flwr.common.record.recordset.RecordSet:3 of +msgid "" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." +msgstr "" + +#: flwr.common.record.recordset.RecordSet:9 of +msgid "" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." +msgstr "" + +#: flwr.common.record.recordset.RecordSet:12 of +msgid "" +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." +msgstr "" + +#: flwr.common.record.recordset.RecordSet:16 of +msgid "" +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." +msgstr "" + +#: flwr.common.record.recordset.RecordSet:24 of +msgid "" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." +msgstr "" + +#: flwr.common.record.recordset.RecordSet:29 of +#, fuzzy +msgid "Let's see an example." 
+msgstr "몇 가지 예를 살펴보겠습니다:" + +#: flwr.common.record.recordset.RecordSet:47 of +msgid "" +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." +msgstr "" + +#: flwr.common.record.recordset.RecordSet:66 of +msgid "" +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." +msgstr "" + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" +msgstr ":py:obj:`configs_records `\\" + +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." +msgstr "Dictionary holding ConfigsRecord instances." + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" +msgstr ":py:obj:`metrics_records `\\" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." +msgstr "Dictionary holding MetricsRecord instances." + +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" +msgstr ":py:obj:`parameters_records `\\" + +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." +msgstr "Dictionary holding ParametersRecord instances." 
+ +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +msgid "ServerMessage" +msgstr "ServerMessage" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" +msgstr ":py:obj:`evaluate_ins `\\" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" +msgstr ":py:obj:`fit_ins `\\" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_ins " +"`\\" +msgstr "" +":py:obj:`get_parameters_ins " +"`\\" + +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid "" +":py:obj:`get_properties_ins " +"`\\" +msgstr "" +":py:obj:`get_properties_ins " +"`\\" + +#: ../../source/ref-api/flwr.common.Status.rst:2 +msgid "Status" +msgstr "Status" + +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" + +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" +msgstr ":py:obj:`message `\\" + +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" +msgstr "array\\_from\\_numpy" + +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" +msgstr "bytes\\_to\\_ndarray" + +#: ../../source/ref-api/flwr.common.configure.rst:2 +msgid "configure" +msgstr "구성" + +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" +msgstr "이벤트" + +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" +msgstr "로그" + +#: logging.Logger.log:3 of +msgid "" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." +msgstr "예외 정보를 전달하려면 키워드 argument exc_info를 참 값과 함께 사용합니다." 
+ +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "" + +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" +msgstr "" + +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" +msgstr "" + +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" +msgstr "" + +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "서버" + +#: ../../source/ref-api/flwr.server.rst:22::1 +msgid "" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." +msgstr "gRPC transport layer를 사용하여 Flower 서버를 실행하세요." + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." +msgstr "Flower 클라이언트를 관리하기 위한 Abstract base class." + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`Driver `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." +msgstr "Driver API를 위한 Abstract base Driver class." + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." +msgstr "메트릭 콜렉션을 훈련 및 평가하기 위한 History class." 
+ +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy +msgid "" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." +msgstr "레거시 콘텍스트." + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +msgid "Flower ServerApp." +msgstr "Flower ServerApp." + +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy +msgid "" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" +msgstr "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of +msgid "Flower server config." +msgstr "Flower 서버 설정." + +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." +msgstr "사용 가능한 클라이언트 그룹 제공." 
+ +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr ":py:obj:`state `\\" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." +msgstr "" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#, fuzzy +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr ":py:obj:`flwr.server `\\" + +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of +msgid "Workflows." +msgstr "" + +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +msgid "ClientManager" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." 
+msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." +msgstr "Flower ClientProxy 인스턴스 등록 해제." + +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." +msgstr "적어도 `num_clients`개의 클라이언트가 사용 가능해질 때까지 기다립니다." + +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." +msgstr "" + +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of +msgid "" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." 
+msgstr "" + +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:2 +msgid "Driver" +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid "" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid "" +":py:obj:`push_messages `\\ " +"\\(messages\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid "" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid ":py:obj:`run `\\" +msgstr ":py:obj:`flwr.server `\\" + +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid "Run information." +msgstr "시뮬레이션 실행" + +#: flwr.server.driver.driver.Driver.create_message:3 of +msgid "" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:6 of +msgid "" +"The content for the new message. This holds records that are to be sent " +"to the destination node." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:14 of +msgid "" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:17 of +msgid "" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." +msgstr "" + +#: flwr.server.driver.driver.Driver.create_message:23 of +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:3 of +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." 
+msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "" + +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:3 of +msgid "" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." +msgstr "" + +#: flwr.server.driver.driver.Driver.push_messages:9 of +msgid "" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:3 of +msgid "" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "" + +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" +msgstr "" + +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`config `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`strategy `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`client_manager `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`history `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_id `\\" +msgstr ":py:obj:`src_node_id `\\" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`node_config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`state `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`run_config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." 
+msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid "" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgstr "" + +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +msgid "Replace server strategy." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" +msgstr "" + +#: flwr.server.server_app.ServerApp:5 of +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "" + +#: flwr.server.server_app.ServerApp:17 of +msgid "Use the `ServerApp` with a custom main function:" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "" + +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." +msgstr "" + +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +msgid "ServerAppComponents" +msgstr "" + +#: flwr.server.serverapp_components.ServerAppComponents:3 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." +msgstr "" + +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of +msgid "" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." +msgstr "" + +#: flwr.server.serverapp_components.ServerAppComponents:9 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." +msgstr "" + +#: flwr.server.serverapp_components.ServerAppComponents:13 of +msgid "" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid "" +":py:obj:`client_manager " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" + +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" + +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`server `\\" +msgstr ":py:obj:`flwr.server `\\" + +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +#, fuzzy +msgid ":py:obj:`strategy `\\" +msgstr ":py:obj:`state `\\" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +msgid "ServerConfig" +msgstr "" + +#: flwr.server.server_config.ServerConfig:3 of +msgid "" +"All attributes have default values which allows users to configure just " +"the ones they care about." +msgstr "" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" +msgstr "" + +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`num_available `\\" +" \\(\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`register `\\ " +"\\(client\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`unregister `\\ " +"\\(client\\)" +msgstr 
"" + +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +msgid "" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." +msgstr "" + +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" +msgstr "" + +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" +msgstr "" + +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +msgstr "" + +#: flwr.server.app.start_server:5 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." +msgstr "" + +#: flwr.server.app.start_server:12 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." +msgstr "" + +#: flwr.server.app.start_server:16 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" + +#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. 
Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." +msgstr "" + +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." +msgstr "" + +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "" + +#: flwr.server.app.start_server:32 of +msgid "CA certificate." +msgstr "" + +#: flwr.server.app.start_server:33 of +msgid "server certificate." +msgstr "" + +#: flwr.server.app.start_server:34 of +msgid "server private key." +msgstr "" + +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." +msgstr "" + +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" +msgstr "" + +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side fixed clipping." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +msgid "Federated Optim strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:27 of +msgid "" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: 
flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." 
+msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, maxsplit\\]\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`rsplit `\\ \\(\\[sep\\, maxsplit\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" -msgstr ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." -msgstr "원하는 수의 문자열을 연결합니다." 
+#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`capitalize `\\ \\(\\)" -msgstr ":py:obj:`capitalize `\\ \\(\\)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." -msgstr "대문자로 된 문자열을 반환합니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." 
+msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`casefold `\\ \\(\\)" -msgstr ":py:obj:`casefold `\\ \\(\\)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." -msgstr "대소문자 구분 없는 비교에 적합한 문자열을 반환합니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." 
+msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`title `\\ \\(\\)" -msgstr ":py:obj:`title `\\ \\(\\)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." -msgstr "각 단어의 제목이 대소문자로 구분된 문자열을 반환합니다." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." 
msgstr "" -":py:obj:`center `\\ \\(width\\[\\, fillchar\\]" -"\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -msgid "Return a centered string of length width." -msgstr "길이 너비의 가운데 문자열을 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" msgstr "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Return the number of non-overlapping occurrences of substring sub in string " -"S[start:end]." -msgstr "문자열 S[start:end]에서 하위 문자열 sub이 겹치지 않는 횟수를 반환합니다." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`expandtabs `\\ \\(\\[tabsize\\]\\)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`expandtabs `\\ \\(\\[tabsize\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." -msgstr "모든 탭 문자가 공백을 사용하여 확장된 사본을 반환합니다." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, end\\]" -"\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"Return the lowest index in S where substring sub is found, such that sub is " -"contained within S[start:end]." -msgstr "하위 문자열 sub이 발견되는 S에서 하위가 S[start:end] 내에 포함되는 가장 낮은 " -"인덱스를 반환합니다." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -":py:obj:`partition `\\ \\(sep\\, \\/\\)" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." msgstr "" -":py:obj:`partition `\\ \\(sep\\, \\/\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." -msgstr "지정된 구분 기호를 사용하여 문자열을 세 부분으로 분할합니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, fillchar\\]\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -":py:obj:`ljust `\\ \\(width\\[\\, fillchar\\]\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." -msgstr "왼쪽으로 정렬된 길이의 문자열을 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lower `\\ \\(\\)" -msgstr ":py:obj:`lower `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -msgid "Return a copy of the string converted to lowercase." -msgstr "소문자로 변환된 문자열 사본을 반환합니다." 
+#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +msgid "" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +msgid "" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of -msgid "Return a copy of the string with leading whitespace removed." -msgstr "선행 공백이 제거된 문자열의 복사본을 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." msgstr "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of msgid "" -"Return the highest index in S where substring sub is found, such that sub is " -"contained within S[start:end]." -msgstr "부분 문자열 sub이 발견되는 곳에서 sub이 S[start:end] 내에 포함되도록 S에서 " -"가장 높은 인덱스를 반환합니다." 
+"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -":py:obj:`rindex `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, fillchar\\]\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." msgstr "" -":py:obj:`rjust `\\ \\(width\\[\\, fillchar\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -msgid "Return a right-justified string of length width." -msgstr "길이 너비의 오른쪽 정렬된 문자열을 반환합니다." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of +msgid "" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -msgid "Return a copy of the string with trailing whitespace removed." -msgstr "후행 공백이 제거된 문자열의 복사본을 반환합니다." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of msgid "" -":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." 
msgstr "" -":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" msgstr "" -":py:obj:`splitlines `\\ \\(\\[keepends\\]\\" -")" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -msgid "Return a list of the lines in the string, breaking at line boundaries." -msgstr "문자열의 줄 목록을 줄 경계에서 구분하여 반환합니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +msgid "Create a strategy:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of msgid "" -"Return a copy of the string with leading and trailing whitespace removed." -msgstr "선행 및 후행 공백이 제거된 문자열 사본을 반환합니다." 
+"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`swapcase `\\ \\(\\)" -msgstr ":py:obj:`swapcase `\\ \\(\\)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." -msgstr "대문자를 소문자로, 소문자를 대문자로 변환합니다." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`translate `\\ \\(table\\, \\/\\)" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`translate `\\ \\(table\\, \\/\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -msgid "Replace each character in the string using the given translation table." -msgstr "주어진 번역 테이블을 사용하여 문자열의 각 문자를 바꿉니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`upper `\\ \\(\\)" -msgstr ":py:obj:`upper `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -msgid "Return a copy of the string converted to uppercase." -msgstr "Return a copy of the string converted to uppercase." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +msgid "Aggregate training results and update clip norms." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\, " -"start\\[\\, end\\]\\]\\)" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`startswith `\\ \\(prefix\\[\\, " -"start\\[\\, end\\]\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S starts with the specified prefix, False otherwise." -msgstr "S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`endswith `\\ \\(suffix\\[\\, start\\" -"[\\, end\\]\\]\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." -msgstr "S가 지정된 접미사로 끝나면 True를 반환하고 그렇지 않으면 False을 반환합니다." 
-#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`removeprefix `\\ \\(prefix\\, " -"\\/\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`removeprefix `\\ \\(prefix\\, \\" -"/\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." -msgstr "주어진 접두사 문자열이 있는 경우 제거된 문자열을 반환합니다." -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`removesuffix `\\ \\(suffix\\, " -"\\/\\)" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -":py:obj:`removesuffix `\\ \\(suffix\\, \\" -"/\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." -msgstr "주어진 접미사 문자열이 있는 경우 제거된 문자열을 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isascii `\\ \\(\\)" -msgstr ":py:obj:`isascii `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." -msgstr "문자열의 모든 문자가 ASCII인 경우 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`islower `\\ \\(\\)" -msgstr ":py:obj:`islower `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." -msgstr "문자열이 소문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
- -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isupper `\\ \\(\\)" -msgstr ":py:obj:`isupper `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." -msgstr "문자열이 대문자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`istitle `\\ \\(\\)" -msgstr ":py:obj:`istitle `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." -msgstr "문자열이 제목 대/소문자가 구분된 문자열이면 True를 반환하고, 그렇지 않으면 " -"False를 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isspace `\\ \\(\\)" -msgstr ":py:obj:`isspace `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." -msgstr "문자열이 공백 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdecimal `\\ \\(\\)" -msgstr ":py:obj:`isdecimal `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." -msgstr "문자열이 10진수 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdigit `\\ \\(\\)" -msgstr ":py:obj:`isdigit `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." -msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
- -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isnumeric `\\ \\(\\)" -msgstr ":py:obj:`isnumeric `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric string, False otherwise." -msgstr "문자열이 숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalpha `\\ \\(\\)" -msgstr ":py:obj:`isalpha `\\ \\(\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." -msgstr "문자열이 알파벳 문자열이면 True를 반환하고, 그렇지 않으면 False를 반환합니다." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of +msgid "" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalnum `\\ \\(\\)" -msgstr ":py:obj:`isalnum `\\ \\(\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +msgid "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." 
+msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." -msgstr "문자열이 영-숫자 문자열이면 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of +msgid "" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isidentifier `\\ \\(\\)" -msgstr ":py:obj:`isidentifier `\\ \\(\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Return True if the string is a valid Python identifier, False otherwise." -msgstr "문자열이 유효한 파이썬 식별자인 경우 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다." +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isprintable `\\ \\(\\)" -msgstr ":py:obj:`isprintable `\\ \\(\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." -msgstr "문자열을 인쇄할 수 있으면 True를 반환하고, 그렇지 않으면 False를 반환합니다." 
+#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +msgid "Add noise to the aggregated parameters." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" -msgstr ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given " -"width." -msgstr "숫자 문자열을 왼쪽에 0으로 채워서 지정된 너비의 필드를 채웁니다." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`format `\\ \\(\\*args\\, \\*\\*" -"kwargs\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Return a formatted version of S, using substitutions from args and kwargs." -msgstr "args와 kwarg의 치환을 사용하여 형식이 지정된 S를 반환합니다." 
- -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`format_map `\\ \\(mapping\\)" -msgstr ":py:obj:`format_map `\\ \\(mapping\\)" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." -msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`maketrans `\\" -msgstr ":py:obj:`maketrans `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of +msgid "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. recommends to set to " +"`expected_num_records/20`" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." -msgstr "str.translate()에 사용할 수 있는 번역 테이블을 반환합니다." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of +msgid "" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`PING `\\" -msgstr ":py:obj:`PING `\\" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_CLIENT_ENTER `\\" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`START_CLIENT_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_CLIENT_LEAVE `\\" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`START_CLIENT_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_SERVER_ENTER `\\" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`START_SERVER_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_SERVER_LEAVE `\\" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`START_SERVER_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_DRIVER_API_ENTER `\\" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -":py:obj:`RUN_DRIVER_API_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE `\\" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -":py:obj:`RUN_DRIVER_API_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_FLEET_API_ENTER `\\" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -":py:obj:`RUN_FLEET_API_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_FLEET_API_LEAVE `\\" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`RUN_FLEET_API_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." 
+msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SUPERLINK_ENTER `\\" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE `\\" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_SIMULATION_ENTER `\\" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`START_SIMULATION_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_SIMULATION_LEAVE `\\" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -":py:obj:`START_SIMULATION_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_CONNECT `\\" -msgstr ":py:obj:`DRIVER_CONNECT `\\" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." 
+msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_DISCONNECT `\\" -msgstr ":py:obj:`DRIVER_DISCONNECT `\\" +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" +msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_DRIVER_ENTER `\\" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -":py:obj:`START_DRIVER_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`START_DRIVER_LEAVE `\\" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`START_DRIVER_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER `\\" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
msgstr "" -":py:obj:`RUN_CLIENT_APP_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE `\\" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`RUN_CLIENT_APP_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SERVER_APP_ENTER `\\" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`RUN_SERVER_APP_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE `\\" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`RUN_SERVER_APP_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SUPERNODE_ENTER `\\" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -":py:obj:`RUN_SUPERNODE_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE `\\" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`RUN_SUPERNODE_LEAVE `\\" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER `\\" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`RUN_SUPEREXEC_ENTER `\\" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE `\\" 
+#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" msgstr "" -":py:obj:`RUN_SUPEREXEC_LEAVE `\\" -#: flwr.common.EventType.capitalize:3 of -msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." -msgstr "보다 구체적으로, 첫 번째 문자는 대문자로, 나머지는 소문자로 만듭니다." +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "" -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "" -"Padding is done using the specified fill character (default is a space)." -msgstr "패딩은 지정된 채우기 문자를 사용하여 수행됩니다(기본값은 공백)." +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "" -#: flwr.common.EventType.count:1 of -msgid "" -"Return the number of non-overlapping occurrences of substring sub in string " -"S[start:end]. Optional arguments start and end are interpreted as in slice " -"notation." +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." msgstr "" -"문자열 S[start:end]에서 부분 문자열 sub의 겹치지 않는 횟수를 반환합니다. " -"선택적 인자 start와 end는 슬라이스 표기법과 같이 해석됩니다." 
-#: flwr.common.EventType.encode:3 of -msgid "encoding" -msgstr "인코딩" +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." -msgstr "문자열을 인코딩합니다." +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "" -#: flwr.common.EventType.encode:9 of -msgid "errors" -msgstr "오류" +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +msgstr "" -#: flwr.common.EventType.encode:6 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well as " -"any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"인코딩 오류에 사용할 오류 처리 방식입니다. 기본값은 'strict'로, 인코딩 " -"오류가 발생하면 UnicodeEncodeError를 발생시킵니다. 다른 가능한 값으로는 " -"'ignore', 'replace', 'xmlcharrefreplace', 그리고 UnicodeEncodeError를 처리할 " -"수 있는 codecs.register_error에 등록된 다른 이름도 사용할 수 있습니다." -#: flwr.common.EventType.endswith:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, stop " -"comparing S at that position. suffix can also be a tuple of strings to try." 
+":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"S가 지정된 접미사로 끝나면 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다. 시작 옵션을 사용하면 해당 위치부터 S를 테스트합니다. end 옵션을 " -"사용하면 해당 위치에서 S 비교를 중지합니다. 접미사는 시도할 문자열의 튜플일 " -"수도 있습니다." -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." -msgstr "탭 크기를 지정하지 않으면 크기가 8로 지정됩니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Return the lowest index in S where substring sub is found, such that sub is " -"contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"부분 문자열 sub가 발견되는 곳의 가장 낮은 인덱스를 반환하며, sub는 " -"S[start:end] 내에 포함되어야 합니다. 선택적 인자 start와 end는 슬라이스 " -"표기법과 같이 해석됩니다." -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." -msgstr "실패 시 -1을 반환합니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.EventType.format:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Return a formatted version of S, using substitutions from args and kwargs. " -"The substitutions are identified by braces ('{' and '}')." -msgstr "args와 kwargs의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 " -"중괄호('{' 및 '}')로 식별됩니다." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.EventType.format_map:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Return a formatted version of S, using substitutions from mapping. 
The " -"substitutions are identified by braces ('{' and '}')." -msgstr "매핑의 치환을 사용하여 형식이 지정된 S를 반환합니다. 치환은 중괄호('{' 및 " -"'}')로 식별됩니다." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." -msgstr "부분 문자열을 찾을 수 없을 때 ValueError를 발생시킵니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "" -#: flwr.common.EventType.isalnum:3 of -msgid "" -"A string is alpha-numeric if all characters in the string are alpha-numeric " -"and there is at least one character in the string." -msgstr "문자열의 모든 문자가 영숫자이고 문자열에 하나 이상의 문자가 있는 경우 " -"문자열은 영-숫자입니다." +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." +msgstr "" -#: flwr.common.EventType.isalpha:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A string is alphabetic if all characters in the string are alphabetic and " -"there is at least one character in the string." -msgstr "문자열의 모든 문자가 알파벳이고 문자열에 하나 이상의 문자가 있는 경우 " -"문자열은 알파벳입니다." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" -#: flwr.common.EventType.isascii:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty string " -"is ASCII too." -msgstr "ASCII 문자는 U+0000-U+007F 범위의 코드 포인트가 있습니다. 빈 문자열도 " -"ASCII입니다." 
+":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: flwr.common.EventType.isdecimal:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A string is a decimal string if all characters in the string are decimal and " -"there is at least one character in the string." -msgstr "문자열의 모든 문자가 10진수이고 문자열에 하나 이상의 문자가 있는 경우 " -"문자열은 10진수 문자열입니다." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" -#: flwr.common.EventType.isdigit:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A string is a digit string if all characters in the string are digits and " -"there is at least one character in the string." -msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 " -"숫자 문자열입니다." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.EventType.isidentifier:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved identifier, " -"such as \"def\" or \"class\"." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"keyword.iskeyword(s)를 호출하여 문자열 s가 \"def\" 또는 \"class\"와 같은 " -"예약 식별자인지 테스트합니다." -#: flwr.common.EventType.islower:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A string is lowercase if all cased characters in the string are lowercase " -"and there is at least one cased character in the string." -msgstr "문자열이 모두 소문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 " -"소문자입니다." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.EventType.isnumeric:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A string is numeric if all characters in the string are numeric and there is " -"at least one character in the string." -msgstr "문자열의 모든 문자가 숫자이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 " -"숫자입니다." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.EventType.isprintable:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A string is printable if all of its characters are considered printable in " -"repr() or if it is empty." -msgstr "문자열은 repr()에서 모든 문자가 인쇄 가능한 것으로 간주되거나 비어 있는 경우 " -"인쇄할 수 있습니다." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.EventType.isspace:3 of -msgid "" -"A string is whitespace if all characters in the string are whitespace and " -"there is at least one character in the string." -msgstr "문자열의 모든 문자가 공백이고 문자열에 하나 이상의 문자가 있는 경우 문자열은 " -"공백입니다." +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" +msgstr "" -#: flwr.common.EventType.istitle:3 of -msgid "" -"In a title-cased string, upper- and title-case characters may only follow " -"uncased characters and lowercase characters only cased ones." -msgstr "제목 대/소문자 문자열에서 대문자와 제목 대문자는 대소문자만, 소문자는 " -"대문자만 뒤에 올 수 있습니다." +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" +msgstr "" -#: flwr.common.EventType.isupper:3 of +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"A string is uppercase if all cased characters in the string are uppercase " -"and there is at least one cased character in the string." -msgstr "문자열의 모든 문자가 대문자이고 문자열에 문자가 하나 이상 있는 경우 문자열은 " -"대문자입니다." +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." +msgstr "" -#: flwr.common.EventType.join:3 of +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"The string whose method is called is inserted in between each given string. 
" -"The result is returned as a new string." -msgstr "메서드가 호출되는 문자열은 주어진 각 문자열 사이에 삽입됩니다. 결과는 새 " -"문자열로 반환됩니다." - -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" -msgstr "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." +msgstr "" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." -msgstr "None이 아닌 문자가 지정되면 대신 문자열에서 문자를 제거합니다." +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "" -#: flwr.common.EventType.maketrans:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the same " -"position in y. If there is a third argument, it must be a string, whose " -"characters will be mapped to None in the result." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"argument이 하나만 있는 경우, 유니코드 서수(정수) 또는 문자를 유니코드 서수, " -"문자열 또는 None에 매핑하는 dictionary이어야 합니다. 그러면 문자 키가 서수로 " -"변환됩니다. 인수가 두 개이면 길이가 같은 문자열이어야 하며, 결과 " -"dictionary에서 x의 각 문자는 y의 같은 위치에 있는 문자에 매핑됩니다. 세 번째 " -"인수가 있으면 문자열이어야 하며, 그 문자는 결과에서 None에 매핑됩니다." -#: flwr.common.EventType.partition:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This will search for the separator in the string. 
If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"문자열에서 구분 기호를 검색합니다. 구분 기호가 발견되면 구분 기호 앞 부분, " -"구분 기호 자체, 구분 기호 뒤 부분을 포함하는 3-tuple을 반환합니다." -#: flwr.common.EventType.partition:7 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." -msgstr "구분 기호를 찾을 수 없으면 원래 문자열과 빈 문자열 2개를 포함하는 3-튜플을 " -"반환합니다." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" -#: flwr.common.EventType.removeprefix:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"If the string starts with the prefix string, return string[len(prefix):]. " -"Otherwise, return a copy of the original string." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"문자열이 접두사 문자열로 시작하면 문자열[len(prefix):]을 반환합니다. 그렇지 " -"않으면 원본 문자열의 복사본을 반환합니다." -#: flwr.common.EventType.removesuffix:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"문자열이 접미사 문자열로 끝나고 해당 접미사가 비어 있지 않으면 " -"문자열[:-len(suffix)]을 반환합니다. 그렇지 않으면 원본 문자열의 복사본을 " -"반환합니다." - -#: flwr.common.EventType.replace:5 of -msgid "count" -msgstr "카운트" -#: flwr.common.EventType.replace:4 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." -msgstr "대체할 최대 발생 횟수입니다. -1(기본값)은 모든 항목을 교체한다는 의미입니다." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.EventType.replace:7 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"If the optional argument count is given, only the first count occurrences " -"are replaced." -msgstr "선택적 argument 개수를 지정하면 첫 번째 개수만 바뀝니다." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Return the highest index in S where substring sub is found, such that sub is " -"contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"부분 문자열 sub가 발견되는 곳의 가장 높은 인덱스를 반환하며, sub는 " -"S[start:end] 내에 포함되어야 합니다. 선택적 인자 start와 end는 슬라이스 " -"표기법과 같이 해석됩니다." -#: flwr.common.EventType.rpartition:3 of -msgid "" -"This will search for the separator in the string, starting at the end. If " -"the separator is found, returns a 3-tuple containing the part before the " -"separator, the separator itself, and the part after it." +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" msgstr "" -"그러면 문자열에서 끝 부분부터 시작하여 구분 기호를 검색합니다. 구분 기호가 " -"발견되면 구분 기호 앞 부분, 구분 기호 자체, 구분 기호 뒤 부분을 포함하는 3-" -"tuple을 반환합니다." -#: flwr.common.EventType.rpartition:7 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." -msgstr "구분 기호를 찾을 수 없는 경우 빈 문자열 2개와 원래 문자열을 포함하는 3-" -"tuple을 반환합니다." 
- -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" -msgstr "sep" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." -msgstr "문자열을 분할하는 데 사용되는 구분 기호입니다." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"When set to None (the default value), will split on any whitespace character " -"(including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard empty " -"strings from the result." +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -"None(기본값)으로 설정하면 모든 공백 문자(\\\\n" -" \\\\r \\\\t \\\\f 및 공백 포함)를 분할하고 결과에서 빈 문자열을 삭제합니다." -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" -msgstr "maxsplit" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." +msgstr "" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Maximum number of splits (starting from the left). -1 (the default value) " -"means no limit." -msgstr "최대 분할 횟수(왼쪽부터 시작). -1(기본값)은 제한이 없음을 의미합니다." - -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." -msgstr "분할은 문자열 끝에서 시작하여 앞쪽으로 진행됩니다." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.EventType.split:13 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. With natural text that includes punctuation, consider using the " -"regular expression module." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"참고로 str.split()은 주로 의도적으로 구분된 데이터에 유용합니다. 구두점이 " -"포함된 자연 텍스트의 경우 정규식 모듈을 사용하는 것이 좋습니다." -#: flwr.common.EventType.splitlines:3 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Line breaks are not included in the resulting list unless keepends is given " -"and true." -msgstr "줄 바꿈은 keepends가 주어지고 참이 아니면 결과 목록에 포함되지 않습니다." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.EventType.startswith:1 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, stop " -"comparing S at that position. prefix can also be a tuple of strings to try." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"S가 지정된 접두사로 시작하면 True를 반환하고, 그렇지 않으면 False를 " -"반환합니다. 시작 옵션을 사용하면 해당 위치에서 시작되는 S를 테스트합니다. " -"선택적 end를 사용하면 해당 위치에서 S 비교를 중지합니다. 접두사는 시도할 " -"문자열의 튜플일 수도 있습니다." -#: flwr.common.EventType.title:3 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"More specifically, words start with uppercased characters and all remaining " -"cased characters have lower case." -msgstr "보다 구체적으로, 단어는 대문자로 시작하고 나머지 모든 대소문자는 소문자로 " -"표기합니다." 
+":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" +msgstr "" -#: flwr.common.EventType.translate:5 of -msgid "table" -msgstr "table" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." +msgstr "" -#: flwr.common.EventType.translate:4 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode " -"ordinals, strings, or None." -msgstr "유니코드 서수를 유니코드 서수, 문자열 또는 없음으로 매핑하는 번역 " -"테이블이어야 합니다." +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" +msgstr "" -#: flwr.common.EventType.translate:7 of +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character is " -"left untouched. Characters mapped to None are deleted." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"테이블은 사전이나 목록과 같이 __getitem__을 통해 조회/색인을 구현해야 " -"합니다. 이 작업에서 LookupError가 발생하면 문자는 그대로 유지됩니다. " -"없음으로 매핑된 문자는 삭제됩니다." - -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." -msgstr "문자열은 잘리지 않습니다." 
- -#: ../../source/ref-api/flwr.common.FitIns.rst:2 -msgid "FitIns" -msgstr "FitIns" - -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" - -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" - -#: ../../source/ref-api/flwr.common.FitRes.rst:2 -msgid "FitRes" -msgstr "FitRes" - -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" - -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 -msgid "GetParametersIns" -msgstr "GetParametersIns" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." 
+msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" +msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 -msgid "GetParametersRes" -msgstr "GetParametersRes" +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" +msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: flwr.server.strategy.fedavgm.FedAvgM:25 of +msgid "" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." +msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 -msgid "GetPropertiesIns" -msgstr "GetPropertiesIns" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 -msgid "GetPropertiesRes" -msgstr "GetPropertiesRes" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`properties `\\" -msgstr ":py:obj:`properties `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:2 -msgid "Message" -msgstr "Message" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of -msgid "A dataclass including information about the message to be executed." -msgstr "실행할 메시지에 대한 정보를 포함한 데이터 클래스입니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.message.Message:5 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side " -"logic to a client, or vice-versa) or that will be sent to it." -msgstr "다른 엔터티(예: 서버 측 로직이 클라이언트로 전송하거나 그 반대로 전송하는 등)" -"가 전송했거나 전송할 레코드를 보유합니다." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.message.Message:8 of +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A dataclass that captures information about an error that took place when " -"processing another message." -msgstr "다른 메시지를 처리할 때 발생한 오류에 대한 정보를 캡처하는 데이터 " -"클래스입니다." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`create_error_reply `\\ \\(" -"error\\[\\, ttl\\]\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of -msgid "Construct a reply message indicating an error happened." -msgstr "오류가 발생했음을 나타내는 답장 메시지를 작성합니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`create_reply `\\ \\(content\\[\\, " -"ttl\\]\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`create_reply `\\ \\(content\\[\\, " -"ttl\\]\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -msgid "Create a reply to this message with specified content and TTL." -msgstr "지정된 콘텐츠와 TTL을 사용하여 이 메시지에 대한 답글을 작성합니다." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_content `\\ \\(\\)" -msgstr ":py:obj:`has_content `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -msgid "Return True if message has content, else False." -msgstr "메시지에 콘텐츠가 있으면 True을 반환하고, 그렇지 않으면 False을 반환합니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_error `\\ \\(\\)" -msgstr ":py:obj:`has_error `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -msgid "Return True if message has an error, else False." -msgstr "메시지에 오류가 있으면 True을 반환하고, 그렇지 않으면 False을 반환합니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`content `\\" -msgstr ":py:obj:`content `\\" +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" +msgstr "" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of -msgid "The content of this message." -msgstr "이 메시지의 내용입니다." +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." 
+msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`error `\\" -msgstr ":py:obj:`error `\\" +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." +msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -msgid "Error captured by this message." -msgstr "이 메시지가 캡처한 오류입니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`metadata `\\" -msgstr ":py:obj:`metadata `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: flwr.common.message.Message.create_error_reply:3 of -msgid "The error that was encountered." -msgstr "오류가 발생했습니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based on " -"the remaining time for the received message before it expires. This follows " -"the equation: ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta." -"created_at)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 " -"전까지 남은 시간을 기준으로 설정됩니다. 이는 다음과 같은 공식을 따릅니다: " -"ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Time-to-live for this message in seconds. 
If unset, it will be set based on " -"the remaining time for the received message before it expires. This follows " -"the equation:" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"이 메시지의 남은 시간(초)입니다. 설정하지 않으면 수신된 메시지가 만료되기 " -"전까지 남은 시간을 기준으로 설정됩니다. 이는 다음 공식을 따릅니다:" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" -msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.message.Message.create_reply:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The method generates a new `Message` as a reply to this message. It inherits " -"'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from this message " -"and sets 'reply_to_message' to the ID of this message." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"이 메서드는 이 메시지에 대한 응답으로 새로운 '메시지'를 생성합니다. 이 " -"메시지에서 'run_id', 'src_node_id', 'dst_node_id', 'message_type'을 상속하고 " -"'reply_to_message'를 이 메시지의 ID로 설정합니다." -#: flwr.common.message.Message.create_reply:7 of -msgid "The content for the reply message." -msgstr "답장 메시지의 콘텐츠입니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.message.Message.create_reply:16 of -msgid "A new `Message` instance representing the reply." -msgstr "답장을 나타내는 새로운 `메시지` 인스턴스입니다." 
+#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -msgid "MessageType" -msgstr "MessageType" +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`EVALUATE `\\" -msgstr ":py:obj:`EVALUATE `\\" +#: flwr.server.strategy.fedprox.FedProx:5 of +msgid "" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`QUERY `\\" -msgstr ":py:obj:`QUERY `\\" +#: flwr.server.strategy.fedprox.FedProx:9 of +msgid "" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" +msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`TRAIN `\\" -msgstr ":py:obj:`TRAIN `\\" +#: flwr.server.strategy.fedprox.FedProx:12 of +msgid "" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." +msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 -msgid "MessageTypeLegacy" -msgstr "MessageTypeLegacy" +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:30 of +msgid "" +"With `global_params` being a copy of the parameters before the training " +"takes place." +msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -":py:obj:`GET_PARAMETERS `\\" +"The weight of the proximal term used in the optimization. 
0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." msgstr "" -":py:obj:`GET_PARAMETERS `\\" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`GET_PROPERTIES `\\" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -":py:obj:`GET_PROPERTIES `\\" - -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -msgid "An identifier for the current run." -msgstr "현재 실행에 대한 식별자입니다." - -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of -msgid "An identifier for the current message." -msgstr "현재 메시지의 식별자입니다." -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of -msgid "An identifier for the node sending this message." -msgstr "이 메시지를 보내는 노드의 식별자입니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -msgid "An identifier for the node receiving this message." -msgstr "이 메시지를 수신하는 노드의 식별자입니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -msgid "An identifier for the message this message replies to." -msgstr "이 메시지가 회신하는 메시지의 식별자입니다." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.message.Metadata:13 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An identifier for grouping messages. In some settings, this is used as the " -"FL round." -msgstr "메시지를 그룹화하기 위한 식별자입니다. 일부 설정에서는 FL 라운드로 " -"사용됩니다." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.message.Metadata:16 of -msgid "Time-to-live for this message in seconds." -msgstr "이 메시지의 유효 시간(초)입니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of -msgid "A string that encodes the action to be executed on the receiving end." -msgstr "수신 측에서 실행할 작업을 인코딩하는 문자열입니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.message.Metadata:21 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An identifier that can be used when loading a particular data partition for " -"a ClientApp. Making use of this identifier is more relevant when conducting " -"simulations." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"클라이언트 앱의 특정 데이터 파티션을 로드할 때 사용할 수 있는 식별자입니다. " -"시뮬레이션을 수행할 때 이 식별자를 사용하는 것이 더 적절합니다." 
-#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`created_at `\\" -msgstr ":py:obj:`created_at `\\" +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "" -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." -msgstr "메시지가 생성된 때의 Unix timestamp입니다." +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`dst_node_id `\\" -msgstr ":py:obj:`dst_node_id `\\" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`group_id `\\" -msgstr ":py:obj:`group_id `\\" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of -msgid "An identifier for grouping messages." -msgstr "메시지를 그룹화하기 위한 식별자입니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_id `\\" -msgstr ":py:obj:`message_id `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_type `\\" -msgstr ":py:obj:`message_type `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." 
+msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`partition_id `\\" -msgstr ":py:obj:`partition_id `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of -msgid "An identifier telling which data partition a ClientApp should use." -msgstr "클라이언트앱이 사용해야 하는 데이터 파티션을 알려주는 식별자입니다." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`reply_to_message `\\" -msgstr ":py:obj:`reply_to_message `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`run_id `\\" -msgstr ":py:obj:`run_id `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`src_node_id `\\" -msgstr ":py:obj:`src_node_id `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`ttl `\\" -msgstr ":py:obj:`ttl `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" +msgstr "" + +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 #: of 
-msgid "Time-to-live for this message." -msgstr "이 메시지를 기다리는 시간입니다." +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 -msgid "MetricsRecord" -msgstr "MetricsRecord" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation metrics using average." +msgstr "" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" -"`str`, :py:class:`int` | :py:class:`float` | :py:class:`~typing.List`\\ [:py:" -"class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`]]" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" -", :py:class:`int` | :py:class:`float` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`]]" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." 
+msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" -")" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*F\\" -")" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 
-msgid "NDArray" -msgstr "NDArray" +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" +msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" -msgstr ":py:obj:`tensors `\\" +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensor_type `\\" -msgstr ":py:obj:`tensor_type `\\" +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 -msgid "ParametersRecord" -msgstr "ParametersRecord" +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:" -"`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ [:py:class:`str`" -", :py:class:`~flwr.common.record.parametersrecord.Array`]" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"A dataclass storing named Arrays in order. This means that it holds entries " -"as an OrderedDict[str, Array]. ParametersRecord objects can be viewed as an " -"equivalent to PyTorch's state_dict, but holding serialised tensors instead." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Arrays라는 이름의 데이터 클래스를 순서대로 저장합니다. 즉, OrderedDict[str, " -"Array]로 항목을 보유합니다. ParametersRecord 객체는 파이토치의 state_dict와 " -"동등한 것으로 볼 수 있지만, 대신 직렬화된 텐서를 보유합니다." - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`count_bytes `\\ \\(\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -":py:obj:`count_bytes `\\ \\(\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, \\]\\*\\*" -"F\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" +msgstr "" -#: 
flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"Note that a small amount of Bytes might also be included in this counting " -"that correspond to metadata of the serialized object (e.g. of NumPy array) " -"needed for deseralization." -msgstr "역직렬화에 필요한 직렬화된 객체의 메타데이터(예: NumPy 배열)에 해당하는 " -"소량의 바이트도 이 카운팅에 포함될 수 있습니다." +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 -msgid "ReconnectIns" -msgstr "ReconnectIns" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 -msgid ":py:obj:`seconds `\\" -msgstr ":py:obj:`seconds `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 -msgid "RecordSet" -msgstr "RecordSet" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`configs_records `\\" -msgstr ":py:obj:`configs_records `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of -msgid "Dictionary holding ConfigsRecord instances." -msgstr "Dictionary holding ConfigsRecord instances." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`metrics_records `\\" -msgstr ":py:obj:`metrics_records `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -msgid "Dictionary holding MetricsRecord instances." -msgstr "Dictionary holding MetricsRecord instances." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`parameters_records `\\" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`parameters_records `\\" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of -msgid "Dictionary holding ParametersRecord instances." -msgstr "Dictionary holding ParametersRecord instances." +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" +msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 -msgid "ServerMessage" -msgstr "ServerMessage" +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." +msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`evaluate_ins `\\" -msgstr ":py:obj:`evaluate_ins `\\" +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." 
+msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`fit_ins `\\" -msgstr ":py:obj:`fit_ins `\\" +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." +msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`get_parameters_ins `\\" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -":py:obj:`get_parameters_ins `\\" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`get_properties_ins `\\" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`get_properties_ins `\\" -#: ../../source/ref-api/flwr.common.Status.rst:2 -msgid "Status" -msgstr "Status" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`message `\\" -msgstr ":py:obj:`message `\\" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 -msgid "array\\_from\\_numpy" -msgstr "array\\_from\\_numpy" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 -msgid 
"bytes\\_to\\_ndarray" -msgstr "bytes\\_to\\_ndarray" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.configure.rst:2 -msgid "configure" -msgstr "구성" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" -#: ../../source/ref-api/flwr.common.event.rst:2 -msgid "event" -msgstr "이벤트" +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" +msgstr "" -#: ../../source/ref-api/flwr.common.log.rst:2 -msgid "log" -msgstr "로그" +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "" -#: logging.Logger.log:3 of +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"To pass exception information, use the keyword argument exc_info with a true " -"value, e.g." -msgstr "예외 정보를 전달하려면 키워드 argument exc_info를 참 값과 함께 사용합니다." - -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." 
msgstr "" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 -msgid "ndarray\\_to\\_bytes" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 -msgid "ndarrays\\_to\\_parameters" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.common.now.rst:2 -msgid "now" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." msgstr "" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 -msgid "parameters\\_to\\_ndarrays" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_server_app `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.run_serverapp.run_server_app:1 of -msgid "Run Flower server app." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_superlink `\\ \\(\\)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.run_superlink:1 of -msgid "Run Flower SuperLink (Driver API and Fleet API)." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`ClientManager `\\ \\(\\)" +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.ClientManager:1 of -msgid "Abstract base class for managing Flower clients." 
+#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`Driver `\\ \\(\\)" +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`History `\\ \\(\\)" +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.history.History:1 of -msgid "History class for training and/or evaluation metrics collection." +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Legacy Context." 
+#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_app.ServerApp:1 of -msgid "Flower ServerApp." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\, " -"round\\_timeout\\]\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_config.ServerConfig:1 of -msgid "Flower server config." 
+#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -":py:obj:`SimpleClientManager `\\ \\(\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Provides a pool of available clients." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -msgid ":py:obj:`flwr.server.strategy `\\" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -msgid ":py:obj:`flwr.server.workflow `\\" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.workflow:1 of -msgid "Workflows." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." msgstr "" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 -msgid "ClientManager" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +msgid "" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "Return all available clients." +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). 
If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -":py:obj:`num_available `\\ \\(\\)" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of -msgid "Return the number of available clients." +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`register `\\ \\(client\\)" +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of -msgid "Register Flower ClientProxy instance." 
+#: ../../source/ref-api/flwr.server.workflow.rst:2 +msgid "workflow" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`sample `\\ \\(num\\_clients\\[\\, " -"min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -msgid "Sample a number of Flower ClientProxy instances." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`unregister `\\ \\(client\\)" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of -msgid "Unregister Flower ClientProxy instance." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." 
msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of -msgid "Wait until at least `num_clients` are available." +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." msgstr "" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of -msgid "**num_available** -- The number of currently available clients." +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +msgid "DefaultWorkflow" msgstr "" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of -msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any reason." +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +msgid "SecAggPlusWorkflow" msgstr "" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of -msgid "This method is idempotent." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of +msgid "" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. 
This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:2 -msgid "Driver" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -":py:obj:`create_message `\\ \\(content\\, " -"message\\_type\\, ...\\[\\, ttl\\]\\)" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "Create a new message with specified parameters." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of +msgid "" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." 
msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid ":py:obj:`get_node_ids `\\ \\(\\)" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of +msgid "" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of -msgid "Get node IDs." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of +msgid "" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -msgid "Pull messages based on message IDs." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of +msgid "" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." 
msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of msgid "" -":py:obj:`push_messages `\\ \\(messages\\)" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of -msgid "Push messages to specified node IDs." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of +msgid "" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of -msgid "Push messages to specified node IDs and pull the reply messages." 
+#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of +msgid "" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:3 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of msgid "" -"This method constructs a new `Message` with given content and metadata. The " -"`run_id` and `src_node_id` will be set automatically." +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:6 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of msgid "" -"The content for the new message. This holds records that are to be sent to " -"the destination node." +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:9 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of msgid "" -"The type of the message, defining the action to be executed on the receiving " -"end." 
+"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:12 of -msgid "The ID of the destination node to which the message is being sent." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:14 of -msgid "" -"The ID of the group to which this message is associated. In some settings, " -"this is used as the FL round." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:17 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of msgid "" -"Time-to-live for the round trip of this message, i.e., the time from sending " -"this message to receiving a reply. It specifies in seconds the duration for " -"which the message and its potential reply are considered valid. If unset, " -"the default TTL (i.e., `common.DEFAULT_TTL`) will be used." +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." 
msgstr "" -#: flwr.server.driver.driver.Driver.create_message:23 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:3 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of msgid "" -"This method is used to collect messages from the SuperLink that correspond " -"to a set of given message IDs." +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:6 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"An iterable of message IDs for which reply messages are to be retrieved." +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:9 of -msgid "**messages** -- An iterable of messages received." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." 
msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:3 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"This method takes an iterable of messages and sends each message to the node " -"specified in `dst_node_id`." +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -msgid "An iterable of messages to be sent." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, which " -"can be used to pull replies." +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of -msgid "" -"This method sends a list of messages to their destination node IDs and then " -"waits for the replies. It continues to pull replies until either all replies " -"are received or the specified timeout duration is exceeded." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"The timeout duration in seconds. 
If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the method " -"will wait until replies for all messages are received." +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -msgid "" -"**replies** -- An iterable of reply messages received from the SuperLink." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of -msgid "" -"This method uses `push_messages` to send the messages and `pull_messages` to " -"collect the replies. If `timeout` is set, the method may not return replies " -"for all sent messages. A message remains valid until its TTL, which is not " -"affected by `timeout`." +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +msgid "SecAggWorkflow" msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -":py:obj:`add_loss_centralized `\\ " -"\\(server\\_round\\, loss\\)" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. 
This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "Add one loss entry (from centralized evaluation)." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of +msgid "" +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of msgid "" -":py:obj:`add_loss_distributed `\\ " -"\\(server\\_round\\, loss\\)" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of +msgid "" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." 
msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of msgid "" -":py:obj:`add_metrics_centralized `\\ \\(server\\_round\\, metrics\\)" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -msgid "Add metrics entries (from centralized evaluation)." +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of +msgid "" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -":py:obj:`add_metrics_distributed `\\ \\(server\\_round\\, metrics\\)" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of -msgid "Add metrics entries (from distributed evaluation)." 
+#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -":py:obj:`add_metrics_distributed_fit `\\ \\(server\\_round\\, ...\\)" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of -msgid "Add metrics entries (from distributed fit)." +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 -msgid "LegacyContext" +#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" msgstr "" -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Bases: :py:class:`~flwr.common.context.Context`" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +msgid "" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." 
msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`strategy `\\" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy +msgid "" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`client_manager `\\" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`history `\\" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +msgid "run\\_simulation" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`state `\\" +#: flwr.simulation.run_simulation.run_simulation:3 of +msgid "" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." msgstr "" -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" +#: flwr.simulation.run_simulation.run_simulation:6 of +msgid "" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`client_manager `\\ \\(\\)" +#: flwr.simulation.run_simulation.run_simulation:9 of +msgid "" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." msgstr "" -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of -msgid "Return ClientManager." +#: flwr.simulation.run_simulation.run_simulation:12 of +msgid "A simulation backend that runs `ClientApp`s." 
msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -":py:obj:`disconnect_all_clients `\\ \\(timeout\\)" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -msgid "Send shutdown signal to all clients." +#: flwr.simulation.run_simulation.run_simulation:21 of +msgid "" +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.simulation.run_simulation.run_simulation:28 of msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -msgid "Validate current global model on a number of clients." 
+#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +#: ../../source/ref-changelog.md:1 +msgid "Changelog" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of -msgid "Run federated averaging for a number of rounds." +#: ../../source/ref-changelog.md:3 +msgid "v1.11.1 (2024-09-11)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\, " -"timeout\\)" +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of -msgid "Perform a single round of federated averaging." 
+#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 +msgid "" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-changelog.md:9 msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of -msgid "Set the max_workers used by ThreadPoolExecutor." -msgstr "" +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "선택적 개선 사항" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-changelog.md:13 msgid "" -":py:obj:`set_strategy `\\ \\(strategy\\)" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of -msgid "Replace server strategy." 
+#: ../../source/ref-changelog.md:15 +msgid "" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -msgid "ServerApp" +#: ../../source/ref-changelog.md:17 +msgid "" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -#: flwr.server.server_app.ServerApp:5 of -msgid "Use the `ServerApp` with an existing `Strategy`:" +#: ../../source/ref-changelog.md:19 +msgid "" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -#: flwr.server.server_app.ServerApp:15 of -msgid "Use the `ServerApp` with a custom main function:" +#: ../../source/ref-changelog.md:21 +msgid "" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid ":py:obj:`main `\\ \\(\\)" +#: ../../source/ref-changelog.md:23 +msgid "" +"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid "Return a decorator that registers the main fn with the server app." +#: ../../source/ref-changelog.md:25 +msgid "" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 -msgid "ServerConfig" +#: ../../source/ref-changelog.md:27 +msgid "" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -#: flwr.server.server_config.ServerConfig:3 of +#: ../../source/ref-changelog.md:29 msgid "" -"All attributes have default values which allows users to configure just the " -"ones they care about." 
+"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`num_rounds `\\" +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`round_timeout `\\" +#: ../../source/ref-changelog.md:35 +msgid "v1.11.0 (2024-08-30)" msgstr "" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 -msgid "SimpleClientManager" +#: ../../source/ref-changelog.md:41 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" msgstr "" -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +#: ../../source/ref-changelog.md:45 +msgid "" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " 
+"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-changelog.md:47 +msgid "" +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-changelog.md:49 msgid "" -":py:obj:`num_available `\\ " -"\\(\\)" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`register `\\ \\(client\\)" +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-changelog.md:53 msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-changelog.md:55 msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: ../../source/ref-changelog.md:59 msgid "" -"Blocks until the requested number of clients is available or until a timeout " -"is reached. Current timeout default: 1 day." 
+"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of -msgid "The number of clients to wait for." +#: ../../source/ref-changelog.md:60 +msgid "" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of -msgid "The time in seconds to wait for, defaults to 86400 (24h)." +#: ../../source/ref-changelog.md:61 +msgid "" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of -msgid "**success**" +#: ../../source/ref-changelog.md:63 +msgid "" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -msgid "run\\_driver\\_api" +#: ../../source/ref-changelog.md:65 +msgid "" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." 
msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +#: ../../source/ref-changelog.md:68 +msgid "" +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" +#: ../../source/ref-changelog.md:69 +msgid "" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -#: ../../source/ref-api/flwr.server.start_server.rst:2 -msgid "start\\_server" +#: ../../source/ref-changelog.md:70 +msgid "" +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: ../../source/ref-changelog.md:72 +msgid "" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -#: flwr.server.app.start_server:5 of +#: ../../source/ref-changelog.md:74 msgid "" -"A server implementation, either `flwr.server.Server` or a subclass thereof. " -"If no instance is provided, then `start_server` will create one." +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." 
msgstr "" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +#: ../../source/ref-changelog.md:76 msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -#: flwr.server.app.start_server:12 of +#: ../../source/ref-changelog.md:78 msgid "" -"An implementation of the abstract base class `flwr.server.strategy." -"Strategy`. If no strategy is provided, then `start_server` will use `flwr." -"server.strategy.FedAvg`." +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" -#: flwr.server.app.start_server:16 of +#: ../../source/ref-changelog.md:80 msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`. If " -"no implementation is provided, then `start_server` will use `flwr.server." -"client_manager.SimpleClientManager`." 
+"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -#: flwr.server.app.start_server:21 of +#: ../../source/ref-changelog.md:82 msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower " -"clients. The default should be sufficient for most models. Users who train " -"very large models might need to increase this value. Note that the Flower " -"clients need to be started with the same value (see `flwr.client." -"start_client`), otherwise clients will not know about the increased limit " -"and block larger messages." +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." msgstr "" -#: flwr.server.app.start_server:28 of +#: ../../source/ref-changelog.md:84 msgid "" -"Tuple containing root certificate, server certificate, and private key to " -"start a secure SSL-enabled server. The tuple is expected to have three bytes " -"elements in the following order: * CA certificate. * server " -"certificate. * server private key." +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" +msgstr "" + +#: ../../source/ref-changelog.md:86 +msgid "" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." 
msgstr "" -#: flwr.server.app.start_server:28 of +#: ../../source/ref-changelog.md:88 msgid "" -"Tuple containing root certificate, server certificate, and private key to " -"start a secure SSL-enabled server. The tuple is expected to have three bytes " -"elements in the following order:" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -#: flwr.server.app.start_server:32 of -msgid "CA certificate." +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." 
+msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." 
+msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " 
+"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" msgstr "" -#: flwr.server.app.start_server:33 of -msgid "server certificate." +#: ../../source/ref-changelog.md:102 +msgid "" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -#: flwr.server.app.start_server:34 of -msgid "server private key." +#: ../../source/ref-changelog.md:104 +msgid "" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." 
+#: ../../source/ref-changelog.md:113 +msgid "" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" +#: ../../source/ref-changelog.md:115 +msgid "" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" +#: ../../source/ref-changelog.md:117 +msgid "" +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:2 -msgid "strategy" +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:121 msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, fraction\\_fit\\, " -"fraction\\_evaluate\\, ...\\)" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." +#: ../../source/ref-changelog.md:123 +msgid "" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. 
For example:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: ../../source/ref-changelog.md:135 +msgid "" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:137 msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: ../../source/ref-changelog.md:139 +msgid "v1.10.0 (2024-07-24)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:145 msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping `\\ \\(...\\)" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side adaptive clipping." 
+#: ../../source/ref-changelog.md:149 +msgid "" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:151 msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping `\\ \\(...\\)" +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side adaptive clipping." 
+#: ../../source/ref-changelog.md:153 +msgid "" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:155 msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping `\\ \\(...\\)" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side fixed clipping." 
+#: ../../source/ref-changelog.md:157 +msgid "" +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:159 msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping `\\ \\(...\\)" +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side fixed clipping." +#: ../../source/ref-changelog.md:161 +msgid "" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:163 msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." 
+#: ../../source/ref-changelog.md:165 +msgid "" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:167 msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." +#: ../../source/ref-changelog.md:169 +msgid "" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:171 msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Federated Averaging strategy." +#: ../../source/ref-changelog.md:173 +msgid "" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:175 msgid "" -":py:obj:`FedAvgAndroid `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:177 msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -msgid "Federated Averaging with Momentum strategy." 
+#: ../../source/ref-changelog.md:179 +msgid "" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:181 msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -msgid "Configurable FedMedian strategy implementation." 
+#: ../../source/ref-changelog.md:183 +msgid "" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:185 msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of -msgid "Federated Optim strategy." +#: ../../source/ref-changelog.md:187 +msgid "" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:189 msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Federated Optimization strategy." 
+#: ../../source/ref-changelog.md:191 +msgid "" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:193 msgid "" -":py:obj:`FedTrimmedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: ../../source/ref-changelog.md:195 +msgid "" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:197 msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." 
+#: ../../source/ref-changelog.md:199 +msgid "" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:201 msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." +#: ../../source/ref-changelog.md:203 +msgid "" +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:207 +#, fuzzy +msgid "Documentation improvements" +msgstr "선택적 개선 사항" + +#: ../../source/ref-changelog.md:209 msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." 
+#: ../../source/ref-changelog.md:211 +msgid "" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:213 msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: ../../source/ref-changelog.md:215 +msgid "" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FaultTolerantFedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: ../../source/ref-changelog.md:221 +msgid "" +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:223 msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, fraction\\_fit\\, " -"fraction\\_evaluate\\, ...\\]\\)" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: ../../source/ref-changelog.md:225 +msgid "" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. 
`ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-changelog.md:229 msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, q\\_param\\, " -"qffl\\_learning\\_rate\\, ...\\]\\)" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." +#: ../../source/ref-changelog.md:231 +msgid "" +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid ":py:obj:`Strategy `\\ \\(\\)" +#: ../../source/ref-changelog.md:233 +msgid "" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." +#: ../../source/ref-changelog.md:235 +msgid "" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 -msgid "Bulyan" +#: ../../source/ref-changelog.md:237 +msgid "v1.9.0 (2024-06-10)" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +#: ../../source/ref-changelog.md:243 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." 
+#: ../../source/ref-changelog.md:247 +msgid "" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." +#: ../../source/ref-changelog.md:249 +msgid "" +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." +#: ../../source/ref-changelog.md:251 +msgid "" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." 
+#: ../../source/ref-changelog.md:253 +msgid "" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." +#: ../../source/ref-changelog.md:255 +msgid "" +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: ../../source/ref-changelog.md:257 +msgid "" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: ../../source/ref-changelog.md:259 +msgid "" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." +#: ../../source/ref-changelog.md:261 +msgid "" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." +#: ../../source/ref-changelog.md:263 +msgid "" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." +#: ../../source/ref-changelog.md:265 +msgid "" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." +#: ../../source/ref-changelog.md:267 +msgid "" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -msgid "Initial global model parameters." +#: ../../source/ref-changelog.md:269 +msgid "" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:27 of +#: ../../source/ref-changelog.md:271 msgid "" -"Byzantine resilient aggregation rule that is used as the first step of the " -"Bulyan (e.g., Krum)" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" +#: ../../source/ref-changelog.md:273 +msgid "" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:275 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate evaluation losses using weighted average." 
+#: ../../source/ref-changelog.md:277 +msgid "" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:279 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate fit results using Bulyan." +#: ../../source/ref-changelog.md:281 +msgid "" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:283 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." 
+#: ../../source/ref-changelog.md:285 +msgid "" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" +msgstr "" + +#: ../../source/ref-changelog.md:287 +msgid "" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." +msgstr "" + +#: ../../source/ref-changelog.md:289 +msgid "" +"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " 
+"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" +msgstr "" + +#: ../../source/ref-changelog.md:291 +msgid "" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:293 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." +#: ../../source/ref-changelog.md:295 +msgid "" +"As always, the Flower documentation has received many updates. 
Notable " +"new pages include:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:297 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." 
+#: ../../source/ref-changelog.md:299 +msgid "" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:301 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Initialize global model parameters." 
+#: ../../source/ref-changelog.md:303 +msgid "" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:305 +msgid "As always, Flower code examples have received many updates." 
+msgstr "" + +#: ../../source/ref-changelog.md:307 +msgid "" +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" +msgstr "" + +#: 
../../source/ref-changelog.md:311 +msgid "**Deprecate Python 3.8 support**" +msgstr "" + +#: ../../source/ref-changelog.md:313 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." +#: ../../source/ref-changelog.md:315 +msgid "" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:317 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." +#: ../../source/ref-changelog.md:319 +msgid "" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" +msgstr "" + +#: ../../source/ref-changelog.md:321 +msgid "" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" +#: ../../source/ref-changelog.md:325 +msgid "" +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +#: ../../source/ref-changelog.md:327 +msgid "" +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. 
Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of -msgid "This class is deprecated and will be removed in a future release." +#: ../../source/ref-changelog.md:329 +msgid "" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:331 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation losses using the given strategy." +#: ../../source/ref-changelog.md:333 +msgid "" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:335 msgid "" -":py:obj:`aggregate_fit `\\ \\(server\\_round\\, results\\, failures\\)" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." 
msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +#: ../../source/ref-changelog.md:337 +msgid "v1.8.0 (2024-04-03)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:343 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " +msgstr "" + +#: ../../source/ref-changelog.md:347 +msgid "" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " 
+"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" +msgstr "" + +#: ../../source/ref-changelog.md:349 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." 
+#: ../../source/ref-changelog.md:351 +msgid "" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:353 msgid "" -":py:obj:`configure_fit `\\ \\(server\\_round\\, parameters\\, ...\\)" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:355 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +#: ../../source/ref-changelog.md:357 msgid "" -"Evaluate model parameters using an evaluation function from the strategy." +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. 
Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:359 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: 
flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." +#: ../../source/ref-changelog.md:361 +msgid "" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." 
+#: ../../source/ref-changelog.md:363 +msgid "" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." +#: ../../source/ref-changelog.md:365 +msgid "" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +#: ../../source/ref-changelog.md:367 msgid "" -"**evaluate_configuration** -- A list of tuples. 
Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this list, " -"it means that this `ClientProxy` will not participate in the next round of " -"federated evaluation." +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" +#: ../../source/ref-changelog.md:369 +msgid "" +"Built-in Secure Aggregation is here! Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we include support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +#: ../../source/ref-changelog.md:371 +msgid "" +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:373 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:375 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." +#: ../../source/ref-changelog.md:377 +msgid "" +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:379 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:381 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. 
The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. `--max-" +"retries` will define the number of attempts the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +#: ../../source/ref-changelog.md:383 msgid "" -"Configure the next round of training incorporating Differential Privacy (DP)." +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:385 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselines have been updated as well." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:387 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +#: ../../source/ref-changelog.md:389 msgid "" -"Configuration of the next training round includes information related to DP, " -"such as clip norm and noise stddev." +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
+msgstr "" + +#: ../../source/ref-changelog.md:391 +msgid "" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" +msgstr "" + +#: ../../source/ref-changelog.md:393 +msgid "" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." 
+msgstr "" + +#: ../../source/ref-changelog.md:395 +msgid "" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " 
+"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " 
+"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" +msgstr "" + +#: ../../source/ref-changelog.md:401 +msgid "v1.7.0 (2024-02-05)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +#: ../../source/ref-changelog.md:407 msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list identifies " -"a `ClientProxy` and the `FitIns` for this particular `ClientProxy`. If a " -"particular `ClientProxy` is not included in this list, it means that this " -"`ClientProxy` will not participate in the next round of federated learning." +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyClientSideAdaptiveClipping" +#: ../../source/ref-changelog.md:411 +msgid "" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of -msgid "Use `adaptiveclipping_mod` modifier at the client side." +#: ../../source/ref-changelog.md:413 +msgid "" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. 
Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of +#: ../../source/ref-changelog.md:415 msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `adaptiveclipping_mod`." +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -msgid "The strategy to which DP functionalities will be added by this wrapper." +#: ../../source/ref-changelog.md:417 +msgid "" +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of -msgid "The noise multiplier for the Gaussian mechanism for model updates." +#: ../../source/ref-changelog.md:419 +msgid "" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of -msgid "The number of clients that are sampled on each round." +#: ../../source/ref-changelog.md:421 +msgid "" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of +#: ../../source/ref-changelog.md:423 msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of +#: ../../source/ref-changelog.md:425 msgid "" -"The desired quantile of updates which should be clipped. Defaults to 0.5." 
+"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of +#: ../../source/ref-changelog.md:427 msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. Andrew " -"et al. recommends to set to 0.2." +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of +#: ../../source/ref-changelog.md:429 msgid "" -"The stddev of the noise added to the count of updates currently below the " -"estimate. Andrew et al. recommends to set to `expected_num_records/20`" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of -msgid "Create a strategy:" +#: ../../source/ref-changelog.md:431 +msgid "" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of +#: ../../source/ref-changelog.md:433 msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideAdaptiveClipping` " -"wrapper:" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +#: ../../source/ref-changelog.md:435 +msgid "" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:437 msgid "" -":py:obj:`aggregate_evaluate `\\ " -"\\(server\\_round\\, results\\, ...\\)" +"The Flower server 
can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:439 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of -msgid "Aggregate training results and update clip norms." 
+#: ../../source/ref-changelog.md:441 +msgid "" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:443 msgid "" -":py:obj:`configure_evaluate `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:445 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:447 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:449 msgid "" -":py:obj:`initialize_parameters `\\ " -"\\(client\\_manager\\)" +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 -msgid "DifferentialPrivacyClientSideFixedClipping" +#: ../../source/ref-changelog.md:451 +msgid "Many Flower code examples received substantial updates." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of -msgid "Use `fixedclipping_mod` modifier at the client side." +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +msgid "**Update Flower Baselines**" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: ../../source/ref-changelog.md:455 msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen on " -"the client-side, usually by using the built-in `fixedclipping_mod`." 
+"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of -msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A value " -"of 1.0 or higher is recommended for strong privacy." +#: ../../source/ref-changelog.md:456 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +msgstr "" + +#: ../../source/ref-changelog.md:457 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +msgstr "" + +#: ../../source/ref-changelog.md:458 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "" + +#: ../../source/ref-changelog.md:459 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "" + +#: ../../source/ref-changelog.md:460 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of +#: ../../source/ref-changelog.md:462 msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +#: ../../source/ref-changelog.md:464 +msgid "" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " 
+"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:466 msgid "" -":py:obj:`aggregate_evaluate `\\ " -"\\(server\\_round\\, results\\, ...\\)" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:468 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of -msgid "Add noise to the aggregated parameters." 
+#: ../../source/ref-changelog.md:470 +msgid "" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " 
+"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:474 msgid "" -":py:obj:`configure_evaluate `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:476 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:478 msgid "" -":py:obj:`evaluate `\\ \\(server\\_round\\, " -"parameters\\)" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:480 msgid "" -":py:obj:`initialize_parameters `\\ " -"\\(client\\_manager\\)" +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyServerSideAdaptiveClipping" +#: ../../source/ref-changelog.md:482 +msgid "" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of +#: ../../source/ref-changelog.md:484 msgid "" -"The standard deviation of the noise added to the count of updates below the " -"estimate. Andrew et al. recommends to set to `expected_num_records/20`" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of +#: ../../source/ref-changelog.md:486 msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:488 msgid "" -":py:obj:`aggregate_evaluate `\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:490 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:492 msgid "" -":py:obj:`configure_evaluate `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-changelog.md:494 +msgid "v1.6.0 (2023-11-28)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:500 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:504 msgid "" -":py:obj:`initialize_parameters `\\ " -"\\(client\\_manager\\)" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 -msgid "DifferentialPrivacyServerSideFixedClipping" +#: ../../source/ref-changelog.md:506 +msgid "" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of +#: ../../source/ref-changelog.md:508 msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:510 msgid "" -":py:obj:`aggregate_evaluate `\\ " -"\\(server\\_round\\, results\\, ...\\)" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:512 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of -msgid "Compute the updates, clip, and pass them for aggregation." 
+#: ../../source/ref-changelog.md:514 +msgid "" +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:516 msgid "" -":py:obj:`configure_evaluate `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:518 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ \\(server\\_round\\, " -"parameters\\)" +#: ../../source/ref-changelog.md:520 +msgid "Add gRPC request-response capability to the Android SDK." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:522 msgid "" -":py:obj:`initialize_parameters `\\ " -"\\(client\\_manager\\)" +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of -msgid "Afterward, add noise to the aggregated parameters." +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -msgid "FaultTolerantFedAvg" +#: ../../source/ref-changelog.md:526 +msgid "" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:528 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:530 msgid "" -":py:obj:`aggregate_fit `\\ \\(server\\_round\\, results\\, failures\\)" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." +#: ../../source/ref-changelog.md:532 +msgid "" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:534 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:536 msgid "" -":py:obj:`configure_fit `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:538 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:540 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:548 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:997 -msgid "FedAdagrad" +#: ../../source/ref-changelog.md:550 +msgid "" 
+"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +#: ../../source/ref-changelog.md:552 +msgid "" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#: ../../source/ref-changelog.md:554 +msgid "" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." +#: ../../source/ref-changelog.md:556 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." 
+#: ../../source/ref-changelog.md:558 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." +#: ../../source/ref-changelog.md:560 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: ../../source/ref-changelog.md:562 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-changelog.md:564 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-changelog.md:566 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-changelog.md:568 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:570 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:572 msgid "" 
-":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:574 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:576 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:578 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"**General updates to 
Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -msgid "FedAdam" +#: ../../source/ref-changelog.md:580 +msgid "" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " 
+"[#2596](https://github.com/adap/flower/pull/2596))" +msgstr "" + +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: ../../source/ref-changelog.md:586 +msgid "" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." +#: ../../source/ref-changelog.md:588 +msgid "" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:590 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:592 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-changelog.md:594 +msgid "v1.5.0 (2023-08-31)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:600 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:604 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:606 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:608 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:610 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " 
+"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 -msgid "FedAvg" +#: ../../source/ref-changelog.md:612 +msgid "" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: ../../source/ref-changelog.md:614 +msgid "" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of +#: ../../source/ref-changelog.md:616 msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will still " -"be sampled. Defaults to 1.0." +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of +#: ../../source/ref-changelog.md:618 msgid "" -"Fraction of clients used during validation. 
In case `min_evaluate_clients` " -"is larger than `fraction_evaluate * available_clients`, " -"`min_evaluate_clients` will still be sampled. Defaults to 1.0." +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:33 of -msgid "Enable (True) or disable (False) in-place aggregation of model updates." +#: ../../source/ref-changelog.md:620 +msgid "" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:622 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:624 msgid "" -":py:obj:`aggregate_fit `\\ " 
-"\\(server\\_round\\, results\\, failures\\)" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-changelog.md:626 +msgid "**Deprecate Python 3.7**" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:628 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:630 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:632 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:634 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:636 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -msgid "FedAvgAndroid" +#: ../../source/ref-changelog.md:638 +msgid "" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:640 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:642 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:644 msgid "" -":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -msgid "Deserialize NumPy array from bytes." +#: ../../source/ref-changelog.md:646 +msgid "" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:648 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:650 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:652 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:654 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:656 msgid "" -":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of -msgid "Serialize NumPy array to bytes." +#: ../../source/ref-changelog.md:658 +msgid "" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:660 msgid "" -":py:obj:`ndarrays_to_parameters `\\ \\(ndarrays\\)" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:662 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +#: ../../source/ref-changelog.md:664 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:666 msgid "" -":py:obj:`parameters_to_ndarrays `\\ \\(parameters\\)" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of -msgid "Convert parameters object to NumPy weights." +#: ../../source/ref-changelog.md:668 +msgid "" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. 
A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 -msgid "FedAvgM" +#: ../../source/ref-changelog.md:670 +msgid "" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of +#: ../../source/ref-changelog.md:684 msgid "" -"Server-side learning rate used in server-side optimization. Defaults to 1.0." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." 
+#: ../../source/ref-changelog.md:688 +msgid "" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:690 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:692 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:694 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. 
To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:696 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:698 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:700 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:702 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:704 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: ../../source/ref-changelog.md:706 +msgid "" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:708 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:710 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." 
+#: ../../source/ref-changelog.md:712 +msgid "" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:714 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:716 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:718 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:720 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:722 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:724 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" +#: ../../source/ref-changelog.md:726 +msgid "" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: ../../source/ref-changelog.md:728 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." 
+#: ../../source/ref-changelog.md:730 +msgid "" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:732 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:734 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " 
+"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:748 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:752 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:754 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:756 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:758 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" +#: ../../source/ref-changelog.md:760 +msgid "" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: 
../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of +#: ../../source/ref-changelog.md:764 msgid "" -"The strategy in itself will not be different than FedAvg, the client needs " -"to be adjusted. A proximal term needs to be added to the loss function " -"during the training:" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of +#: ../../source/ref-changelog.md:766 msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of +#: ../../source/ref-changelog.md:768 msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: ../../source/ref-changelog.md:770 +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." 
msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" +#: ../../source/ref-changelog.md:772 +msgid "" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:30 of +#: ../../source/ref-changelog.md:774 msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:65 of +#: ../../source/ref-changelog.md:776 msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to be " -"closer to the server parameters during training)." +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:778 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:780 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:782 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:784 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:786 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:788 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " 
+"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:792 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " 
+"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: ../../source/ref-changelog.md:806 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +#: ../../source/ref-changelog.md:810 +msgid "" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: ../../source/ref-changelog.md:812 +msgid "" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. 
Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:814 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:816 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." +#: ../../source/ref-changelog.md:818 +msgid "" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:820 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! 
Check out the updated notebooks here:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:822 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:823 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:824 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:825 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:827 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" +#: ../../source/ref-changelog.md:829 +msgid "" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " 
+"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:831 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation metrics using average." +#: ../../source/ref-changelog.md:833 +msgid "" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:835 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! 
Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: ../../source/ref-changelog.md:837 +msgid "" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:839 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" 
msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:841 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:843 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:845 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:847 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:849 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: ../../source/ref-changelog.md:851 +msgid "" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:853 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:855 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:859 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " 
+"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:863 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:873 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:875 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:879 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 -msgid "FedXgbNnAvg" +#: ../../source/ref-changelog.md:881 +msgid "" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +#: ../../source/ref-changelog.md:883 msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: https://github.com/adap/flower/tree/main/baselines/hfedxgboost." +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:885 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:887 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:889 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:891 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:893 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:895 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:897 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"Some internals of the Virtual Client Engine have been revamped. 
The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:899 msgid "" -":py:obj:`num_fit_clients `\\ \\(num\\_available\\_clients\\)" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" +#: ../../source/ref-changelog.md:901 +msgid "" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." +#: ../../source/ref-changelog.md:903 +msgid "" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." +#: ../../source/ref-changelog.md:905 +msgid "" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
+#: ../../source/ref-changelog.md:907 +msgid "" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:909 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:911 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:913 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:915 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:917 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:919 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:921 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:923 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +#: ../../source/ref-changelog.md:925 +msgid "" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" msgstr "" -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in " -"that case classical Krum is applied." 
+#: ../../source/ref-changelog.md:933 +msgid "Highlights" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:938 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:942 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +msgstr "" + +#: ../../source/ref-changelog.md:944 +msgid "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), 
" +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." +msgstr "" + +#: ../../source/ref-changelog.md:948 +msgid "" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "" + +#: ../../source/ref-changelog.md:950 +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. 
Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." +msgstr "" + +#: ../../source/ref-changelog.md:952 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:954 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:956 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:958 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -msgid "QFedAvg" +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:964 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:966 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. 
Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:970 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:972 msgid "" -":py:obj:`num_evaluation_clients `\\ \\(num\\_available\\_clients\\)" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-changelog.md:974 msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -msgid "Strategy" +#: ../../source/ref-changelog.md:976 +msgid "" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." 
msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:978 msgid "" -":py:obj:`aggregate_evaluate `\\ \\(server\\_round\\, results\\, ...\\)" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation results." +#: ../../source/ref-changelog.md:980 +msgid "" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:982 msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." 
msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:986 msgid "" -":py:obj:`configure_evaluate `\\ \\(server\\_round\\, parameters\\, ...\\)" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:988 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:990 msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." +#: ../../source/ref-changelog.md:992 +msgid "" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-changelog.md:994 msgid "" -":py:obj:`initialize_parameters `\\ \\(client\\_manager\\)" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." 
+#: ../../source/ref-changelog.md:996 +msgid "" +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: ../../source/ref-changelog.md:998 msgid "" -"Successful updates from the previously selected and configured clients. Each " -"pair of `(ClientProxy, FitRes` constitutes a successful update from one of " -"the previously selected clients. Not that not all previously selected " -"clients are necessarily included in this list: a client might drop out and " -"not submit a result. For each client that did not submit an update, there " -"should be an `Exception` in `failures`." +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +#: ../../source/ref-changelog.md:1000 msgid "" -"Exceptions that occurred while the server was waiting for client updates." +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +#: ../../source/ref-changelog.md:1002 msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: ../../source/ref-changelog.md:1004 msgid "" -"Successful updates from the previously selected and configured clients. Each " -"pair of `(ClientProxy, FitRes)` constitutes a successful update from one of " -"the previously selected clients. 
Not that not all previously selected " -"clients are necessarily included in this list: a client might drop out and " -"not submit a result. For each client that did not submit an update, there " -"should be an `Exception` in `failures`." +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: ../../source/ref-changelog.md:1008 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the previous " -"parameters with the ones returned from this method). If `None` is returned " -"(e.g., because there were only failures and no viable results) then the " -"server will no update the previous model parameters, the updates received in " -"this round are discarded, and the global model parameters remain the same." +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: ../../source/ref-changelog.md:1010 msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +#: ../../source/ref-changelog.md:1012 msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple containing " -"loss and a dictionary containing task-specific metrics (e.g., accuracy)." 
+"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: ../../source/ref-changelog.md:1014 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:2 -msgid "workflow" +#: ../../source/ref-changelog.md:1016 +msgid "" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-changelog.md:1018 msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -msgid "Default workflow in Flower." 
+#: ../../source/ref-changelog.md:1020 +msgid "" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-changelog.md:1022 msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "The workflow for the SecAgg+ protocol." +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "The workflow for the SecAgg protocol." +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -msgid "DefaultWorkflow" +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 -msgid "SecAggPlusWorkflow" +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of -msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors owned " -"by multiple parties, without accessing any individual integer vector. 
This " -"workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and a " -"weighted version of the locally updated parameters, both of which are masked " -"for privacy. Specifically, each client uploads \"[w, w * params]\" with " -"masks, where weighting factor 'w' is the number of examples ('num_examples') " -"and 'params' represents the model parameters ('parameters') from the " -"client's `FitRes`. The server then aggregates these contributions to compute " -"the weighted average of model parameters." +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of +#: ../../source/ref-changelog.md:1031 msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -msgid "key shares." +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of +#: ../../source/ref-changelog.md:1033 msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." 
+"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of +#: ../../source/ref-changelog.md:1035 msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 -#: of +#: ../../source/ref-changelog.md:1037 msgid "" -"Only the aggregated model parameters are exposed and passed to `Strategy." -"aggregate_fit`, ensuring individual data privacy." +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of -msgid "" -"The number of shares into which each client's private key is split under the " -"SecAgg+ protocol. If specified as a float, it represents the proportion of " -"all selected clients, and the number of shares will be set dynamically in " -"the run time. A private key can be reconstructed from these shares, allowing " -"for the secure aggregation of model updates. Each client sends one share to " -"each of its neighbors while retaining one." +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of +#: ../../source/ref-changelog.md:1041 msgid "" -"The minimum number of shares required to reconstruct a client's private key, " -"or, if specified as a float, it represents the proportion of the total " -"number of shares needed for reconstruction. This threshold ensures privacy " -"by allowing for the recovery of contributions from dropped clients during " -"aggregation, without compromising individual client data." +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of +#: ../../source/ref-changelog.md:1042 msgid "" -"The maximum value of the weight that can be assigned to any single client's " -"update during the weighted average calculation on the server side, e.g., in " -"the FedAvg algorithm." 
+"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 -#: of +#: ../../source/ref-changelog.md:1043 msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within [-" -"clipping_range, clipping_range], facilitating quantization." +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of +#: ../../source/ref-changelog.md:1044 msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of -msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. Please " -"use 2**n values for `modulus_range` to prevent overflow issues." 
+#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 -#: of +#: ../../source/ref-changelog.md:1050 msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for " -"replies for this duration each time. If `None`, there is no time limit and " -"the workflow will wait until replies for all messages are received." +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 -#: of +#: ../../source/ref-changelog.md:1052 msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` means " -"better privacy guarantees but less tolerance to dropouts." +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of +#: ../../source/ref-changelog.md:1054 msgid "" -"Too large `max_weight` may compromise the precision of the quantization." 
+"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 -#: of -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +#: ../../source/ref-changelog.md:1056 +msgid "" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 -#: of +#: ../../source/ref-changelog.md:1058 msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in the " -"runtime. This allows for dynamic adjustment based on the total number of " -"participating clients." +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of +#: ../../source/ref-changelog.md:1060 msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted as " -"the proportion of the number of shares needed for the reconstruction of a " -"private key. This feature enables flexibility in setting the security " -"threshold relative to the number of distributed shares." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 -#: of +#: ../../source/ref-changelog.md:1062 msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+ " -"protocol." +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:1064 msgid "" -":py:obj:`collect_masked_vectors_stage `\\ \\(driver\\, ...\\)" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "Execute the 'collect masked vectors' stage." +#: ../../source/ref-changelog.md:1066 +msgid "" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:1068 msgid "" -":py:obj:`setup_stage `\\ \\(driver\\, context\\, state\\)" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. 
If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 -#: of -msgid "Execute the 'setup' stage." +#: ../../source/ref-changelog.md:1070 +msgid "" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:1072 msgid "" -":py:obj:`share_keys_stage `\\ \\(driver\\, context\\, state\\)" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 -#: of -msgid "Execute the 'share keys' stage." 
+#: ../../source/ref-changelog.md:1074 +msgid "" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:1076 msgid "" -":py:obj:`unmask_stage `\\ \\(driver\\, context\\, state\\)" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 -#: of -msgid "Execute the 'unmask' stage." +#: ../../source/ref-changelog.md:1078 +msgid "" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 -msgid "SecAggWorkflow" +#: ../../source/ref-changelog.md:1080 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: ../../source/ref-changelog.md:1082 msgid "" -"Bases: :py:class:`~flwr.server.workflow.secure_aggregation." -"secaggplus_workflow.SecAggPlusWorkflow`" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#: ../../source/ref-changelog.md:1084 msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned by " -"multiple parties, without accessing any individual integer vector. 
This " -"workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and a " -"weighted version of the locally updated parameters, both of which are masked " -"for privacy. Specifically, each client uploads \"[w, w * params]\" with " -"masks, where weighting factor 'w' is the number of examples ('num_examples') " -"and 'params' represents the model parameters ('parameters') from the " -"client's `FitRes`. The server then aggregates these contributions to compute " -"the weighted average of model parameters." +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of +#: ../../source/ref-changelog.md:1086 msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg configuration " -"to clients and collect their public keys. - 'share keys': Broadcast public " -"keys among clients and collect encrypted secret" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +#: ../../source/ref-changelog.md:1088 msgid "" -"Each client's private key is split into N shares under the SecAgg protocol, " -"where N is the number of selected clients." +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of +#: ../../source/ref-changelog.md:1090 msgid "" -"Generally, higher `reconstruction_threshold` means better privacy guarantees " -"but less tolerance to dropouts." 
+"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +#: ../../source/ref-changelog.md:1092 msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in setting " -"the security threshold relative to the number of selected clients." +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of +#: ../../source/ref-changelog.md:1096 msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." 
+"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:1097 msgid "" -":py:obj:`collect_masked_vectors_stage `\\ \\(driver\\, ...\\)" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:1098 msgid "" -":py:obj:`setup_stage `\\ " -"\\(driver\\, context\\, state\\)" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:1099 msgid "" -":py:obj:`share_keys_stage `\\ \\(driver\\, context\\, state\\)" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-changelog.md:1100 msgid "" -":py:obj:`unmask_stage `\\ " -"\\(driver\\, context\\, state\\)" -msgstr "" - -#: ../../source/ref-api/flwr.simulation.rst:2 -msgid "simulation" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-changelog.md:1104 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\, 
" -"client\\_fn\\[\\, ...\\]\\)" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." +#: ../../source/ref-changelog.md:1105 +msgid "" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: ../../source/ref-changelog.md:1106 msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." +#: ../../source/ref-changelog.md:1107 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 -msgid "run\\_simulation" +#: ../../source/ref-changelog.md:1108 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:3 of +#: ../../source/ref-changelog.md:1109 msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:6 of +#: ../../source/ref-changelog.md:1110 msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive " -"messages sent by the `ServerApp`." 
+"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:9 of +#: ../../source/ref-changelog.md:1111 msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in the " -"ServerApp and receive a Message describing what the ClientApp should perform." +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:15 of +#: ../../source/ref-changelog.md:1117 msgid "" -"'A dictionary, e.g {\"\": , \"\": } to configure a " -"backend. Values supported in are those included by `flwr.common." -"typing.ConfigsRecordValues`." +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:19 of +#: ../../source/ref-changelog.md:1119 msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. This " -"is desirable if you make use of a TensorFlow model on your `ServerApp` while " -"having your `ClientApp` running on the same GPU. Without enabling this, you " -"might encounter an out-of-memory error because TensorFlow, by default, " -"allocates all GPU memory. Read more about how `tf.config.experimental." -"set_memory_growth()` works in the TensorFlow documentation: https://www." -"tensorflow.org/api/stable." 
+"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: ../../source/ref-changelog.md:1121 msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If " -"enabled, DEBUG-level logs will be displayed." -msgstr "" - -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -msgid "start\\_simulation" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: flwr.simulation.app.start_simulation:3 of +#: ../../source/ref-changelog.md:1123 msgid "" -"A function creating client instances. The function must take a single `str` " -"argument called `cid`. It should return a single client instance of type " -"Client. Note that the created client instances are ephemeral and will often " -"be destroyed after a single method invocation. Since client instances are " -"not long-lived, they should not attempt to carry state over method " -"invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load evaluation " -"data in the `evaluate` method itself)." +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." msgstr "" -#: flwr.simulation.app.start_simulation:13 of +#: ../../source/ref-changelog.md:1125 msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." 
+"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: flwr.simulation.app.start_simulation:16 of +#: ../../source/ref-changelog.md:1127 msgid "" -"List `client_id`s for each client. This is only required if `num_clients` is " -"not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -#: flwr.simulation.app.start_simulation:20 of +#: ../../source/ref-changelog.md:1129 msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` and " -"`num_gpus`. To understand the GPU utilization caused by `num_gpus`, as well " -"as using custom resources, please consult the Ray documentation." +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -#: flwr.simulation.app.start_simulation:25 of +#: ../../source/ref-changelog.md:1131 msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." msgstr "" -#: flwr.simulation.app.start_simulation:31 of +#: ../../source/ref-changelog.md:1133 msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If no " -"strategy is provided, then `start_server` will use `flwr.server.strategy." -"FedAvg`." +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. 
The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: ../../source/ref-changelog.md:1135 msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`. If " -"no implementation is provided, then `start_simulation` will use `flwr.server." -"client_manager.SimpleClientManager`." +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-changelog.md:1137 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to ray.init." +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: ../../source/ref-changelog.md:1139 msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. 
If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: ../../source/ref-changelog.md:1141 +msgid "" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -#: flwr.simulation.app.start_simulation:45 of +#: ../../source/ref-changelog.md:1143 msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any arguments " -"from being passed to ray.init." +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: flwr.simulation.app.start_simulation:48 of +#: ../../source/ref-changelog.md:1145 msgid "" -"Set to True to prevent `ray.shutdown()` in case `ray.is_initialized()=True`." +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -#: flwr.simulation.app.start_simulation:50 of +#: ../../source/ref-changelog.md:1147 msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: ../../source/ref-changelog.md:1149 msgid "" -"If you want to create your own Actor classes, you might need to pass some " -"input argument. You can use this dictionary for such purpose." 
+"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" -#: flwr.simulation.app.start_simulation:57 of +#: ../../source/ref-changelog.md:1151 msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for the " -"VCE to choose in which node the actor is placed. If you are an advanced user " -"needed more control you can use lower-level scheduling strategies to pin " -"actors to specific compute nodes (e.g. via NodeAffinitySchedulingStrategy). " -"Please note this is an advanced feature. For all details, please refer to " -"the Ray documentation: https://docs.ray.io/en/latest/ray-core/scheduling/" -"index.html" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +#: ../../source/ref-changelog.md:1153 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: ../../source/ref-changelog.md:1155 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/ref-changelog.md:3 -msgid "v1.9.0 (2024-06-10)" +#: ../../source/ref-changelog.md:1157 +msgid "" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." 
msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:105 -#: ../../source/ref-changelog.md:169 ../../source/ref-changelog.md:262 -#: ../../source/ref-changelog.md:362 ../../source/ref-changelog.md:446 -#: ../../source/ref-changelog.md:510 ../../source/ref-changelog.md:568 -#: ../../source/ref-changelog.md:637 ../../source/ref-changelog.md:706 -msgid "Thanks to our contributors" +#: ../../source/ref-changelog.md:1159 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:107 -#: ../../source/ref-changelog.md:171 ../../source/ref-changelog.md:264 -#: ../../source/ref-changelog.md:364 ../../source/ref-changelog.md:448 -#: ../../source/ref-changelog.md:512 ../../source/ref-changelog.md:570 +#: ../../source/ref-changelog.md:1161 msgid "" -"We would like to give our special thanks to all the contributors who made " -"the new version of Flower possible (in `git shortlog` order):" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -#: ../../source/ref-changelog.md:9 +#: ../../source/ref-changelog.md:1165 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
Beutel`, " -"`Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`, `Robert " -"Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " -msgstr "" - -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:175 ../../source/ref-changelog.md:268 -#: ../../source/ref-changelog.md:368 ../../source/ref-changelog.md:452 -#: ../../source/ref-changelog.md:516 ../../source/ref-changelog.md:574 -#: ../../source/ref-changelog.md:643 ../../source/ref-changelog.md:772 -#: ../../source/ref-changelog.md:814 ../../source/ref-changelog.md:881 -#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:992 -#: ../../source/ref-changelog.md:1031 ../../source/ref-changelog.md:1064 -#: ../../source/ref-changelog.md:1114 -msgid "What's new?" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/ref-changelog.md:13 +#: ../../source/ref-changelog.md:1166 msgid "" -"**Introduce built-in authentication (preview)** ([#2946](https://github.com/" -"adap/flower/pull/2946), [#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), [#2917](https://github." 
-"com/adap/flower/pull/2917), [#3386](https://github.com/adap/flower/" -"pull/3386), [#3308](https://github.com/adap/flower/pull/3308), [#3001]" -"(https://github.com/adap/flower/pull/3001), [#3409](https://github.com/adap/" -"flower/pull/3409), [#2999](https://github.com/adap/flower/pull/2999), [#2979]" -"(https://github.com/adap/flower/pull/2979), [#3389](https://github.com/adap/" -"flower/pull/3389), [#3503](https://github.com/adap/flower/pull/3503), [#3366]" -"(https://github.com/adap/flower/pull/3366), [#3357](https://github.com/adap/" -"flower/pull/3357))" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/ref-changelog.md:15 +#: ../../source/ref-changelog.md:1167 msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to connect " -"Flower to external authentication systems. With this release, the SuperLink " -"can authenticate SuperNodes using a built-in authentication system. A new " -"[how-to guide](https://flower.ai/docs/framework/how-to-authenticate-" -"supernodes.html) and a new [code example](https://github.com/adap/flower/" -"tree/main/examples/flower-authentication) help you to get started." +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/ref-changelog.md:17 +#: ../../source/ref-changelog.md:1168 msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." 
+"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/ref-changelog.md:19 +#: ../../source/ref-changelog.md:1169 msgid "" -"**Introduce end-to-end Docker support** ([#3483](https://github.com/adap/" -"flower/pull/3483), [#3266](https://github.com/adap/flower/pull/3266), [#3390]" -"(https://github.com/adap/flower/pull/3390), [#3283](https://github.com/adap/" -"flower/pull/3283), [#3285](https://github.com/adap/flower/pull/3285), [#3391]" -"(https://github.com/adap/flower/pull/3391), [#3403](https://github.com/adap/" -"flower/pull/3403), [#3458](https://github.com/adap/flower/pull/3458), [#3533]" -"(https://github.com/adap/flower/pull/3533), [#3453](https://github.com/adap/" -"flower/pull/3453), [#3486](https://github.com/adap/flower/pull/3486), [#3290]" -"(https://github.com/adap/flower/pull/3290))" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/ref-changelog.md:21 +#: ../../source/ref-changelog.md:1170 msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower " -"SuperNode, and the Flower `ServerApp`. This set of images enables you to run " -"all Flower components in Docker. Check out the new [how-to guide](https://" -"flower.ai/docs/framework/how-to-run-flower-using-docker.html) to get stated." +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/ref-changelog.md:23 +#: ../../source/ref-changelog.md:1171 msgid "" -"**Re-architect Flower Next simulation engine** ([#3307](https://github.com/" -"adap/flower/pull/3307), [#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), [#3273](https://github." 
-"com/adap/flower/pull/3273), [#3417](https://github.com/adap/flower/" -"pull/3417), [#3281](https://github.com/adap/flower/pull/3281), [#3343]" -"(https://github.com/adap/flower/pull/3343), [#3326](https://github.com/adap/" -"flower/pull/3326))" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -#: ../../source/ref-changelog.md:25 +#: ../../source/ref-changelog.md:1175 msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves the " -"reliability of simulations, especially in notebook environments. This is a " -"significant step towards a complete overhaul of the Flower Next simulation " -"architecture." +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/ref-changelog.md:27 +#: ../../source/ref-changelog.md:1177 msgid "" -"**Upgrade simulation engine** ([#3354](https://github.com/adap/flower/" -"pull/3354), [#3378](https://github.com/adap/flower/pull/3378), [#3262]" -"(https://github.com/adap/flower/pull/3262), [#3435](https://github.com/adap/" -"flower/pull/3435), [#3501](https://github.com/adap/flower/pull/3501), [#3482]" -"(https://github.com/adap/flower/pull/3482), [#3494](https://github.com/adap/" -"flower/pull/3494))" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -#: ../../source/ref-changelog.md:29 -msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to use " -"Ray 2.10." 
+#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/ref-changelog.md:31 +#: ../../source/ref-changelog.md:1183 msgid "" -"**Introduce FedPFT baseline** ([#3268](https://github.com/adap/flower/" -"pull/3268))" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-changelog.md:33 +#: ../../source/ref-changelog.md:1185 msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication " -"costs while delivering high performing models. This is work led by Mahdi " -"Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated " -"Learning with Foundation Models\" ([arxiv](https://arxiv.org/abs/2402.01862))" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." 
msgstr "" -#: ../../source/ref-changelog.md:35 +#: ../../source/ref-changelog.md:1187 msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** ([#3291](https://github.com/adap/" -"flower/pull/3291), [#3139](https://github.com/adap/flower/pull/3139), [#3284]" -"(https://github.com/adap/flower/pull/3284), [#3251](https://github.com/adap/" -"flower/pull/3251), [#3376](https://github.com/adap/flower/pull/3376), [#3287]" -"(https://github.com/adap/flower/pull/3287))" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -#: ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:1189 msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's favorite " -"way of creating new Flower projects. This release introduces additional " -"`flwr new` templates for Apple MLX, Hugging Face Transformers, scikit-learn " -"and TensorFlow. In addition to that, existing templates also received " -"updates." +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:1191 msgid "" -"**Refine** `RecordSet` **API** ([#3209](https://github.com/adap/flower/" -"pull/3209), [#3331](https://github.com/adap/flower/pull/3331), [#3334]" -"(https://github.com/adap/flower/pull/3334), [#3335](https://github.com/adap/" -"flower/pull/3335), [#3375](https://github.com/adap/flower/pull/3375), [#3368]" -"(https://github.com/adap/flower/pull/3368))" +"FedYogi - Federated learning strategy using Yogi on server-side. 
" +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-changelog.md:41 +#: ../../source/ref-changelog.md:1192 msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/ref-changelog.md:43 +#: ../../source/ref-changelog.md:1194 msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), [#3461](https://github." -"com/adap/flower/pull/3461), [#3360](https://github.com/adap/flower/" -"pull/3360), [#3433](https://github.com/adap/flower/pull/3433))" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/ref-changelog.md:45 +#: ../../source/ref-changelog.md:1196 msgid "" -"Logs received a substantial update. Not only are logs now much nicer to look " -"at, but they are also more configurable." +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/ref-changelog.md:47 +#: ../../source/ref-changelog.md:1198 msgid "" -"**Improve reliability** ([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), [#3566](https://github." 
-"com/adap/flower/pull/3566), [#3462](https://github.com/adap/flower/" -"pull/3462), [#3225](https://github.com/adap/flower/pull/3225), [#3514]" -"(https://github.com/adap/flower/pull/3514), [#3535](https://github.com/adap/" -"flower/pull/3535), [#3372](https://github.com/adap/flower/pull/3372))" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/ref-changelog.md:49 +#: ../../source/ref-changelog.md:1200 msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: ../../source/ref-changelog.md:51 +#: ../../source/ref-changelog.md:1204 msgid "" -"**Update Swift and C++ SDKs** ([#3321](https://github.com/adap/flower/" -"pull/3321), [#2763](https://github.com/adap/flower/pull/2763))" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/ref-changelog.md:53 +#: ../../source/ref-changelog.md:1205 msgid "" -"In the C++ SDK, communication-related code is now separate from main client " -"logic. A new abstract class `Communicator` has been introduced alongside a " -"gRPC implementation of it." 
+"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/ref-changelog.md:55 +#: ../../source/ref-changelog.md:1206 msgid "" -"**Improve testing, tooling and CI/CD infrastructure** ([#3294](https://" -"github.com/adap/flower/pull/3294), [#3282](https://github.com/adap/flower/" -"pull/3282), [#3311](https://github.com/adap/flower/pull/3311), [#2878]" -"(https://github.com/adap/flower/pull/2878), [#3333](https://github.com/adap/" -"flower/pull/3333), [#3255](https://github.com/adap/flower/pull/3255), [#3349]" -"(https://github.com/adap/flower/pull/3349), [#3400](https://github.com/adap/" -"flower/pull/3400), [#3401](https://github.com/adap/flower/pull/3401), [#3399]" -"(https://github.com/adap/flower/pull/3399), [#3346](https://github.com/adap/" -"flower/pull/3346), [#3398](https://github.com/adap/flower/pull/3398), [#3397]" -"(https://github.com/adap/flower/pull/3397), [#3347](https://github.com/adap/" -"flower/pull/3347), [#3502](https://github.com/adap/flower/pull/3502), [#3387]" -"(https://github.com/adap/flower/pull/3387), [#3542](https://github.com/adap/" -"flower/pull/3542), [#3396](https://github.com/adap/flower/pull/3396), [#3496]" -"(https://github.com/adap/flower/pull/3496), [#3465](https://github.com/adap/" -"flower/pull/3465), [#3473](https://github.com/adap/flower/pull/3473), [#3484]" -"(https://github.com/adap/flower/pull/3484), [#3521](https://github.com/adap/" -"flower/pull/3521), [#3363](https://github.com/adap/flower/pull/3363), [#3497]" -"(https://github.com/adap/flower/pull/3497), [#3464](https://github.com/adap/" -"flower/pull/3464), [#3495](https://github.com/adap/flower/pull/3495), [#3478]" -"(https://github.com/adap/flower/pull/3478), [#3271](https://github.com/adap/" -"flower/pull/3271))" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/ref-changelog.md:57 +#: ../../source/ref-changelog.md:1207 msgid 
"" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/ref-changelog.md:59 +#: ../../source/ref-changelog.md:1208 msgid "" -"**Improve documentation** ([#3530](https://github.com/adap/flower/" -"pull/3530), [#3539](https://github.com/adap/flower/pull/3539), [#3425]" -"(https://github.com/adap/flower/pull/3425), [#3520](https://github.com/adap/" -"flower/pull/3520), [#3286](https://github.com/adap/flower/pull/3286), [#3516]" -"(https://github.com/adap/flower/pull/3516), [#3523](https://github.com/adap/" -"flower/pull/3523), [#3545](https://github.com/adap/flower/pull/3545), [#3498]" -"(https://github.com/adap/flower/pull/3498), [#3439](https://github.com/adap/" -"flower/pull/3439), [#3440](https://github.com/adap/flower/pull/3440), [#3382]" -"(https://github.com/adap/flower/pull/3382), [#3559](https://github.com/adap/" -"flower/pull/3559), [#3432](https://github.com/adap/flower/pull/3432), [#3278]" -"(https://github.com/adap/flower/pull/3278), [#3371](https://github.com/adap/" -"flower/pull/3371), [#3519](https://github.com/adap/flower/pull/3519), [#3267]" -"(https://github.com/adap/flower/pull/3267), [#3204](https://github.com/adap/" -"flower/pull/3204), [#3274](https://github.com/adap/flower/pull/3274))" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -#: ../../source/ref-changelog.md:61 +#: ../../source/ref-changelog.md:1212 msgid "" -"As always, the Flower documentation has received many updates. Notable new " -"pages include:" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/ref-changelog.md:63 +#: ../../source/ref-changelog.md:1214 msgid "" -"[How-to upgrate to Flower Next (Flower Next migration guide)](https://flower." 
-"ai/docs/framework/how-to-upgrade-to-flower-next.html)" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." msgstr "" -#: ../../source/ref-changelog.md:65 +#: ../../source/ref-changelog.md:1216 msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-run-" -"flower-using-docker.html)" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/ref-changelog.md:67 +#: ../../source/ref-changelog.md:1218 msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-api/flwr.client." -"mod.html#module-flwr.client.mod)" +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." 
msgstr "" -#: ../../source/ref-changelog.md:69 +#: ../../source/ref-changelog.md:1220 msgid "" -"**General updates to Flower Examples** ([#3205](https://github.com/adap/" -"flower/pull/3205), [#3226](https://github.com/adap/flower/pull/3226), [#3211]" -"(https://github.com/adap/flower/pull/3211), [#3252](https://github.com/adap/" -"flower/pull/3252), [#3427](https://github.com/adap/flower/pull/3427), [#3410]" -"(https://github.com/adap/flower/pull/3410), [#3426](https://github.com/adap/" -"flower/pull/3426), [#3228](https://github.com/adap/flower/pull/3228), [#3342]" -"(https://github.com/adap/flower/pull/3342), [#3200](https://github.com/adap/" -"flower/pull/3200), [#3202](https://github.com/adap/flower/pull/3202), [#3394]" -"(https://github.com/adap/flower/pull/3394), [#3488](https://github.com/adap/" -"flower/pull/3488), [#3329](https://github.com/adap/flower/pull/3329), [#3526]" -"(https://github.com/adap/flower/pull/3526), [#3392](https://github.com/adap/" -"flower/pull/3392), [#3474](https://github.com/adap/flower/pull/3474), [#3269]" -"(https://github.com/adap/flower/pull/3269))" -msgstr "" - -#: ../../source/ref-changelog.md:71 -msgid "As always, Flower code examples have received many updates." +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/ref-changelog.md:73 -msgid "" -"**General improvements** ([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), [#3565](https://github." 
-"com/adap/flower/pull/3565), [#3296](https://github.com/adap/flower/" -"pull/3296), [#3305](https://github.com/adap/flower/pull/3305), [#3246]" -"(https://github.com/adap/flower/pull/3246), [#3224](https://github.com/adap/" -"flower/pull/3224), [#3475](https://github.com/adap/flower/pull/3475), [#3297]" -"(https://github.com/adap/flower/pull/3297), [#3317](https://github.com/adap/" -"flower/pull/3317), [#3429](https://github.com/adap/flower/pull/3429), [#3196]" -"(https://github.com/adap/flower/pull/3196), [#3534](https://github.com/adap/" -"flower/pull/3534), [#3240](https://github.com/adap/flower/pull/3240), [#3365]" -"(https://github.com/adap/flower/pull/3365), [#3407](https://github.com/adap/" -"flower/pull/3407), [#3563](https://github.com/adap/flower/pull/3563), [#3344]" -"(https://github.com/adap/flower/pull/3344), [#3330](https://github.com/adap/" -"flower/pull/3330), [#3436](https://github.com/adap/flower/pull/3436), [#3300]" -"(https://github.com/adap/flower/pull/3300), [#3327](https://github.com/adap/" -"flower/pull/3327), [#3254](https://github.com/adap/flower/pull/3254), [#3253]" -"(https://github.com/adap/flower/pull/3253), [#3419](https://github.com/adap/" -"flower/pull/3419), [#3289](https://github.com/adap/flower/pull/3289), [#3208]" -"(https://github.com/adap/flower/pull/3208), [#3245](https://github.com/adap/" -"flower/pull/3245), [#3319](https://github.com/adap/flower/pull/3319), [#3203]" -"(https://github.com/adap/flower/pull/3203), [#3423](https://github.com/adap/" -"flower/pull/3423), [#3352](https://github.com/adap/flower/pull/3352), [#3292]" -"(https://github.com/adap/flower/pull/3292), [#3261](https://github.com/adap/" -"flower/pull/3261))" -msgstr "" - -#: ../../source/ref-changelog.md:75 ../../source/ref-changelog.md:1058 -msgid "Deprecations" +#: ../../source/ref-changelog.md:1222 +msgid "" +"This example has been replaced by a new example. 
The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -#: ../../source/ref-changelog.md:77 -msgid "**Deprecate Python 3.8 support**" +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" msgstr "" -#: ../../source/ref-changelog.md:79 +#: ../../source/ref-changelog.md:1228 msgid "" -"Python 3.8 will stop receiving security fixes in [October 2024](https://" -"devguide.python.org/versions/). Support for Python 3.8 is now deprecated and " -"will be removed in an upcoming release." +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:81 -msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-api` " -"([#3416](https://github.com/adap/flower/pull/3416), [#3420](https://github." -"com/adap/flower/pull/3420))" +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" msgstr "" -#: ../../source/ref-changelog.md:83 +#: ../../source/ref-changelog.md:1233 msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api` " -"and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-changelog.md:85 +#: ../../source/ref-changelog.md:1235 msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` ([#3518](https://" -"github.com/adap/flower/pull/3518))" +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. 
Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -#: ../../source/ref-changelog.md:87 +#: ../../source/ref-changelog.md:1237 msgid "" -"The commands `flower-server-app` and `flower-client-app` should use `--" -"superlink` instead of the now deprecated `--server`. Support for `--server` " -"will be removed in a future release." +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -#: ../../source/ref-changelog.md:89 ../../source/ref-changelog.md:163 -#: ../../source/ref-changelog.md:238 ../../source/ref-changelog.md:350 -#: ../../source/ref-changelog.md:440 ../../source/ref-changelog.md:504 -#: ../../source/ref-changelog.md:562 ../../source/ref-changelog.md:631 -#: ../../source/ref-changelog.md:693 ../../source/ref-changelog.md:712 -#: ../../source/ref-changelog.md:868 ../../source/ref-changelog.md:939 -#: ../../source/ref-changelog.md:976 ../../source/ref-changelog.md:1019 -msgid "Incompatible changes" +#: ../../source/ref-changelog.md:1239 +msgid "" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." 
msgstr "" -#: ../../source/ref-changelog.md:91 +#: ../../source/ref-changelog.md:1241 msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** `--" -"ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` ([#3512]" -"(https://github.com/adap/flower/pull/3512), [#3408](https://github.com/adap/" -"flower/pull/3408))" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." msgstr "" -#: ../../source/ref-changelog.md:93 +#: ../../source/ref-changelog.md:1243 msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, `--ssl-" -"certfile` and `--ssl-keyfile`) with one value each. Check out the [SSL " -"connections](https://flower.ai/docs/framework/how-to-enable-ssl-connections." -"html) documentation page for details." +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/ref-changelog.md:95 +#: ../../source/ref-changelog.md:1245 msgid "" -"**Remove SuperLink** `--vce` **option** ([#3513](https://github.com/adap/" -"flower/pull/3513))" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" -#: ../../source/ref-changelog.md:97 +#: ../../source/ref-changelog.md:1247 msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for simulation, " -"simulations must now be started using the single `flower-simulation` command." 
+"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/ref-changelog.md:99 -msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** ([#3527]" -"(https://github.com/adap/flower/pull/3527))" +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" msgstr "" -#: ../../source/ref-changelog.md:101 +#: ../../source/ref-changelog.md:1251 msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of CLI " -"options for gRPC and REST were merged into one unified set of options. " -"Consult the [Flower CLI reference documentation](https://flower.ai/docs/" -"framework/ref-api-cli.html) for details." +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/ref-changelog.md:103 -msgid "v1.8.0 (2024-04-03)" +#: ../../source/ref-changelog.md:1255 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/ref-changelog.md:109 +#: ../../source/ref-changelog.md:1257 msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear Ashimine`, " -"`Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, `Sebastian van der " -"Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, `tabdar-khan` " +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). 
" +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -#: ../../source/ref-changelog.md:113 +#: ../../source/ref-changelog.md:1259 msgid "" -"**Introduce Flower Next high-level API (stable)** ([#3002](https://github." -"com/adap/flower/pull/3002), [#2934](https://github.com/adap/flower/" -"pull/2934), [#2958](https://github.com/adap/flower/pull/2958), [#3173]" -"(https://github.com/adap/flower/pull/3173), [#3174](https://github.com/adap/" -"flower/pull/3174), [#2923](https://github.com/adap/flower/pull/2923), [#2691]" -"(https://github.com/adap/flower/pull/2691), [#3079](https://github.com/adap/" -"flower/pull/3079), [#2961](https://github.com/adap/flower/pull/2961), [#2924]" -"(https://github.com/adap/flower/pull/2924), [#3166](https://github.com/adap/" -"flower/pull/3166), [#3031](https://github.com/adap/flower/pull/3031), [#3057]" -"(https://github.com/adap/flower/pull/3057), [#3000](https://github.com/adap/" -"flower/pull/3000), [#3113](https://github.com/adap/flower/pull/3113), [#2957]" -"(https://github.com/adap/flower/pull/2957), [#3183](https://github.com/adap/" -"flower/pull/3183), [#3180](https://github.com/adap/flower/pull/3180), [#3035]" -"(https://github.com/adap/flower/pull/3035), [#3189](https://github.com/adap/" -"flower/pull/3189), [#3185](https://github.com/adap/flower/pull/3185), [#3190]" -"(https://github.com/adap/flower/pull/3190), [#3191](https://github.com/adap/" -"flower/pull/3191), [#3195](https://github.com/adap/flower/pull/3195), [#3197]" -"(https://github.com/adap/flower/pull/3197))" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. 
Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -#: ../../source/ref-changelog.md:115 +#: ../../source/ref-changelog.md:1261 msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. You " -"can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or `quickstart-" -"tensorflow`, a detailed migration guide will follow shortly). Flower Next " -"allows you to run multiple projects concurrently (we call this multi-run) " -"and execute the same project in either simulation environments or deployment " -"environments without having to change a single line of code. The best part? " -"It's fully compatible with existing Flower projects that use `Strategy`, " -"`NumPyClient` & co." +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/ref-changelog.md:117 -msgid "" -"**Introduce Flower Next low-level API (preview)** ([#3062](https://github." -"com/adap/flower/pull/3062), [#3034](https://github.com/adap/flower/" -"pull/3034), [#3069](https://github.com/adap/flower/pull/3069))" +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" msgstr "" -#: ../../source/ref-changelog.md:119 +#: ../../source/ref-changelog.md:1267 msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the new " -"Flower Next *low-level* API. The low-level API allows for granular control " -"of every aspect of the learning process by sending/receiving individual " -"messages to/from client nodes. 
The new `ServerApp` supports registering a " -"custom `main` function that allows writing custom training loops for methods " -"like async FL, cyclic training, or federated analytics. The new `ClientApp` " -"supports registering `train`, `evaluate` and `query` functions that can " -"access the raw message received from the `ServerApp`. New abstractions like " -"`RecordSet`, `Message` and `Context` further enable sending multiple models, " -"multiple sets of config values and metrics, stateful computations on the " -"client node and implementations of custom SMPC protocols, to name just a few." +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/ref-changelog.md:121 +#: ../../source/ref-changelog.md:1269 msgid "" -"**Introduce Flower Mods (preview)** ([#3054](https://github.com/adap/flower/" -"pull/3054), [#2911](https://github.com/adap/flower/pull/2911), [#3083]" -"(https://github.com/adap/flower/pull/3083))" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -#: ../../source/ref-changelog.md:123 +#: ../../source/ref-changelog.md:1271 msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable modules " -"that work across different projects. Flower 1.8 already includes mods to log " -"the size of a message, the number of parameters sent over the network, " -"differential privacy with fixed clipping and adaptive clipping, local " -"differential privacy and secure aggregation protocols SecAgg and SecAgg+. " -"The Flower Mods API is released as a preview, but researchers can already " -"use it to experiment with arbirtrary SMPC protocols." +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. 
Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -#: ../../source/ref-changelog.md:125 +#: ../../source/ref-changelog.md:1290 msgid "" -"**Fine-tune LLMs with LLM FlowerTune** ([#3029](https://github.com/adap/" -"flower/pull/3029), [#3089](https://github.com/adap/flower/pull/3089), [#3092]" -"(https://github.com/adap/flower/pull/3092), [#3100](https://github.com/adap/" -"flower/pull/3100), [#3114](https://github.com/adap/flower/pull/3114), [#3162]" -"(https://github.com/adap/flower/pull/3162), [#3172](https://github.com/adap/" -"flower/pull/3172))" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -#: ../../source/ref-changelog.md:127 +#: ../../source/ref-changelog.md:1294 msgid "" -"We are introducing LLM FlowerTune, an introductory example that demonstrates " -"federated LLM fine-tuning of pre-trained Llama2 models on the Alpaca-GPT4 " -"dataset. The example is built to be easily adapted to use different models " -"and/or datasets. Read our blog post [LLM FlowerTune: Federated LLM Fine-" -"tuning with Flower](https://flower.ai/blog/2024-03-14-llm-flowertune-" -"federated-llm-finetuning-with-flower/) for more details." 
+"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -#: ../../source/ref-changelog.md:129 -msgid "" -"**Introduce built-in Differential Privacy (preview)** ([#2798](https://" -"github.com/adap/flower/pull/2798), [#2959](https://github.com/adap/flower/" -"pull/2959), [#3038](https://github.com/adap/flower/pull/3038), [#3147]" -"(https://github.com/adap/flower/pull/3147), [#2909](https://github.com/adap/" -"flower/pull/2909), [#2893](https://github.com/adap/flower/pull/2893), [#2892]" -"(https://github.com/adap/flower/pull/2892), [#3039](https://github.com/adap/" -"flower/pull/3039), [#3074](https://github.com/adap/flower/pull/3074))" +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" msgstr "" -#: ../../source/ref-changelog.md:131 +#: ../../source/ref-changelog.md:1300 msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either " -"fixed or adaptive clipping. The clipping can happen either on the server-" -"side or the client-side. Local DP does both clipping and noising on the " -"client-side. A new documentation page [explains Differential Privacy " -"approaches](https://flower.ai/docs/framework/explanation-differential-" -"privacy.html) and a new how-to guide describes [how to use the new " -"Differential Privacy components](https://flower.ai/docs/framework/how-to-use-" -"differential-privacy.html) in Flower." +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/ref-changelog.md:133 +#: ../../source/ref-changelog.md:1302 msgid "" -"**Introduce built-in Secure Aggregation (preview)** ([#3120](https://github." 
-"com/adap/flower/pull/3120), [#3110](https://github.com/adap/flower/" -"pull/3110), [#3108](https://github.com/adap/flower/pull/3108))" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -#: ../../source/ref-changelog.md:135 +#: ../../source/ref-changelog.md:1304 msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure " -"aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In this " -"initial release, we inlcude support for SecAgg and SecAgg+, but more " -"protocols will be implemented shortly. We'll also add detailed docs that " -"explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation in " -"the same project." +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." 
msgstr "" -#: ../../source/ref-changelog.md:137 +#: ../../source/ref-changelog.md:1306 msgid "" -"**Introduce** `flwr` **CLI (preview)** ([#2942](https://github.com/adap/" -"flower/pull/2942), [#3055](https://github.com/adap/flower/pull/3055), [#3111]" -"(https://github.com/adap/flower/pull/3111), [#3130](https://github.com/adap/" -"flower/pull/3130), [#3136](https://github.com/adap/flower/pull/3136), [#3094]" -"(https://github.com/adap/flower/pull/3094), [#3059](https://github.com/adap/" -"flower/pull/3059), [#3049](https://github.com/adap/flower/pull/3049), [#3142]" -"(https://github.com/adap/flower/pull/3142))" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." msgstr "" -#: ../../source/ref-changelog.md:139 +#: ../../source/ref-changelog.md:1308 msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`) " -"and then running them using the Simulation Engine (`flwr run`)." 
+"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/ref-changelog.md:141 +#: ../../source/ref-changelog.md:1323 msgid "" -"**Introduce Flower Next Simulation Engine** ([#3024](https://github.com/adap/" -"flower/pull/3024), [#3061](https://github.com/adap/flower/pull/3061), [#2997]" -"(https://github.com/adap/flower/pull/2997), [#2783](https://github.com/adap/" -"flower/pull/2783), [#3184](https://github.com/adap/flower/pull/3184), [#3075]" -"(https://github.com/adap/flower/pull/3075), [#3047](https://github.com/adap/" -"flower/pull/3047), [#2998](https://github.com/adap/flower/pull/2998), [#3009]" -"(https://github.com/adap/flower/pull/3009), [#3008](https://github.com/adap/" -"flower/pull/3008))" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/ref-changelog.md:143 +#: ../../source/ref-changelog.md:1325 msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For notebook " -"environments, there's also a new `run_simulation` function that can run " -"`ServerApp` and `ClientApp`." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-changelog.md:1327 msgid "" -"**Handle SuperNode connection errors** ([#2969](https://github.com/adap/" -"flower/pull/2969))" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" 
msgstr "" -#: ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:1329 msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in case " -"of connection errors. The arguments `--max-retries` and `--max-wait-time` " -"can now be passed to the `flower-client-app` command. `--max-retries` will " -"define the number of tentatives the client should make before it gives up " -"trying to reconnect to the SuperLink, and, `--max-wait-time` defines the " -"time before the SuperNode gives up trying to reconnect to the SuperLink." +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -#: ../../source/ref-changelog.md:149 -msgid "" -"**General updates to Flower Baselines** ([#2904](https://github.com/adap/" -"flower/pull/2904), [#2482](https://github.com/adap/flower/pull/2482), [#2985]" -"(https://github.com/adap/flower/pull/2985), [#2968](https://github.com/adap/" -"flower/pull/2968))" +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/ref-changelog.md:151 +#: ../../source/ref-changelog.md:1350 msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." 
+"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:153 -msgid "" -"**Improve documentation and translations** ([#3050](https://github.com/adap/" -"flower/pull/3050), [#3044](https://github.com/adap/flower/pull/3044), [#3043]" -"(https://github.com/adap/flower/pull/3043), [#2986](https://github.com/adap/" -"flower/pull/2986), [#3041](https://github.com/adap/flower/pull/3041), [#3046]" -"(https://github.com/adap/flower/pull/3046), [#3042](https://github.com/adap/" -"flower/pull/3042), [#2978](https://github.com/adap/flower/pull/2978), [#2952]" -"(https://github.com/adap/flower/pull/2952), [#3167](https://github.com/adap/" -"flower/pull/3167), [#2953](https://github.com/adap/flower/pull/2953), [#3045]" -"(https://github.com/adap/flower/pull/3045), [#2654](https://github.com/adap/" -"flower/pull/2654), [#3082](https://github.com/adap/flower/pull/3082), [#2990]" -"(https://github.com/adap/flower/pull/2990), [#2989](https://github.com/adap/" -"flower/pull/2989))" +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" msgstr "" -#: ../../source/ref-changelog.md:155 -msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der Voort](https://" -"github.com/svdvoort) for landing a big documentation PR!" 
+#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/ref-changelog.md:157 -msgid "" -"**General updates to Flower Examples** ([3134](https://github.com/adap/" -"flower/pull/3134), [2996](https://github.com/adap/flower/pull/2996), [2930]" -"(https://github.com/adap/flower/pull/2930), [2967](https://github.com/adap/" -"flower/pull/2967), [2467](https://github.com/adap/flower/pull/2467), [2910]" -"(https://github.com/adap/flower/pull/2910), [#2918](https://github.com/adap/" -"flower/pull/2918), [#2773](https://github.com/adap/flower/pull/2773), [#3063]" -"(https://github.com/adap/flower/pull/3063), [#3116](https://github.com/adap/" -"flower/pull/3116), [#3117](https://github.com/adap/flower/pull/3117))" +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" msgstr "" -#: ../../source/ref-changelog.md:159 +#: ../../source/ref-changelog.md:1354 msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) and " -"federated learning in a medical context using the popular MONAI library. " -"`quickstart-pytorch` and `quickstart-tensorflow` demonstrate the new Flower " -"Next `ServerApp` and `ClientApp`. Many other examples received considerable " -"updates as well." 
+"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/ref-changelog.md:161 +#: ../../source/ref-changelog.md:1355 msgid "" -"**General improvements** ([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), [3003](https://github.com/" -"adap/flower/pull/3003), [3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), [3085](https://github.com/" -"adap/flower/pull/3085), [3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), [2991](https://github.com/" -"adap/flower/pull/2991), [2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), [3086](https://github.com/" -"adap/flower/pull/3086), [2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), [2941](https://github.com/" -"adap/flower/pull/2941), [2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), [2973](https://github.com/" -"adap/flower/pull/2973), [2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), [3040](https://github.com/" -"adap/flower/pull/3040), [3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), [2902](https://github.com/" -"adap/flower/pull/2902), [2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), [3132](https://github.com/" -"adap/flower/pull/3132), [3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), [3064](https://github.com/" -"adap/flower/pull/3064), [3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), [3178](https://github.com/" -"adap/flower/pull/3178), 
[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), [3091](https://github.com/" -"adap/flower/pull/3091), [3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), [3013](https://github.com/" -"adap/flower/pull/3013), [3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), [3068](https://github.com/" -"adap/flower/pull/3068), [2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), [2984](https://github.com/" -"adap/flower/pull/2984), [2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), [3143](https://github.com/" -"adap/flower/pull/3143), [2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), [2927](https://github.com/" -"adap/flower/pull/2927), [2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), [2912](https://github.com/" -"adap/flower/pull/2912), [3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), [2922](https://github.com/" -"adap/flower/pull/2922), [2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), [3179](https://github.com/" -"adap/flower/pull/3179), [3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), [3187](https://github.com/" -"adap/flower/pull/3187), [2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), [3144](https://github.com/" -"adap/flower/pull/3144), [3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), [#2836](https://github." 
-"com/adap/flower/pull/2836), [#2929](https://github.com/adap/flower/" -"pull/2929), [#2943](https://github.com/adap/flower/pull/2943), [#2955]" -"(https://github.com/adap/flower/pull/2955), [#2954](https://github.com/adap/" -"flower/pull/2954))" -msgstr "" - -#: ../../source/ref-changelog.md:165 ../../source/ref-changelog.md:442 -#: ../../source/ref-changelog.md:506 ../../source/ref-changelog.md:564 -#: ../../source/ref-changelog.md:633 ../../source/ref-changelog.md:695 -msgid "None" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/ref-changelog.md:167 -msgid "v1.7.0 (2024-02-05)" +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" msgstr "" -#: ../../source/ref-changelog.md:173 +#: ../../source/ref-changelog.md:1359 msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles Beauville`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S Chaitanya Kumar`, `Mohammad " -"Naseri`, `Nikos Vlachakis`, `Pritam Neog`, `Robert Kuska`, `Robert Steiner`, " -"`Taner Topal`, `Yahia Salaheldin Shaaban`, `Yan Gao`, `Yasar Abbas` " +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." msgstr "" -#: ../../source/ref-changelog.md:177 -msgid "" -"**Introduce stateful clients (experimental)** ([#2770](https://github.com/" -"adap/flower/pull/2770), [#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), [#2643](https://github." 
-"com/adap/flower/pull/2643), [#2769](https://github.com/adap/flower/" -"pull/2769))" +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/ref-changelog.md:179 -msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via `start_simulation`) " -"and networked clients (via `start_client`). It's also the first preview of " -"new abstractions like `Context` and `RecordSet`. Clients can access state of " -"type `RecordSet` via `state: RecordSet = self.context.state`. Changes to " -"this `RecordSet` are preserved across different rounds of execution to " -"enable stateful computations in a unified way across simulation and " -"deployment." +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" msgstr "" -#: ../../source/ref-changelog.md:181 +#: ../../source/ref-changelog.md:1365 msgid "" -"**Improve performance** ([#2293](https://github.com/adap/flower/pull/2293))" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/ref-changelog.md:183 +#: ../../source/ref-changelog.md:1366 msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-place " -"aggregation to reduce memory consumption. The Flower client serialization/" -"deserialization has been rewritten from the ground up, which results in " -"significant speedups, especially when the client-side training time is short." 
+"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/ref-changelog.md:185 +#: ../../source/ref-changelog.md:1367 msgid "" -"**Support Federated Learning with Apple MLX and Flower** ([#2693](https://" -"github.com/adap/flower/pull/2693))" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/ref-changelog.md:187 -msgid "" -"Flower has official support for federated learning using [Apple MLX](https://" -"ml-explore.github.io/mlx) via the new `quickstart-mlx` code example." +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" +msgstr "" + +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" msgstr "" -#: ../../source/ref-changelog.md:189 +#: ../../source/ref-changelog.md:1373 msgid "" -"**Introduce new XGBoost cyclic strategy** ([#2666](https://github.com/adap/" -"flower/pull/2666), [#2668](https://github.com/adap/flower/pull/2668))" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/ref-changelog.md:191 -msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of " -"training (often called cyclic). The `xgboost-comprehensive` code example " -"shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. 
With this, Flower offers " -"best-in-class XGBoost support." +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:193 -msgid "" -"**Support Python 3.11** ([#2394](https://github.com/adap/flower/pull/2394))" +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" msgstr "" -#: ../../source/ref-changelog.md:195 -msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will ensure " -"better support for users using more recent Python versions." +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" msgstr "" -#: ../../source/ref-changelog.md:197 -msgid "" -"**Update gRPC and ProtoBuf dependencies** ([#2814](https://github.com/adap/" -"flower/pull/2814))" +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" msgstr "" -#: ../../source/ref-changelog.md:199 +#: ../../source/ref-changelog.md:1381 msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" -#: ../../source/ref-changelog.md:201 +#: ../../source/ref-changelog.md:1382 msgid "" -"**Introduce Docker image for Flower server** ([#2700](https://github.com/" -"adap/flower/pull/2700), [#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), [#2695](https://github." -"com/adap/flower/pull/2695), [#2747](https://github.com/adap/flower/" -"pull/2747), [#2746](https://github.com/adap/flower/pull/2746), [#2680]" -"(https://github.com/adap/flower/pull/2680), [#2682](https://github.com/adap/" -"flower/pull/2682), [#2701](https://github.com/adap/flower/pull/2701))" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." 
msgstr "" -#: ../../source/ref-changelog.md:203 +#: ../../source/ref-changelog.md:1383 msgid "" -"The Flower server can now be run using an official Docker image. A new how-" -"to guide explains [how to run Flower using Docker](https://flower.ai/docs/" -"framework/how-to-run-flower-using-docker.html). An official Flower client " -"Docker image will follow." +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -#: ../../source/ref-changelog.md:205 +#: ../../source/ref-changelog.md:1384 msgid "" -"**Introduce** `flower-via-docker-compose` **example** ([#2626](https://" -"github.com/adap/flower/pull/2626))" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" -#: ../../source/ref-changelog.md:207 +#: ../../source/ref-changelog.md:1385 msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** ([#2719](https://" -"github.com/adap/flower/pull/2719))" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" -#: ../../source/ref-changelog.md:209 -msgid "" -"**Introduce** `custom-metrics` **example** ([#1958](https://github.com/adap/" -"flower/pull/1958))" +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" msgstr "" -#: ../../source/ref-changelog.md:211 +#: ../../source/ref-example-projects.rst:4 msgid "" -"**Update code examples to use Flower Datasets** ([#2450](https://github.com/" -"adap/flower/pull/2450), [#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), [#2712](https://github." -"com/adap/flower/pull/2712))" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." 
msgstr "" -#: ../../source/ref-changelog.md:213 -msgid "" -"Several code examples were updated to use [Flower Datasets](https://flower." -"ai/docs/datasets/)." +#: ../../source/ref-example-projects.rst:9 +msgid "The following examples are available as standalone projects." msgstr "" -#: ../../source/ref-changelog.md:215 +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/ref-example-projects.rst:14 msgid "" -"**General updates to Flower Examples** ([#2381](https://github.com/adap/" -"flower/pull/2381), [#2805](https://github.com/adap/flower/pull/2805), [#2782]" -"(https://github.com/adap/flower/pull/2782), [#2806](https://github.com/adap/" -"flower/pull/2806), [#2829](https://github.com/adap/flower/pull/2829), [#2825]" -"(https://github.com/adap/flower/pull/2825), [#2816](https://github.com/adap/" -"flower/pull/2816), [#2726](https://github.com/adap/flower/pull/2726), [#2659]" -"(https://github.com/adap/flower/pull/2659), [#2655](https://github.com/adap/" -"flower/pull/2655))" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -#: ../../source/ref-changelog.md:217 -msgid "Many Flower code examples received substantial updates." 
+#: ../../source/ref-example-projects.rst:17 +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:312 -msgid "**Update Flower Baselines**" +#: ../../source/ref-example-projects.rst:19 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" msgstr "" -#: ../../source/ref-changelog.md:221 +#: ../../source/ref-example-projects.rst:20 msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), [#2771]" -"(https://github.com/adap/flower/pull/2771))" +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/ref-changelog.md:222 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" msgstr "" -#: ../../source/ref-changelog.md:223 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: ../../source/ref-example-projects.rst:26 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -#: ../../source/ref-changelog.md:224 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +#: ../../source/ref-example-projects.rst:29 +msgid "" +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:225 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +#: ../../source/ref-example-projects.rst:31 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" msgstr "" -#: ../../source/ref-changelog.md:226 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" msgstr "" -#: ../../source/ref-changelog.md:228 -msgid "" -"**Improve documentation** ([#2674](https://github.com/adap/flower/" -"pull/2674), [#2480](https://github.com/adap/flower/pull/2480), [#2826]" -"(https://github.com/adap/flower/pull/2826), 
[#2727](https://github.com/adap/" -"flower/pull/2727), [#2761](https://github.com/adap/flower/pull/2761), [#2900]" -"(https://github.com/adap/flower/pull/2900))" -msgstr "" - -#: ../../source/ref-changelog.md:230 -msgid "" -"**Improved testing and development infrastructure** ([#2797](https://github." -"com/adap/flower/pull/2797), [#2676](https://github.com/adap/flower/" -"pull/2676), [#2644](https://github.com/adap/flower/pull/2644), [#2656]" -"(https://github.com/adap/flower/pull/2656), [#2848](https://github.com/adap/" -"flower/pull/2848), [#2675](https://github.com/adap/flower/pull/2675), [#2735]" -"(https://github.com/adap/flower/pull/2735), [#2767](https://github.com/adap/" -"flower/pull/2767), [#2732](https://github.com/adap/flower/pull/2732), [#2744]" -"(https://github.com/adap/flower/pull/2744), [#2681](https://github.com/adap/" -"flower/pull/2681), [#2699](https://github.com/adap/flower/pull/2699), [#2745]" -"(https://github.com/adap/flower/pull/2745), [#2734](https://github.com/adap/" -"flower/pull/2734), [#2731](https://github.com/adap/flower/pull/2731), [#2652]" -"(https://github.com/adap/flower/pull/2652), [#2720](https://github.com/adap/" -"flower/pull/2720), [#2721](https://github.com/adap/flower/pull/2721), [#2717]" -"(https://github.com/adap/flower/pull/2717), [#2864](https://github.com/adap/" -"flower/pull/2864), [#2694](https://github.com/adap/flower/pull/2694), [#2709]" -"(https://github.com/adap/flower/pull/2709), [#2658](https://github.com/adap/" -"flower/pull/2658), [#2796](https://github.com/adap/flower/pull/2796), [#2692]" -"(https://github.com/adap/flower/pull/2692), [#2657](https://github.com/adap/" -"flower/pull/2657), [#2813](https://github.com/adap/flower/pull/2813), [#2661]" -"(https://github.com/adap/flower/pull/2661), [#2398](https://github.com/adap/" -"flower/pull/2398))" -msgstr "" - -#: ../../source/ref-changelog.md:232 -msgid "" -"The Flower testing and development infrastructure has received substantial " -"updates. 
This makes Flower 1.7 the most tested release ever." -msgstr "" - -#: ../../source/ref-changelog.md:234 -msgid "" -"**Update dependencies** ([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), [#2739](https://github." -"com/adap/flower/pull/2739), [#2837](https://github.com/adap/flower/" -"pull/2837), [#2788](https://github.com/adap/flower/pull/2788), [#2811]" -"(https://github.com/adap/flower/pull/2811), [#2774](https://github.com/adap/" -"flower/pull/2774), [#2790](https://github.com/adap/flower/pull/2790), [#2751]" -"(https://github.com/adap/flower/pull/2751), [#2850](https://github.com/adap/" -"flower/pull/2850), [#2812](https://github.com/adap/flower/pull/2812), [#2872]" -"(https://github.com/adap/flower/pull/2872), [#2736](https://github.com/adap/" -"flower/pull/2736), [#2756](https://github.com/adap/flower/pull/2756), [#2857]" -"(https://github.com/adap/flower/pull/2857), [#2757](https://github.com/adap/" -"flower/pull/2757), [#2810](https://github.com/adap/flower/pull/2810), [#2740]" -"(https://github.com/adap/flower/pull/2740), [#2789](https://github.com/adap/" -"flower/pull/2789))" -msgstr "" - -#: ../../source/ref-changelog.md:236 -msgid "" -"**General improvements** ([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), [#2877](https://github." 
-"com/adap/flower/pull/2877), [#2690](https://github.com/adap/flower/" -"pull/2690), [#2889](https://github.com/adap/flower/pull/2889), [#2874]" -"(https://github.com/adap/flower/pull/2874), [#2819](https://github.com/adap/" -"flower/pull/2819), [#2689](https://github.com/adap/flower/pull/2689), [#2457]" -"(https://github.com/adap/flower/pull/2457), [#2870](https://github.com/adap/" -"flower/pull/2870), [#2669](https://github.com/adap/flower/pull/2669), [#2876]" -"(https://github.com/adap/flower/pull/2876), [#2885](https://github.com/adap/" -"flower/pull/2885), [#2858](https://github.com/adap/flower/pull/2858), [#2867]" -"(https://github.com/adap/flower/pull/2867), [#2351](https://github.com/adap/" -"flower/pull/2351), [#2886](https://github.com/adap/flower/pull/2886), [#2860]" -"(https://github.com/adap/flower/pull/2860), [#2828](https://github.com/adap/" -"flower/pull/2828), [#2869](https://github.com/adap/flower/pull/2869), [#2875]" -"(https://github.com/adap/flower/pull/2875), [#2733](https://github.com/adap/" -"flower/pull/2733), [#2488](https://github.com/adap/flower/pull/2488), [#2646]" -"(https://github.com/adap/flower/pull/2646), [#2879](https://github.com/adap/" -"flower/pull/2879), [#2821](https://github.com/adap/flower/pull/2821), [#2855]" -"(https://github.com/adap/flower/pull/2855), [#2800](https://github.com/adap/" -"flower/pull/2800), [#2807](https://github.com/adap/flower/pull/2807), [#2801]" -"(https://github.com/adap/flower/pull/2801), [#2804](https://github.com/adap/" -"flower/pull/2804), [#2851](https://github.com/adap/flower/pull/2851), [#2787]" -"(https://github.com/adap/flower/pull/2787), [#2852](https://github.com/adap/" -"flower/pull/2852), [#2672](https://github.com/adap/flower/pull/2672), [#2759]" -"(https://github.com/adap/flower/pull/2759))" -msgstr "" - -#: ../../source/ref-changelog.md:240 -msgid "" -"**Deprecate** `start_numpy_client` ([#2563](https://github.com/adap/flower/" -"pull/2563), 
[#2718](https://github.com/adap/flower/pull/2718))" -msgstr "" - -#: ../../source/ref-changelog.md:242 +#: ../../source/ref-example-projects.rst:36 msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we have " -"introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need to " -"first call the `.to_client()` method and then pass returned `Client` object " -"to `start_client`. The examples and the documentation have been updated " -"accordingly." +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -#: ../../source/ref-changelog.md:244 +#: ../../source/ref-example-projects.rst:38 msgid "" -"**Deprecate legacy DP wrappers** ([#2749](https://github.com/adap/flower/" -"pull/2749))" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:246 +#: ../../source/ref-example-projects.rst:40 msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is in " -"preparation for an all-new pluggable version of differential privacy support " -"in Flower." 
+":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/ref-changelog.md:248 -msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/ref-changelog.md:250 +#: ../../source/ref-example-projects.rst:46 msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` ([#2890]" -"(https://github.com/adap/flower/pull/2890))" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/ref-changelog.md:252 +#: ../../source/ref-example-projects.rst:49 msgid "" -"**Drop experimental** `Task` **fields** ([#2866](https://github.com/adap/" -"flower/pull/2866), [#2865](https://github.com/adap/flower/pull/2865))" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:254 +#: ../../source/ref-example-projects.rst:51 msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed fields " -"are superseded by the new `RecordSet` abstraction." +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -#: ../../source/ref-changelog.md:256 +#: ../../source/ref-faq.rst:4 msgid "" -"**Retire MXNet examples** ([#2724](https://github.com/adap/flower/pull/2724))" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -#: ../../source/ref-changelog.md:258 -msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." 
+#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -#: ../../source/ref-changelog.md:260 -msgid "v1.6.0 (2023-11-28)" +#: ../../source/ref-faq.rst:9 +msgid "" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -#: ../../source/ref-changelog.md:266 +#: ../../source/ref-faq.rst:11 msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`, " -"`Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, `Steve " -"Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, `cnxdeveloper`, " -"`k3nfalt` " +"`Flower simulation PyTorch " +"`_" msgstr "" -#: ../../source/ref-changelog.md:270 +#: ../../source/ref-faq.rst:12 msgid "" -"**Add experimental support for Python 3.12** ([#2565](https://github.com/" -"adap/flower/pull/2565))" +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -#: ../../source/ref-changelog.md:272 -msgid "" -"**Add new XGBoost examples** ([#2612](https://github.com/adap/flower/" -"pull/2612), [#2554](https://github.com/adap/flower/pull/2554), [#2617]" -"(https://github.com/adap/flower/pull/2617), [#2618](https://github.com/adap/" -"flower/pull/2618), [#2619](https://github.com/adap/flower/pull/2619), [#2567]" -"(https://github.com/adap/flower/pull/2567))" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" msgstr "" -#: ../../source/ref-changelog.md:274 +#: ../../source/ref-faq.rst:16 msgid "" -"We have added a new `xgboost-quickstart` example alongside a new `xgboost-" -"comprehensive` example that goes more in-depth." 
+"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." msgstr "" -#: ../../source/ref-changelog.md:276 -msgid "" -"**Add Vertical FL example** ([#2598](https://github.com/adap/flower/" -"pull/2598))" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -#: ../../source/ref-changelog.md:278 +#: ../../source/ref-faq.rst:20 msgid "" -"We had many questions about Vertical Federated Learning using Flower, so we " -"decided to add an simple example for it on the [Titanic dataset](https://www." -"kaggle.com/competitions/titanic/data) alongside a tutorial (in the README)." +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -#: ../../source/ref-changelog.md:280 +#: ../../source/ref-faq.rst:22 msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` ([#2292](https://" -"github.com/adap/flower/pull/2292))" +"`Android Kotlin example `_" msgstr "" -#: ../../source/ref-changelog.md:282 -msgid "" -"**Update REST API to support create and delete nodes** ([#2283](https://" -"github.com/adap/flower/pull/2283))" +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" msgstr "" -#: ../../source/ref-changelog.md:284 +#: ../../source/ref-faq.rst:27 msgid "" -"**Update the Android SDK** ([#2187](https://github.com/adap/flower/" -"pull/2187))" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -#: ../../source/ref-changelog.md:286 -msgid "Add gRPC request-response capability to the Android SDK." +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." 
msgstr "" -#: ../../source/ref-changelog.md:288 -msgid "" -"**Update the C++ SDK** ([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), [#2523](https://github." -"com/adap/flower/pull/2523), [#2522](https://github.com/adap/flower/" -"pull/2522))" +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/ref-changelog.md:290 -msgid "Add gRPC request-response capability to the C++ SDK." +#: ../../source/ref-faq.rst:31 +msgid "Local blockchain with federated learning simulation." msgstr "" -#: ../../source/ref-changelog.md:292 +#: ../../source/ref-faq.rst:32 msgid "" -"**Make HTTPS the new default** ([#2591](https://github.com/adap/flower/" -"pull/2591), [#2636](https://github.com/adap/flower/pull/2636))" +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/ref-changelog.md:294 +#: ../../source/ref-faq.rst:33 msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP for " -"prototyping. The same applies to `flower-client`, which can either use user-" -"provided credentials or gRPC-bundled certificates to connect to an HTTPS-" -"enabled server or requires opt-out via passing `--insecure` to enable " -"insecure HTTP connections." +"`Flower meets Nevermined YouTube video " +"`_." msgstr "" -#: ../../source/ref-changelog.md:296 +#: ../../source/ref-faq.rst:34 msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` will " -"still start in insecure mode by default. In a future release, insecure " -"connections will require user opt-in by passing `insecure=True`." +"`Flower meets KOSMoS `_." 
msgstr "" -#: ../../source/ref-changelog.md:298 +#: ../../source/ref-faq.rst:35 msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), [#2493](https://github." -"com/adap/flower/pull/2493))" +"`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/ref-changelog.md:300 +#: ../../source/ref-faq.rst:36 msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as standalone " -"processes (i.e. via `start_client`) or in simulation (i.e. via " -"`start_simulation`) without requiring changes to how the client class is " -"defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +"`Flower meets Talan GitHub Repository " +"`_ ." msgstr "" -#: ../../source/ref-changelog.md:302 -msgid "" -"**Add new** `Bulyan` **strategy** ([#1817](https://github.com/adap/flower/" -"pull/1817), [#1891](https://github.com/adap/flower/pull/1891))" +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" msgstr "" -#: ../../source/ref-changelog.md:304 +#: ../../source/ref-telemetry.md:3 msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., 2018]" -"(https://arxiv.org/abs/1802.07927)" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." msgstr "" -#: ../../source/ref-changelog.md:306 +#: ../../source/ref-telemetry.md:5 msgid "" -"**Add new** `XGB Bagging` **strategy** ([#2611](https://github.com/adap/" -"flower/pull/2611))" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." 
msgstr "" -#: ../../source/ref-changelog.md:308 ../../source/ref-changelog.md:310 -msgid "" -"**Introduce `WorkloadState`** ([#2564](https://github.com/adap/flower/" -"pull/2564), [#2632](https://github.com/adap/flower/pull/2632))" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" msgstr "" -#: ../../source/ref-changelog.md:314 -msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), [#2286](https://" -"github.com/adap/flower/pull/2286), [#2509](https://github.com/adap/flower/" -"pull/2509))" +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" msgstr "" -#: ../../source/ref-changelog.md:316 +#: ../../source/ref-telemetry.md:11 msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), [#2400]" -"(https://github.com/adap/flower/pull/2400))" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" -#: ../../source/ref-changelog.md:318 +#: ../../source/ref-telemetry.md:12 msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), [#2507](https://" -"github.com/adap/flower/pull/2507))" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." 
msgstr "" -#: ../../source/ref-changelog.md:320 +#: ../../source/ref-telemetry.md:13 msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), [#2508](https://" -"github.com/adap/flower/pull/2508))" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" msgstr "" -#: ../../source/ref-changelog.md:322 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +#: ../../source/ref-telemetry.md:14 +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." msgstr "" -#: ../../source/ref-changelog.md:324 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" msgstr "" -#: ../../source/ref-changelog.md:326 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" msgstr "" -#: ../../source/ref-changelog.md:328 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: ../../source/ref-telemetry.md:24 +msgid "" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." 
msgstr "" -#: ../../source/ref-changelog.md:330 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" msgstr "" -#: ../../source/ref-changelog.md:332 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" msgstr "" -#: ../../source/ref-changelog.md:334 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +#: ../../source/ref-telemetry.md:30 +msgid "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." msgstr "" -#: ../../source/ref-changelog.md:336 +#: ../../source/ref-telemetry.md:32 msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), [#2615](https://" -"github.com/adap/flower/pull/2615))" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" msgstr "" -#: ../../source/ref-changelog.md:338 +#: ../../source/ref-telemetry.md:34 msgid "" -"**General updates to Flower Examples** ([#2384](https://github.com/adap/" -"flower/pull/2384), [#2425](https://github.com/adap/flower/pull/2425), [#2526]" -"(https://github.com/adap/flower/pull/2526), [#2302](https://github.com/adap/" -"flower/pull/2302), [#2545](https://github.com/adap/flower/pull/2545))" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." 
msgstr "" -#: ../../source/ref-changelog.md:340 +#: ../../source/ref-telemetry.md:36 msgid "" -"**General updates to Flower Baselines** ([#2301](https://github.com/adap/" -"flower/pull/2301), [#2305](https://github.com/adap/flower/pull/2305), [#2307]" -"(https://github.com/adap/flower/pull/2307), [#2327](https://github.com/adap/" -"flower/pull/2327), [#2435](https://github.com/adap/flower/pull/2435), [#2462]" -"(https://github.com/adap/flower/pull/2462), [#2463](https://github.com/adap/" -"flower/pull/2463), [#2461](https://github.com/adap/flower/pull/2461), [#2469]" -"(https://github.com/adap/flower/pull/2469), [#2466](https://github.com/adap/" -"flower/pull/2466), [#2471](https://github.com/adap/flower/pull/2471), [#2472]" -"(https://github.com/adap/flower/pull/2472), [#2470](https://github.com/adap/" -"flower/pull/2470))" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." msgstr "" -#: ../../source/ref-changelog.md:342 +#: ../../source/ref-telemetry.md:38 msgid "" -"**General updates to the simulation engine** ([#2331](https://github.com/" -"adap/flower/pull/2331), [#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), [#2294](https://github." -"com/adap/flower/pull/2294))" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." 
msgstr "" -#: ../../source/ref-changelog.md:344 +#: ../../source/ref-telemetry.md:40 msgid "" -"**General updates to Flower SDKs** ([#2288](https://github.com/adap/flower/" -"pull/2288), [#2429](https://github.com/adap/flower/pull/2429), [#2555]" -"(https://github.com/adap/flower/pull/2555), [#2543](https://github.com/adap/" -"flower/pull/2543), [#2544](https://github.com/adap/flower/pull/2544), [#2597]" -"(https://github.com/adap/flower/pull/2597), [#2623](https://github.com/adap/" -"flower/pull/2623))" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." msgstr "" -#: ../../source/ref-changelog.md:346 +#: ../../source/ref-telemetry.md:42 msgid "" -"**General improvements** ([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), [#2313](https://github." -"com/adap/flower/pull/2313), [#2316](https://github.com/adap/flower/" -"pull/2316), [#2317](https://github.com/adap/flower/pull/2317), [#2349]" -"(https://github.com/adap/flower/pull/2349), [#2360](https://github.com/adap/" -"flower/pull/2360), [#2402](https://github.com/adap/flower/pull/2402), [#2446]" -"(https://github.com/adap/flower/pull/2446), [#2561](https://github.com/adap/" -"flower/pull/2561), [#2273](https://github.com/adap/flower/pull/2273), [#2267]" -"(https://github.com/adap/flower/pull/2267), [#2274](https://github.com/adap/" -"flower/pull/2274), [#2275](https://github.com/adap/flower/pull/2275), [#2432]" -"(https://github.com/adap/flower/pull/2432), [#2251](https://github.com/adap/" -"flower/pull/2251), [#2321](https://github.com/adap/flower/pull/2321), [#1936]" -"(https://github.com/adap/flower/pull/1936), [#2408](https://github.com/adap/" -"flower/pull/2408), [#2413](https://github.com/adap/flower/pull/2413), [#2401]" -"(https://github.com/adap/flower/pull/2401), 
[#2531](https://github.com/adap/" -"flower/pull/2531), [#2534](https://github.com/adap/flower/pull/2534), [#2535]" -"(https://github.com/adap/flower/pull/2535), [#2521](https://github.com/adap/" -"flower/pull/2521), [#2553](https://github.com/adap/flower/pull/2553), [#2596]" -"(https://github.com/adap/flower/pull/2596))" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." msgstr "" -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:438 -#: ../../source/ref-changelog.md:502 ../../source/ref-changelog.md:556 -#: ../../source/ref-changelog.md:623 +#: ../../source/ref-telemetry.md:44 msgid "" -"Flower received many improvements under the hood, too many to list here." +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." msgstr "" -#: ../../source/ref-changelog.md:352 +#: ../../source/ref-telemetry.md:46 msgid "" -"**Remove support for Python 3.7** ([#2280](https://github.com/adap/flower/" -"pull/2280), [#2299](https://github.com/adap/flower/pull/2299), [#2304]" -"(https://github.com/adap/flower/pull/2304), [#2306](https://github.com/adap/" -"flower/pull/2306), [#2355](https://github.com/adap/flower/pull/2355), [#2356]" -"(https://github.com/adap/flower/pull/2356))" +"We will not collect any personally identifiable information. 
If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." msgstr "" -#: ../../source/ref-changelog.md:354 +#: ../../source/ref-telemetry.md:48 msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes " -"support. Flower now requires Python 3.8." +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." msgstr "" -#: ../../source/ref-changelog.md:356 +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "" + +#: ../../source/ref-telemetry.md:52 msgid "" -"**Remove experimental argument** `rest` **from** `start_client` ([#2324]" -"(https://github.com/adap/flower/pull/2324))" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." msgstr "" -#: ../../source/ref-changelog.md:358 +#: ../../source/ref-telemetry.md:58 msgid "" -"The (still experimental) argument `rest` was removed from `start_client` and " -"`start_numpy_client`. Use `transport=\"rest\"` to opt into the experimental " -"REST API instead." 
+"To inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" msgstr "" -#: ../../source/ref-changelog.md:360 -msgid "v1.5.0 (2023-08-31)" +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" msgstr "" -#: ../../source/ref-changelog.md:366 +#: ../../source/ref-telemetry.md:66 msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." msgstr "" -#: ../../source/ref-changelog.md:370 +#: ../../source/tutorial-quickstart-android.rst:-1 msgid "" -"**Introduce new simulation engine** ([#1969](https://github.com/adap/flower/" -"pull/1969), [#2221](https://github.com/adap/flower/pull/2221), [#2248]" -"(https://github.com/adap/flower/pull/2248))" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." msgstr "" -#: ../../source/ref-changelog.md:372 -msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and " -"memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-only, " -"CPU+GPU, multi-GPU, or multi-node multi-GPU environments."
+#: ../../source/tutorial-quickstart-android.rst:4 +msgid "Quickstart Android" msgstr "" -#: ../../source/ref-changelog.md:374 +#: ../../source/tutorial-quickstart-android.rst:9 msgid "" -"Comprehensive documentation includes a new [how-to run simulations](https://" -"flower.ai/docs/framework/how-to-run-simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial series](https://www." -"youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"Let's build a federated learning system using TFLite and Flower on " +"Android!" msgstr "" -#: ../../source/ref-changelog.md:376 +#: ../../source/tutorial-quickstart-android.rst:11 msgid "" -"**Restructure Flower Docs** ([#1824](https://github.com/adap/flower/" -"pull/1824), [#1865](https://github.com/adap/flower/pull/1865), [#1884]" -"(https://github.com/adap/flower/pull/1884), [#1887](https://github.com/adap/" -"flower/pull/1887), [#1919](https://github.com/adap/flower/pull/1919), [#1922]" -"(https://github.com/adap/flower/pull/1922), [#1920](https://github.com/adap/" -"flower/pull/1920), [#1923](https://github.com/adap/flower/pull/1923), [#1924]" -"(https://github.com/adap/flower/pull/1924), [#1962](https://github.com/adap/" -"flower/pull/1962), [#2006](https://github.com/adap/flower/pull/2006), [#2133]" -"(https://github.com/adap/flower/pull/2133), [#2203](https://github.com/adap/" -"flower/pull/2203), [#2215](https://github.com/adap/flower/pull/2215), [#2122]" -"(https://github.com/adap/flower/pull/2122), [#2223](https://github.com/adap/" -"flower/pull/2223), [#2219](https://github.com/adap/flower/pull/2219), [#2232]" -"(https://github.com/adap/flower/pull/2232), [#2233](https://github.com/adap/" -"flower/pull/2233), [#2234](https://github.com/adap/flower/pull/2234), [#2235]" -"(https://github.com/adap/flower/pull/2235), 
[#2237](https://github.com/adap/" -"flower/pull/2237), [#2238](https://github.com/adap/flower/pull/2238), [#2242]" -"(https://github.com/adap/flower/pull/2242), [#2231](https://github.com/adap/" -"flower/pull/2231), [#2243](https://github.com/adap/flower/pull/2243), [#2227]" -"(https://github.com/adap/flower/pull/2227))" +"Please refer to the `full code example " +"`_ to learn " +"more." msgstr "" -#: ../../source/ref-changelog.md:378 -msgid "" -"Much effort went into a completely restructured Flower docs experience. The " -"documentation on [flower.ai/docs](https://flower.ai/docs) is now divided " -"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS SDK, " -"and code example projects." +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" msgstr "" -#: ../../source/ref-changelog.md:380 +#: ../../source/tutorial-quickstart-fastai.rst:6 msgid "" -"**Introduce Flower Swift SDK** ([#1858](https://github.com/adap/flower/" -"pull/1858), [#1897](https://github.com/adap/flower/pull/1897))" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" -#: ../../source/ref-changelog.md:382 -msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support on " -"iOS is improving, and alongside the Swift SDK and code example, there is now " -"also an iOS quickstart tutorial." 
+#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" msgstr "" -#: ../../source/ref-changelog.md:384 +#: ../../source/tutorial-quickstart-fastai.rst:18 msgid "" -"**Introduce Flower Android SDK** ([#2131](https://github.com/adap/flower/" -"pull/2131))" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -#: ../../source/ref-changelog.md:386 +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "그 후 가상 환경을 활성화합니다:" + +#: ../../source/tutorial-quickstart-fastai.rst:41 msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower support " -"on Android is improving, and alongside the Kotlin SDK and code example, " -"there is now also an Android quickstart tutorial." +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" msgstr "" -#: ../../source/ref-changelog.md:388 -msgid "" -"**Introduce new end-to-end testing infrastructure** ([#1842](https://github." 
-"com/adap/flower/pull/1842), [#2071](https://github.com/adap/flower/" -"pull/2071), [#2072](https://github.com/adap/flower/pull/2072), [#2068]" -"(https://github.com/adap/flower/pull/2068), [#2067](https://github.com/adap/" -"flower/pull/2067), [#2069](https://github.com/adap/flower/pull/2069), [#2073]" -"(https://github.com/adap/flower/pull/2073), [#2070](https://github.com/adap/" -"flower/pull/2070), [#2074](https://github.com/adap/flower/pull/2074), [#2082]" -"(https://github.com/adap/flower/pull/2082), [#2084](https://github.com/adap/" -"flower/pull/2084), [#2093](https://github.com/adap/flower/pull/2093), [#2109]" -"(https://github.com/adap/flower/pull/2109), [#2095](https://github.com/adap/" -"flower/pull/2095), [#2140](https://github.com/adap/flower/pull/2140), [#2137]" -"(https://github.com/adap/flower/pull/2137), [#2165](https://github.com/adap/" -"flower/pull/2165))" +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +msgid "With default arguments you will see an output like this one:" msgstr "" -#: ../../source/ref-changelog.md:390 +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 msgid "" -"A new testing infrastructure ensures that new changes stay compatible with " -"existing framework integrations or strategies." 
+"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" msgstr "" -#: ../../source/ref-changelog.md:392 -msgid "**Deprecate Python 3.7**" +#: ../../source/tutorial-quickstart-fastai.rst:108 +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:394 +#: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for " -"Python 3.7 is now deprecated and will be removed in an upcoming release." +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" -#: ../../source/ref-changelog.md:396 -msgid "" -"**Add new** `FedTrimmedAvg` **strategy** ([#1769](https://github.com/adap/" -"flower/pull/1769), [#1853](https://github.com/adap/flower/pull/1853))" +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/ref-changelog.md:398 +#: ../../source/tutorial-quickstart-huggingface.rst:6 msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, 2018]" -"(https://arxiv.org/abs/1803.01498)." +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -#: ../../source/ref-changelog.md:400 +#: ../../source/tutorial-quickstart-huggingface.rst:12 msgid "" -"**Introduce start_driver** ([#1697](https://github.com/adap/flower/" -"pull/1697))" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. 
It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." msgstr "" -#: ../../source/ref-changelog.md:402 +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 msgid "" -"In addition to `start_server` and using the raw Driver API, there is a new " -"`start_driver` function that allows for running `start_server` scripts as a " -"Flower driver with only a single-line code change. Check out the `mt-" -"pytorch` code example to see a working example using `start_driver`." +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" msgstr "" -#: ../../source/ref-changelog.md:404 +#: ../../source/tutorial-quickstart-huggingface.rst:25 msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** ([#1785]" -"(https://github.com/adap/flower/pull/1785))" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:406 +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a " -"driver script. The included `driver.py` and `server.py` have been aligned to " -"demonstrate both the low-level way and the high-level way of building server-" -"side logic." +"After running it you'll notice a new directory with your project name has" +" been created. 
It should have the following structure:" msgstr "" -#: ../../source/ref-changelog.md:408 +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 msgid "" -"**Migrate experimental REST API to Starlette** ([2171](https://github.com/" -"adap/flower/pull/2171))" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" msgstr "" -#: ../../source/ref-changelog.md:410 -msgid "" -"The (experimental) REST API used to be implemented in [FastAPI](https://" -"fastapi.tiangolo.com/), but it has now been migrated to use [Starlette]" -"(https://www.starlette.io/) directly." +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +msgid "To run the project, do:" msgstr "" -#: ../../source/ref-changelog.md:412 -msgid "" -"Please note: The REST request-response API is still experimental and will " -"likely change significantly over time." +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/ref-changelog.md:414 +#: ../../source/tutorial-quickstart-huggingface.rst:109 msgid "" -"**Introduce experimental gRPC request-response API** ([#1867](https://github." -"com/adap/flower/pull/1867), [#1901](https://github.com/adap/flower/" -"pull/1901))" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." 
msgstr "" -#: ../../source/ref-changelog.md:416 +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) and " -"the experimental REST API, there is now a new gRPC API that uses a request-" -"response model to communicate with client nodes." +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -#: ../../source/ref-changelog.md:418 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#, fuzzy +msgid "The Data" +msgstr "Metadata" + +#: ../../source/tutorial-quickstart-huggingface.rst:126 msgid "" -"Please note: The gRPC request-response API is still experimental and will " -"likely change significantly over time." +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." 
msgstr "" -#: ../../source/ref-changelog.md:420 -msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` ([#1880](https://github.com/adap/flower/" -"pull/1880))" +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +msgid "The Model" msgstr "" -#: ../../source/ref-changelog.md:422 +#: ../../source/tutorial-quickstart-huggingface.rst:173 msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in favour " -"of a new argument `transport`. `start_client(transport=\"rest\")` will yield " -"the same behaviour as `start_client(rest=True)` did before. All code should " -"migrate to the new argument `transport`. The deprecated argument `rest` will " -"be removed in a future release." +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" msgstr "" -#: ../../source/ref-changelog.md:424 +#: ../../source/tutorial-quickstart-huggingface.rst:185 msgid "" -"**Add a new gRPC option** ([#2197](https://github.com/adap/flower/pull/2197))" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." msgstr "" -#: ../../source/ref-changelog.md:426 +#: ../../source/tutorial-quickstart-huggingface.rst:188 msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls` " -"option set to 0 by default. This prevents the clients from sending keepalive " -"pings when there is no outstanding stream." 
+"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -#: ../../source/ref-changelog.md:428 +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#, fuzzy +msgid "The ClientApp" +msgstr "클라이언트앱" + +#: ../../source/tutorial-quickstart-huggingface.rst:230 msgid "" -"**Improve example notebooks** ([#2005](https://github.com/adap/flower/" -"pull/2005))" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function does the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this is fairly easy in PyTorch." msgstr "" -#: ../../source/ref-changelog.md:430 -msgid "There's a new 30min Federated Learning PyTorch tutorial!"
+#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -#: ../../source/ref-changelog.md:432 +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), [#1981](https://github." -"com/adap/flower/pull/1981), [#1988](https://github.com/adap/flower/" -"pull/1988), [#1984](https://github.com/adap/flower/pull/1984), [#1982]" -"(https://github.com/adap/flower/pull/1982), [#2112](https://github.com/adap/" -"flower/pull/2112), [#2144](https://github.com/adap/flower/pull/2144), [#2174]" -"(https://github.com/adap/flower/pull/2174), [#2225](https://github.com/adap/" -"flower/pull/2225), [#2183](https://github.com/adap/flower/pull/2183))" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/ref-changelog.md:434 +#: ../../source/tutorial-quickstart-huggingface.rst:283 msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A " -"major upgrade is that all code examples now have a `requirements.txt` (in " -"addition to `pyproject.toml`)." 
+"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/ref-changelog.md:436 +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#, fuzzy +msgid "The ServerApp" +msgstr "Flower 서버앱" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 msgid "" -"**General improvements** ([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), [#1884](https://github." -"com/adap/flower/pull/1884), [#1837](https://github.com/adap/flower/" -"pull/1837), [#1477](https://github.com/adap/flower/pull/1477), [#2171]" -"(https://github.com/adap/flower/pull/2171))" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_. In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will serve as the global model to be federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/ref-changelog.md:444 -msgid "v1.4.0 (2023-04-21)" +#: ../../source/tutorial-quickstart-huggingface.rst:356 msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM."
msgstr "" -#: ../../source/ref-changelog.md:450 +#: ../../source/tutorial-quickstart-huggingface.rst:361 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, `Iacob-" -"Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal Sarkhel`, " -"`L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic Lane`, " -"`Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, `Steve " -"Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:454 +#: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and example)** " -"([#1694](https://github.com/adap/flower/pull/1694), [#1709](https://github." -"com/adap/flower/pull/1709), [#1715](https://github.com/adap/flower/" -"pull/1715), [#1717](https://github.com/adap/flower/pull/1717), [#1763]" -"(https://github.com/adap/flower/pull/1763), [#1795](https://github.com/adap/" -"flower/pull/1795))" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." msgstr "" -#: ../../source/ref-changelog.md:456 -msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg` " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/" -"strategy/fedxgb_nn_avg.py), and a [code example](https://github.com/adap/" -"flower/tree/main/examples/xgboost-quickstart) that demonstrates the usage of " -"this new strategy in an XGBoost project." 
+#: ../../source/tutorial-quickstart-ios.rst:4 +msgid "Quickstart iOS" msgstr "" -#: ../../source/ref-changelog.md:458 +#: ../../source/tutorial-quickstart-ios.rst:9 msgid "" -"**Introduce iOS SDK (preview)** ([#1621](https://github.com/adap/flower/" -"pull/1621), [#1764](https://github.com/adap/flower/pull/1764))" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." msgstr "" -#: ../../source/ref-changelog.md:460 +#: ../../source/tutorial-quickstart-ios.rst:12 msgid "" -"This is a major update for anyone wanting to implement Federated Learning on " -"iOS mobile devices. We now have a swift iOS SDK present under [src/swift/" -"flwr](https://github.com/adap/flower/tree/main/src/swift/flwr) that will " -"facilitate greatly the app creating process. To showcase its use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." msgstr "" -#: ../../source/ref-changelog.md:462 +#: ../../source/tutorial-quickstart-ios.rst:17 msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** ([#1657](https://" -"github.com/adap/flower/pull/1657), [#1721](https://github.com/adap/flower/" -"pull/1721))" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." msgstr "" -#: ../../source/ref-changelog.md:464 +#: ../../source/tutorial-quickstart-ios.rst:20 msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-what-" -"is-federated-learning.html) in our documentation explains the basics of " -"Fedetated Learning. It enables anyone who's unfamiliar with Federated " -"Learning to start their journey with Flower. 
Forward it to anyone who's " -"interested in Federated Learning!" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." msgstr "" -#: ../../source/ref-changelog.md:466 +#: ../../source/tutorial-quickstart-ios.rst:26 msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** ([#1513](https://github.com/" -"adap/flower/pull/1513), [#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), [#1679](https://github." -"com/adap/flower/pull/1679))" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" msgstr "" -#: ../../source/ref-changelog.md:468 -msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated " -"Optimization in Heterogeneous Networks (Li et al., 2018)](https://arxiv.org/" -"abs/1812.06127). It uses the `FedProx` strategy, which aims at making " -"convergence more robust in heterogeneous settings." +#: ../../source/tutorial-quickstart-ios.rst:33 +msgid "Or Poetry:" msgstr "" -#: ../../source/ref-changelog.md:470 -msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** ([#1655](https://github." 
-"com/adap/flower/pull/1655))" +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 +msgid "Flower Client" msgstr "" -#: ../../source/ref-changelog.md:472 +#: ../../source/tutorial-quickstart-ios.rst:42 msgid "" -"This new baseline replicates an experiment evaluating the performance of the " -"FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A Benchmark " -"for Federated Settings (Caldas et al., 2018)](https://arxiv.org/" -"abs/1812.01097)." +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" msgstr "" -#: ../../source/ref-changelog.md:474 +#: ../../source/tutorial-quickstart-ios.rst:80 msgid "" -"**Introduce (experimental) REST API** ([#1594](https://github.com/adap/" -"flower/pull/1594), [#1690](https://github.com/adap/flower/pull/1690), [#1695]" -"(https://github.com/adap/flower/pull/1695), [#1712](https://github.com/adap/" -"flower/pull/1712), [#1802](https://github.com/adap/flower/pull/1802), [#1770]" -"(https://github.com/adap/flower/pull/1770), [#1733](https://github.com/adap/" -"flower/pull/1733))" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." msgstr "" -#: ../../source/ref-changelog.md:476 -msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. 
In this initial version, the REST API only supports " -"anonymous clients." +#: ../../source/tutorial-quickstart-ios.rst:86 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" msgstr "" -#: ../../source/ref-changelog.md:478 +#: ../../source/tutorial-quickstart-ios.rst:94 msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." msgstr "" -#: ../../source/ref-changelog.md:480 +#: ../../source/tutorial-quickstart-ios.rst:112 msgid "" -"**Improve the (experimental) Driver API** ([#1663](https://github.com/adap/" -"flower/pull/1663), [#1666](https://github.com/adap/flower/pull/1666), [#1667]" -"(https://github.com/adap/flower/pull/1667), [#1664](https://github.com/adap/" -"flower/pull/1664), [#1675](https://github.com/adap/flower/pull/1675), [#1676]" -"(https://github.com/adap/flower/pull/1676), [#1693](https://github.com/adap/" -"flower/pull/1693), [#1662](https://github.com/adap/flower/pull/1662), [#1794]" -"(https://github.com/adap/flower/pull/1794))" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." 
msgstr "" -#: ../../source/ref-changelog.md:482 +#: ../../source/tutorial-quickstart-ios.rst:118 msgid "" -"The Driver API is still an experimental feature, but this release introduces " -"some major upgrades. One of the main improvements is the introduction of an " -"SQLite database to store server state on disk (instead of in-memory). " -"Another improvement is that tasks (instructions or results) that have been " -"delivered will now be deleted. This greatly improves the memory efficiency " -"of a long-running Flower server." +"After we have all of the necessary information, let's create our Flower " +"client." msgstr "" -#: ../../source/ref-changelog.md:484 +#: ../../source/tutorial-quickstart-ios.rst:133 msgid "" -"**Fix spilling issues related to Ray during simulations** ([#1698](https://" -"github.com/adap/flower/pull/1698))" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." msgstr "" -#: ../../source/ref-changelog.md:486 +#: ../../source/tutorial-quickstart-ios.rst:141 msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts of " -"data that would make the training unable to continue. This is now fixed! 🎉" +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." msgstr "" -#: ../../source/ref-changelog.md:488 -msgid "" -"**Add new example using** `TabNet` **and Flower** ([#1725](https://github." 
-"com/adap/flower/pull/1725))" +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "Flower Server" msgstr "" -#: ../../source/ref-changelog.md:490 +#: ../../source/tutorial-quickstart-ios.rst:150 msgid "" -"TabNet is a powerful and flexible framework for training machine learning " -"models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples/" -"quickstart-tabnet)." +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/ref-changelog.md:492 -msgid "" -"**Add new how-to guide for monitoring simulations** ([#1649](https://github." -"com/adap/flower/pull/1649))" +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 +msgid "Train the model, federated!" msgstr "" -#: ../../source/ref-changelog.md:494 +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 msgid "" -"We now have a documentation guide to help users monitor their performance " -"during simulations." +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" msgstr "" -#: ../../source/ref-changelog.md:496 +#: ../../source/tutorial-quickstart-ios.rst:171 msgid "" -"**Add training metrics to** `History` **object during simulations** ([#1696]" -"(https://github.com/adap/flower/pull/1696))" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. 
To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." msgstr "" -#: ../../source/ref-changelog.md:498 +#: ../../source/tutorial-quickstart-ios.rst:177 msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training metrics, " -"but previous releases did not save the results in the `History` object. This " -"is now the case!" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." msgstr "" -#: ../../source/ref-changelog.md:500 +#: ../../source/tutorial-quickstart-jax.rst:-1 msgid "" -"**General improvements** ([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), [#1647](https://github." -"com/adap/flower/pull/1647), [#1471](https://github.com/adap/flower/" -"pull/1471), [#1648](https://github.com/adap/flower/pull/1648), [#1651]" -"(https://github.com/adap/flower/pull/1651), [#1652](https://github.com/adap/" -"flower/pull/1652), [#1653](https://github.com/adap/flower/pull/1653), [#1659]" -"(https://github.com/adap/flower/pull/1659), [#1665](https://github.com/adap/" -"flower/pull/1665), [#1670](https://github.com/adap/flower/pull/1670), [#1672]" -"(https://github.com/adap/flower/pull/1672), [#1677](https://github.com/adap/" -"flower/pull/1677), [#1684](https://github.com/adap/flower/pull/1684), [#1683]" -"(https://github.com/adap/flower/pull/1683), [#1686](https://github.com/adap/" -"flower/pull/1686), [#1682](https://github.com/adap/flower/pull/1682), [#1685]" -"(https://github.com/adap/flower/pull/1685), [#1692](https://github.com/adap/" -"flower/pull/1692), [#1705](https://github.com/adap/flower/pull/1705), [#1708]" -"(https://github.com/adap/flower/pull/1708), [#1711](https://github.com/adap/" -"flower/pull/1711), [#1713](https://github.com/adap/flower/pull/1713), [#1714]" -"(https://github.com/adap/flower/pull/1714), 
[#1718](https://github.com/adap/" -"flower/pull/1718), [#1716](https://github.com/adap/flower/pull/1716), [#1723]" -"(https://github.com/adap/flower/pull/1723), [#1735](https://github.com/adap/" -"flower/pull/1735), [#1678](https://github.com/adap/flower/pull/1678), [#1750]" -"(https://github.com/adap/flower/pull/1750), [#1753](https://github.com/adap/" -"flower/pull/1753), [#1736](https://github.com/adap/flower/pull/1736), [#1766]" -"(https://github.com/adap/flower/pull/1766), [#1760](https://github.com/adap/" -"flower/pull/1760), [#1775](https://github.com/adap/flower/pull/1775), [#1776]" -"(https://github.com/adap/flower/pull/1776), [#1777](https://github.com/adap/" -"flower/pull/1777), [#1779](https://github.com/adap/flower/pull/1779), [#1784]" -"(https://github.com/adap/flower/pull/1784), [#1773](https://github.com/adap/" -"flower/pull/1773), [#1755](https://github.com/adap/flower/pull/1755), [#1789]" -"(https://github.com/adap/flower/pull/1789), [#1788](https://github.com/adap/" -"flower/pull/1788), [#1798](https://github.com/adap/flower/pull/1798), [#1799]" -"(https://github.com/adap/flower/pull/1799), [#1739](https://github.com/adap/" -"flower/pull/1739), [#1800](https://github.com/adap/flower/pull/1800), [#1804]" -"(https://github.com/adap/flower/pull/1804), [#1805](https://github.com/adap/" -"flower/pull/1805))" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." msgstr "" -#: ../../source/ref-changelog.md:508 -msgid "v1.3.0 (2023-02-06)" +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" msgstr "" -#: ../../source/ref-changelog.md:514 +#: ../../source/tutorial-quickstart-jax.rst:9 msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, `Daniel " -"J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. 
We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" +"이 튜토리얼에서는 Flower를 사용하여 기존 JAX 워크로드의 연합 버전을 구축하는 방법을 보여드립니다. JAX를 사용해 " +"scikit-learn 데이터 세트에서 선형 회귀 모델을 훈련하고 있습니다. 예제는 '파이토치 - Centralized에서 " +"Federated으로 `_ 워크스루와 유사하게 구성하겠습니다. 먼저, `JAX를 사용한 선형 회귀 " +"`_" +" 튜토리얼`을 기반으로 centralized 학습 접근 방식을 구축합니다. 그런 다음 centralized 트레이닝 코드를 기반으로" +" federated 방식으로 트레이닝을 실행합니다." -#: ../../source/ref-changelog.md:518 +#: ../../source/tutorial-quickstart-jax.rst:20 +#, fuzzy msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"Before we start building our JAX example, we need install the packages " +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" msgstr "" +"JAX 예제 빌드를 시작하기 전에 :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, " +":code:`flwr` 패키지를 설치해야 합니다:" -#: ../../source/ref-changelog.md:520 +#: ../../source/tutorial-quickstart-jax.rst:28 +msgid "Linear Regression with JAX" +msgstr "JAX를 사용한 선형 회귀" + +#: ../../source/tutorial-quickstart-jax.rst:30 +#, fuzzy msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be used " -"to identify which workload a task belongs to. It also supports a new " -"`group_id` that can be used, for example, to indicate the current training " -"round. Both the `workload_id` and `group_id` enable client nodes to decide " -"whether they want to handle a task or not." +"We begin with a brief description of the centralized training code based " +"on a ``Linear Regression`` model. 
If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." msgstr "" +"먼저 :code:`선형 회귀` 모델을 기반으로 하는 중앙 집중식 훈련 코드에 대한 간략한 설명부터 시작하겠습니다. 더 자세한 설명을" +" 원하시면 공식 `JAX 문서 `_를 참조하세요." -#: ../../source/ref-changelog.md:522 +#: ../../source/tutorial-quickstart-jax.rst:34 +#, fuzzy msgid "" -"**Make Driver API and Fleet API address configurable** ([#1637](https://" -"github.com/adap/flower/pull/1637))" +"Let's create a new file called ``jax_training.py`` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." +msgstr "" +"전통적인(중앙 집중식) 선형 회귀 훈련에 필요한 모든 구성 요소가 포함된 :code:`jax_training.py`라는 새 파일을 " +"생성해 보겠습니다. 먼저, JAX 패키지인 :code:`jax`와 :code:`jaxlib`를 가져와야 합니다. 또한 데이터 세트에" +" :code:`make_regression`을 사용하고 데이터 세트를 학습 및 테스트 세트로 분할하기 위해 " +":code:`train_test_split`을 사용하므로 :code:`sklearn`을 가져와야 합니다. 연합 학습을 위해 아직 " +":code:`flwr` 패키지를 가져오지 않은 것을 볼 수 있습니다. 이 작업은 나중에 수행됩니다." + +#: ../../source/tutorial-quickstart-jax.rst:51 +#, fuzzy +msgid "The ``load_data()`` function loads the mentioned training and test sets." +msgstr "code:`load_data()` 함수는 앞서 언급한 트레이닝 및 테스트 세트를 로드합니다." + +#: ../../source/tutorial-quickstart-jax.rst:63 +#, fuzzy +msgid "" +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." +msgstr "모델 아키텍처(매우 간단한 :code:`선형 회귀` 모델)는 :code:`load_model()`에 정의되어 있습니다." 
+ +#: ../../source/tutorial-quickstart-jax.rst:73 +#, fuzzy +msgid "" +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." msgstr "" +"이제 훈련 집합을 반복하고 각 훈련 예제 배치에 대해 손실을 측정하는(함수 :code:`loss_fn()`) 훈련(함수 " +":code:`train()`)을 정의해야 합니다. JAX는 :code:`grad()` 함수(:code:`main()` 함수에 " +"정의되고 :code:`train()`에서 호출됨)로 파생물을 취하므로 손실 함수는 분리되어 있습니다." -#: ../../source/ref-changelog.md:524 +#: ../../source/tutorial-quickstart-jax.rst:95 +#, fuzzy msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) can " -"now configure the server address of both Driver API (via `--driver-api-" -"address`) and Fleet API (via `--fleet-api-address`) when starting:" +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." msgstr "" +"모델의 평가는 :code:`evaluation()` 함수에 정의되어 있습니다. 이 함수는 모든 테스트 예제를 가져와 선형 회귀 " +"모델의 손실을 측정합니다." -#: ../../source/ref-changelog.md:526 +#: ../../source/tutorial-quickstart-jax.rst:107 +#, fuzzy msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." msgstr "" +"데이터 로딩, 모델 아키텍처, 훈련 및 평가를 정의했으므로 이제 모든 것을 종합하여 JAX를 사용 모델을 훈련할 수 있습니다. 이미" +" 언급했듯이 :code:`jax.grad()` 함수는 :code:`main()`에 정의되어 :code:`train()`에 " +"전달됩니다." -#: ../../source/ref-changelog.md:528 -msgid "Both IPv4 and IPv6 addresses are supported." 
-msgstr "" +#: ../../source/tutorial-quickstart-jax.rst:126 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "이제 (중앙 집중식) JAX 선형 회귀 워크로드를 실행할 수 있습니다:" -#: ../../source/ref-changelog.md:530 +#: ../../source/tutorial-quickstart-jax.rst:132 msgid "" -"**Add new example of Federated Learning using fastai and Flower** ([#1598]" -"(https://github.com/adap/flower/pull/1598))" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." msgstr "" +"지금까지는 JAX를 사용해 본 적이 있다면 이 모든 것이 상당히 익숙해 보일 것입니다. 다음 단계로 넘어가서 우리가 구축한 것을 " +"사용하여 하나의 서버와 두 개의 클라이언트로 구성된 간단한 연합 학습 시스템을 만들어 보겠습니다." -#: ../../source/ref-changelog.md:532 +#: ../../source/tutorial-quickstart-jax.rst:137 +msgid "JAX meets Flower" +msgstr "JAX와 Flower의 만남" + +#: ../../source/tutorial-quickstart-jax.rst:139 +#, fuzzy +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." +msgstr "" +"기존 워크로드를 연합하는 개념은 항상 동일하고 이해하기 쉽습니다. 서버*를 시작한 다음 *서버*에 연결된 *클라이언트*에 대해 " +":code:`jax_training.py`의 코드를 사용해야 합니다. *서버*는 모델 파라미터를 클라이언트로 전송합니다. " +"클라이언트는 학습을 실행하고 파라미터를 업데이트합니다. 업데이트된 파라미터는 *서버*로 다시 전송되며, 수신된 모든 파라미터 " +"업데이트의 평균을 구합니다. 이는 연합 학습 프로세스의 한 라운드를 설명하며, 이 과정을 여러 라운드에 걸쳐 반복합니다." 
+ +#: ../../source/tutorial-quickstart-jax.rst:167 +#, fuzzy msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples/" -"quickstart-fastai)." +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" msgstr "" +"마지막으로, :code:`client.py`에서 *client* 로직을 정의하고 :code:`jax_training.py`에서 " +"이전에 정의한 JAX 교육을 기반으로 빌드합니다. *클라이언트*는 :code:`flwr`을 가져와야 하며, JAX 모델의 파라미터를" +" 업데이트하기 위해 :code:`jax` 및 :code:`jaxlib`도 가져와야 합니다:" -#: ../../source/ref-changelog.md:534 +#: ../../source/tutorial-quickstart-jax.rst:182 +#, fuzzy +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. ``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" +msgstr "" +"Flower *클라이언트*를 구현한다는 것은 기본적으로 :code:`flwr.client.Client` 또는 " +":code:`flwr.client.NumPyClient`의 서브클래스를 구현하는 것을 의미합니다. 구현은 " +":code:`flwr.client.NumPyClient`를 기반으로 하며, 이를 :code:`FlowerClient`라고 부를 " +"것입니다. :code:`NumPyClient`는 필요한 일부 보일러플레이를 피할 수 있기 때문에 NumPy 상호 운용성이 좋은 " +"프레임워크(예: JAX)를 사용하는 경우 :code:`Client`보다 구현하기가 약간 더 쉽습니다. 
" +"code:`FlowerClient`는 모델 매개변수를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 " +"테스트를 위한 메서드 1개 등 총 4개의 메서드를 구현해야 합니다:" + +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "``set_parameters (optional)``" +msgstr ":code:`set_parameters (선택사항)`" + +#: ../../source/tutorial-quickstart-jax.rst:193 +#, fuzzy +msgid "transform parameters to NumPy ``ndarray``'s" +msgstr "매개 변수를 NumPy :code:`ndarray`로 변환" + +#: ../../source/tutorial-quickstart-jax.rst:203 +msgid "get the updated local model parameters and return them to the server" +msgstr "업데이트된 로컬 모델 파라미터를 가져와 서버로 반환합니다" + +#: ../../source/tutorial-quickstart-jax.rst:208 +msgid "return the local loss to the server" +msgstr "로컬 손실을 서버로 반환합니다" + +#: ../../source/tutorial-quickstart-jax.rst:210 +#, fuzzy msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest " -"versions of Android** ([#1603](https://github.com/adap/flower/pull/1603))" +"The challenging part is to transform the JAX model parameters from " +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." msgstr "" +"어려운 부분은 JAX 모델 매개변수를 :code:`DeviceArray`에서 :code:`NumPy ndarray`로 변환하여 " +"`NumPyClient`와 호환되도록 하는 것입니다." -#: ../../source/ref-changelog.md:536 +#: ../../source/tutorial-quickstart-jax.rst:213 +#, fuzzy msgid "" -"The Android code example has received a substantial update: the project is " -"compatible with Flower 1.0 (and later), the UI received a full refresh, and " -"the project is updated to be compatible with newer Android tooling." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." 
msgstr "" +"두 개의 :code:`NumPyClient` 메서드인 :code:`fit`과 :code:`evaluate`는 이전에 " +":code:`jax_training.py`에 정의된 함수 :code:`train()`과 :code:`evaluate()`를 " +"사용합니다. 따라서 여기서 우리가 실제로 하는 일은 이미 정의된 함수 중 훈련과 평가를 위해 호출할 함수를 " +":code:`NumPyClient` 서브클래스를 통해 Flower에게 알려주는 것입니다. 전달되는 데이터 유형을 더 잘 이해할 수 " +"있도록 유형 type annotation을 포함했습니다." -#: ../../source/ref-changelog.md:538 +#: ../../source/tutorial-quickstart-jax.rst:286 +msgid "Having defined the federation process, we can run it." +msgstr "연합 프로세스를 정의했으면 이제 실행할 수 있습니다." + +#: ../../source/tutorial-quickstart-jax.rst:315 msgid "" -"**Add new `FedProx` strategy** ([#1619](https://github.com/adap/flower/" -"pull/1619))" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" msgstr "" +"를 입력하고(그 전에 서버가 계속 실행 중인지 확인하세요) 두 클라이언트에서 연합 학습을 실행하는 JAX 프로젝트를 확인합니다. " +"축하합니다!" -#: ../../source/ref-changelog.md:540 +#: ../../source/tutorial-quickstart-jax.rst:321 msgid "" -"This [strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/" -"strategy/fedprox.py) is almost identical to [`FedAvg`](https://github.com/" -"adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py), but helps " -"users replicate what is described in this [paper](https://arxiv.org/" -"abs/1812.06127). It essentially adds a parameter called `proximal_mu` to " -"regularize the local models with respect to the global models." +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." msgstr "" +"이 예제의 소스 코드는 시간이 지남에 따라 개선되었으며 여기에서 확인할 수 있습니다: 'Quickstart JAX " +"`_. 두 " +"클라이언트가 동일한 데이터 세트를 로드하기 때문에 이 예제는 다소 단순화되어 있습니다." 
-#: ../../source/ref-changelog.md:542 +#: ../../source/tutorial-quickstart-jax.rst:325 msgid "" -"**Add new metrics to telemetry events** ([#1640](https://github.com/adap/" -"flower/pull/1640))" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" msgstr "" +"이제 이 주제를 더 자세히 살펴볼 준비가 되었습니다. 더 정교한 모델을 사용하거나 다른 데이터 집합을 사용해 보는 것은 어떨까요? " +"클라이언트를 더 추가하는 것은 어떨까요?" + +#: ../../source/tutorial-quickstart-mlx.rst:4 +#, fuzzy +msgid "Quickstart MLX" +msgstr "빠른 시작" -#: ../../source/ref-changelog.md:544 +#: ../../source/tutorial-quickstart-mlx.rst:6 msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:546 +#: ../../source/tutorial-quickstart-mlx.rst:10 msgid "" -"**Add new custom strategy tutorial section** [#1623](https://github.com/adap/" -"flower/pull/1623)" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/ref-changelog.md:548 +#: ../../source/tutorial-quickstart-mlx.rst:25 msgid "" -"The Flower tutorial now has a new section that covers implementing a custom " -"strategy from scratch: [Open in Colab](https://colab.research.google.com/" -"github/adap/flower/blob/main/doc/source/tutorial-build-a-strategy-from-" -"scratch-pytorch.ipynb)" +"Then, run the command below. 
You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:550 -msgid "" -"**Add new custom serialization tutorial section** ([#1622](https://github." -"com/adap/flower/pull/1622))" +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" msgstr "" -#: ../../source/ref-changelog.md:552 +#: ../../source/tutorial-quickstart-mlx.rst:102 msgid "" -"The Flower tutorial now has a new section that covers custom serialization: " -"[Open in Colab](https://colab.research.google.com/github/adap/flower/blob/" -"main/doc/source/tutorial-customize-the-client-pytorch.ipynb)" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/ref-changelog.md:554 +#: ../../source/tutorial-quickstart-mlx.rst:116 msgid "" -"**General improvements** ([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), [#1636](https://github." 
-"com/adap/flower/pull/1636), [#1635](https://github.com/adap/flower/" -"pull/1635), [#1633](https://github.com/adap/flower/pull/1633), [#1632]" -"(https://github.com/adap/flower/pull/1632), [#1631](https://github.com/adap/" -"flower/pull/1631), [#1630](https://github.com/adap/flower/pull/1630), [#1627]" -"(https://github.com/adap/flower/pull/1627), [#1593](https://github.com/adap/" -"flower/pull/1593), [#1616](https://github.com/adap/flower/pull/1616), [#1615]" -"(https://github.com/adap/flower/pull/1615), [#1607](https://github.com/adap/" -"flower/pull/1607), [#1609](https://github.com/adap/flower/pull/1609), [#1608]" -"(https://github.com/adap/flower/pull/1608), [#1603](https://github.com/adap/" -"flower/pull/1603), [#1590](https://github.com/adap/flower/pull/1590), [#1580]" -"(https://github.com/adap/flower/pull/1580), [#1599](https://github.com/adap/" -"flower/pull/1599), [#1600](https://github.com/adap/flower/pull/1600), [#1601]" -"(https://github.com/adap/flower/pull/1601), [#1597](https://github.com/adap/" -"flower/pull/1597), [#1595](https://github.com/adap/flower/pull/1595), [#1591]" -"(https://github.com/adap/flower/pull/1591), [#1588](https://github.com/adap/" -"flower/pull/1588), [#1589](https://github.com/adap/flower/pull/1589), [#1587]" -"(https://github.com/adap/flower/pull/1587), [#1573](https://github.com/adap/" -"flower/pull/1573), [#1581](https://github.com/adap/flower/pull/1581), [#1578]" -"(https://github.com/adap/flower/pull/1578), [#1574](https://github.com/adap/" -"flower/pull/1574), [#1572](https://github.com/adap/flower/pull/1572), [#1586]" -"(https://github.com/adap/flower/pull/1586))" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. 
You can choose `other " +"partitioners `_ available in Flower Datasets:" msgstr "" -#: ../../source/ref-changelog.md:558 +#: ../../source/tutorial-quickstart-mlx.rst:157 msgid "" -"**Updated documentation** ([#1629](https://github.com/adap/flower/" -"pull/1629), [#1628](https://github.com/adap/flower/pull/1628), [#1620]" -"(https://github.com/adap/flower/pull/1620), [#1618](https://github.com/adap/" -"flower/pull/1618), [#1617](https://github.com/adap/flower/pull/1617), [#1613]" -"(https://github.com/adap/flower/pull/1613), [#1614](https://github.com/adap/" -"flower/pull/1614))" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" msgstr "" -#: ../../source/ref-changelog.md:560 ../../source/ref-changelog.md:627 +#: ../../source/tutorial-quickstart-mlx.rst:180 msgid "" -"As usual, the documentation has improved quite a bit. It is another step in " -"our effort to make the Flower documentation the best documentation of any " -"project. Stay tuned and as always, feel free to provide feedback!" +"We also define some utility functions to test our model and to iterate " +"over batches." msgstr "" -#: ../../source/ref-changelog.md:566 -msgid "v1.2.0 (2023-01-13)" +#: ../../source/tutorial-quickstart-mlx.rst:201 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." msgstr "" -#: ../../source/ref-changelog.md:572 -msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L. 
" -"Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" msgstr "" -#: ../../source/ref-changelog.md:576 +#: ../../source/tutorial-quickstart-mlx.rst:219 msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** ([#1497](https://github.com/" -"adap/flower/pull/1497), [#1552](https://github.com/adap/flower/pull/1552))" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" msgstr "" -#: ../../source/ref-changelog.md:578 +#: ../../source/tutorial-quickstart-mlx.rst:228 msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in this " -"series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-pack-" -"fedavg-mnist-cnn/)" +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" msgstr "" -#: ../../source/ref-changelog.md:580 +#: ../../source/tutorial-quickstart-mlx.rst:243 msgid "" -"**Improve GPU support in simulations** ([#1555](https://github.com/adap/" -"flower/pull/1555))" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" msgstr "" -#: ../../source/ref-changelog.md:582 +#: ../../source/tutorial-quickstart-mlx.rst:259 msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated to " -"improve GPU support. 
The update includes some of the hard-earned lessons " -"from scaling simulations in GPU cluster environments. New defaults make " -"running GPU-based simulations substantially more robust." +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." msgstr "" -#: ../../source/ref-changelog.md:584 -msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** ([#1527](https://" -"github.com/adap/flower/pull/1527), [#1558](https://github.com/adap/flower/" -"pull/1558))" +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" msgstr "" -#: ../../source/ref-changelog.md:586 +#: ../../source/tutorial-quickstart-mlx.rst:272 msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to use " -"on GPU instances. We listened and made improvements to all of our Jupyter " -"notebooks! Check out the updated notebooks here:" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." msgstr "" -#: ../../source/ref-changelog.md:588 -msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework/" -"tutorial-get-started-with-flower-pytorch.html)" +#: ../../source/tutorial-quickstart-mlx.rst:277 +msgid "Putting everything together we have:" msgstr "" -#: ../../source/ref-changelog.md:589 +#: ../../source/tutorial-quickstart-mlx.rst:331 msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework/tutorial-" -"use-a-federated-learning-strategy-pytorch.html)" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. 
Note that " +"``context`` enables you to get access to hyperparameters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." msgstr "" -#: ../../source/ref-changelog.md:590 msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a-" -"strategy-from-scratch-pytorch.html)" +#: ../../source/tutorial-quickstart-mlx.rst:363 msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." msgstr "" -#: ../../source/ref-changelog.md:591 +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" -"the-client-pytorch.html)" +"Congratulations! You've successfully built and run your first federated " +"learning system." msgstr "" -#: ../../source/ref-changelog.md:593 +#: ../../source/tutorial-quickstart-mlx.rst:390 msgid "" -"**Introduce optional telemetry** ([#1533](https://github.com/adap/flower/" -"pull/1533), [#1544](https://github.com/adap/flower/pull/1544), [#1584]" -"(https://github.com/adap/flower/pull/1584))" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository."
msgstr "" -#: ../../source/ref-changelog.md:595 +#: ../../source/tutorial-quickstart-pandas.rst:-1 msgid "" -"After a [request for feedback](https://github.com/adap/flower/issues/1534) " -"from the community, the Flower open-source project introduces optional " -"collection of *anonymous* usage metrics to make well-informed decisions to " -"improve Flower. Doing this enables the Flower team to understand how Flower " -"is used and what challenges users might face." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." msgstr "" -#: ../../source/ref-changelog.md:597 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.** " -"Staying true to this statement, Flower makes it easy to disable telemetry " -"for users who do not want to share anonymous usage metrics. [Read more.]" -"(https://flower.ai/docs/telemetry.html)." +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" msgstr "" -#: ../../source/ref-changelog.md:599 -msgid "" -"**Introduce (experimental) Driver API** ([#1520](https://github.com/adap/" -"flower/pull/1520), [#1525](https://github.com/adap/flower/pull/1525), [#1545]" -"(https://github.com/adap/flower/pull/1545), [#1546](https://github.com/adap/" -"flower/pull/1546), [#1550](https://github.com/adap/flower/pull/1550), [#1551]" -"(https://github.com/adap/flower/pull/1551), [#1567](https://github.com/adap/" -"flower/pull/1567))" +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" msgstr "" -#: ../../source/ref-changelog.md:601 +#: ../../source/tutorial-quickstart-pandas.rst:11 msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! 
Going forward, the Driver API " -"will be the abstraction that many upcoming features will be built on - and " -"you can start building those things now, too." +"Please refer to the `full code example " +"`_ " +"to learn more." msgstr "" -#: ../../source/ref-changelog.md:603 +#: ../../source/tutorial-quickstart-pytorch.rst:-1 msgid "" -"The Driver API also enables a new execution mode in which the server runs " -"indefinitely. Multiple individual workloads can run concurrently and start " -"and stop their execution independent of the server. This is especially " -"useful for users who want to deploy Flower in production." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." msgstr "" -#: ../../source/ref-changelog.md:605 +#: ../../source/tutorial-quickstart-pytorch.rst:6 msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward to " -"you feedback!" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:607 +#: ../../source/tutorial-quickstart-pytorch.rst:11 msgid "" -"Please note: *The Driver API is still experimental and will likely change " -"significantly over time.*" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/ref-changelog.md:609 +#: ../../source/tutorial-quickstart-pytorch.rst:26 msgid "" -"**Add new Federated Analytics with Pandas example** ([#1469](https://github." 
-"com/adap/flower/pull/1469), [#1535](https://github.com/adap/flower/" -"pull/1535))" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:611 +#: ../../source/tutorial-quickstart-pytorch.rst:117 msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics " -"with Pandas and Flower. You can find it here: [quickstart-pandas](https://" -"github.com/adap/flower/tree/main/examples/quickstart-pandas)." +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." msgstr "" -#: ../../source/ref-changelog.md:613 +#: ../../source/tutorial-quickstart-pytorch.rst:152 msgid "" -"**Add new strategies: Krum and MultiKrum** ([#1481](https://github.com/adap/" -"flower/pull/1481))" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" msgstr "" -#: ../../source/ref-changelog.md:615 +#: ../../source/tutorial-quickstart-pytorch.rst:177 msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum and " -"MultiKrum in their workloads." +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. 
Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" msgstr "" -#: ../../source/ref-changelog.md:617 +#: ../../source/tutorial-quickstart-pytorch.rst:226 msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** ([#1495](https://" -"github.com/adap/flower/pull/1495))" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function does the " +"opposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this is fairly easy in PyTorch." msgstr "" -#: ../../source/ref-changelog.md:619 +#: ../../source/tutorial-quickstart-pytorch.rst:282 msgid "" -"The C++ code example has received a substantial update to make it compatible " -"with the latest version of Flower." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/ref-changelog.md:621 +#: ../../source/tutorial-quickstart-pytorch.rst:309 msgid "" -"**General improvements** ([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.
-"com/adap/flower/pull/1506), [#1514](https://github.com/adap/flower/" -"pull/1514), [#1522](https://github.com/adap/flower/pull/1522), [#1523]" -"(https://github.com/adap/flower/pull/1523), [#1526](https://github.com/adap/" -"flower/pull/1526), [#1528](https://github.com/adap/flower/pull/1528), [#1547]" -"(https://github.com/adap/flower/pull/1547), [#1549](https://github.com/adap/" -"flower/pull/1549), [#1560](https://github.com/adap/flower/pull/1560), [#1564]" -"(https://github.com/adap/flower/pull/1564), [#1566](https://github.com/adap/" -"flower/pull/1566))" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." msgstr "" -#: ../../source/ref-changelog.md:625 +#: ../../source/tutorial-quickstart-pytorch.rst:348 msgid "" -"**Updated documentation** ([#1494](https://github.com/adap/flower/" -"pull/1494), [#1496](https://github.com/adap/flower/pull/1496), [#1500]" -"(https://github.com/adap/flower/pull/1500), [#1503](https://github.com/adap/" -"flower/pull/1503), [#1505](https://github.com/adap/flower/pull/1505), [#1524]" -"(https://github.com/adap/flower/pull/1524), [#1518](https://github.com/adap/" -"flower/pull/1518), [#1519](https://github.com/adap/flower/pull/1519), [#1515]" -"(https://github.com/adap/flower/pull/1515))" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository."
msgstr "" -#: ../../source/ref-changelog.md:629 +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#, fuzzy +msgid "Video tutorial" +msgstr "튜토리얼" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 msgid "" -"One highlight is the new [first time contributor guide](https://flower.ai/" -"docs/first-time-contributors.html): if you've never contributed on GitHub " -"before, this is the perfect place to start!" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/ref-changelog.md:635 -msgid "v1.1.0 (2022-10-31)" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" msgstr "" -#: ../../source/ref-changelog.md:639 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:641 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, `danielnugraha`, " -"`edogab33`" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" msgstr "" -#: ../../source/ref-changelog.md:645 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 msgid "" -"**Introduce Differential Privacy wrappers (preview)** ([#1357](https://" -"github.com/adap/flower/pull/1357), [#1460](https://github.com/adap/flower/" -"pull/1460))" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" msgstr "" -#: ../../source/ref-changelog.md:647 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 msgid "" -"The first (experimental) preview of pluggable Differential Privacy wrappers " -"enables easy configuration and usage of differential privacy (DP). The " -"pluggable DP wrappers enable framework-agnostic **and** strategy-agnostic " -"usage of both client-side DP and server-side DP. Head over to the Flower " -"docs, a new explainer goes into more detail." +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" msgstr "" -#: ../../source/ref-changelog.md:649 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 msgid "" -"**New iOS CoreML code example** ([#1289](https://github.com/adap/flower/" -"pull/1289))" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:651 +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" -"Flower goes iOS! 
A massive new code example shows how Flower clients can be " -"built for iOS. The code example contains both Flower iOS SDK components that " -"can be used for many tasks, and one task example running on CoreML." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." msgstr "" -#: ../../source/ref-changelog.md:653 -msgid "" -"**New FedMedian strategy** ([#1461](https://github.com/adap/flower/" -"pull/1461))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" msgstr "" -#: ../../source/ref-changelog.md:655 +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by [Yin " -"et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." msgstr "" -#: ../../source/ref-changelog.md:657 +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** ([#1493](https://" -"github.com/adap/flower/pull/1493))" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:659 +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 msgid "" -"All `Client` exceptions happening in the VCE are now logged by default and " -"not just exposed to the configured `Strategy` (via the `failures` argument)." +"Our example consists of one *server* and two *clients* all having the " +"same model." 
msgstr "" -#: ../../source/ref-changelog.md:661 +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 msgid "" -"**Improve Virtual Client Engine internals** ([#1401](https://github.com/adap/" -"flower/pull/1401), [#1453](https://github.com/adap/flower/pull/1453))" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." msgstr "" -#: ../../source/ref-changelog.md:663 +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE now " -"uses Ray 2.0 under the hood, the value type of the `client_resources` " -"dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" msgstr "" -#: ../../source/ref-changelog.md:665 -msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual Client " -"Engine**" +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 +msgid "Since we want to use scikit-learn, let's go ahead and install it:" msgstr "" -#: ../../source/ref-changelog.md:667 -msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and " -"`NumPyClient`) methods." 
+#: ../../source/tutorial-quickstart-scikitlearn.rst:36 +msgid "Or simply install all dependencies using Poetry:" msgstr "" -#: ../../source/ref-changelog.md:669 +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 msgid "" -"**Provide type information to packages using** `flwr` ([#1377](https://" -"github.com/adap/flower/pull/1377))" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within ``utils.py``. The " +"``utils.py`` contains different functions defining all the machine " +"learning basics:" msgstr "" -#: ../../source/ref-changelog.md:671 -msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that the " -"package is typed. This enables typing support for projects or packages that " -"use `flwr` by enabling them to improve their code using static type checkers " -"like `mypy`." +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +#, fuzzy +msgid "``get_model_parameters()``" +msgstr "모델 매개변수." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" msgstr "" -#: ../../source/ref-changelog.md:673 -msgid "" -"**Updated code example** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid "``set_model_params()``" msgstr "" -#: ../../source/ref-changelog.md:675 -msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." 
+#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" msgstr "" -#: ../../source/ref-changelog.md:677 -msgid "" -"**Updated documentation** ([#1355](https://github.com/adap/flower/" -"pull/1355), [#1558](https://github.com/adap/flower/pull/1558), [#1379]" -"(https://github.com/adap/flower/pull/1379), [#1380](https://github.com/adap/" -"flower/pull/1380), [#1381](https://github.com/adap/flower/pull/1381), [#1332]" -"(https://github.com/adap/flower/pull/1332), [#1391](https://github.com/adap/" -"flower/pull/1391), [#1403](https://github.com/adap/flower/pull/1403), [#1364]" -"(https://github.com/adap/flower/pull/1364), [#1409](https://github.com/adap/" -"flower/pull/1409), [#1419](https://github.com/adap/flower/pull/1419), [#1444]" -"(https://github.com/adap/flower/pull/1444), [#1448](https://github.com/adap/" -"flower/pull/1448), [#1417](https://github.com/adap/flower/pull/1417), [#1449]" -"(https://github.com/adap/flower/pull/1449), [#1465](https://github.com/adap/" -"flower/pull/1465), [#1467](https://github.com/adap/flower/pull/1467))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "``set_initial_params()``" msgstr "" -#: ../../source/ref-changelog.md:679 -msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Initializes the model parameters that the Flower server will ask for" msgstr "" -#: ../../source/ref-changelog.md:681 +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 msgid "" -"**Restructured documentation** ([#1387](https://github.com/adap/flower/" -"pull/1387))" +"Please check out ``utils.py`` `here " +"`_ for more details. The pre-defined functions are used in" +" the ``client.py`` and imported. 
The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" msgstr "" -#: ../../source/ref-changelog.md:683 +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 msgid "" -"The documentation has been restructured to make it easier to navigate. This " -"is just the first step in a larger effort to make the Flower documentation " -"the best documentation of any project ever. Stay tuned!" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " +"argument." msgstr "" -#: ../../source/ref-changelog.md:685 +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 msgid "" -"**Open in Colab button** ([#1389](https://github.com/adap/flower/pull/1389))" +"Next, the logistic regression model is defined and initialized with " +"``utils.set_initial_params()``." msgstr "" -#: ../../source/ref-changelog.md:687 +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a new " -"`Open in Colab` button. No need to install anything on your local machine, " -"you can now use and learn about Flower in your browser, it's only a single " -"click away." +"The Flower server interacts with clients through an interface called " +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." 
msgstr "" -#: ../../source/ref-changelog.md:689 +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), [#1472](https://github." -"com/adap/flower/pull/1472), [#1473](https://github.com/adap/flower/" -"pull/1473), [#1474](https://github.com/adap/flower/pull/1474), [#1475]" -"(https://github.com/adap/flower/pull/1475))" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/ref-changelog.md:691 +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#, fuzzy +msgid "``set_parameters`` (optional)" +msgstr ":code:`set_parameters (선택사항)`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved (many " -"small changes and fixes)." 
+"update the local model weights with the parameters received from the " +"server" msgstr "" -#: ../../source/ref-changelog.md:697 -msgid "v1.0.0 (2022-07-28)" +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +msgid "is directly imported with ``utils.set_model_params()``" msgstr "" -#: ../../source/ref-changelog.md:699 -msgid "Highlights" +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 +msgid "set the local model weights" msgstr "" -#: ../../source/ref-changelog.md:701 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "train the local model" msgstr "" -#: ../../source/ref-changelog.md:702 -msgid "All `Client`/`NumPyClient` methods are now optional" +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 +#, fuzzy +msgid "return the updated local model weights" +msgstr "현재 로컬 모델 파라미터를 반환합니다." + +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "test the local model" msgstr "" -#: ../../source/ref-changelog.md:703 -msgid "Configurable `get_parameters`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 +msgid "The methods can be implemented in the following way:" msgstr "" -#: ../../source/ref-changelog.md:704 +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 msgid "" -"Tons of small API cleanups resulting in a more coherent developer experience" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" msgstr "" -#: ../../source/ref-changelog.md:708 +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub Contributors](https://github." -"com/adap/flower/graphs/contributors) order):" +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. 
If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." msgstr "" -#: ../../source/ref-changelog.md:710 +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 msgid "" -"[@rtaiello](https://github.com/rtaiello), [@g-pichler](https://github.com/g-" -"pichler), [@rob-luke](https://github.com/rob-luke), [@andreea-zaharia]" -"(https://github.com/andreea-zaharia), [@kinshukdua](https://github.com/" -"kinshukdua), [@nfnt](https://github.com/nfnt), [@tatiana-s](https://github." -"com/tatiana-s), [@TParcollet](https://github.com/TParcollet), [@vballoli]" -"(https://github.com/vballoli), [@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), [@hei411](https://github." -"com/hei411), [@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), [@Rubiel1](https://github." -"com/Rubiel1), [@FANTOME-PAN](https://github.com/FANTOME-PAN), [@Rono-BC]" -"(https://github.com/Rono-BC), [@lbhm](https://github.com/lbhm), [@sishtiaq]" -"(https://github.com/sishtiaq), [@remde](https://github.com/remde), [@Jueun-" -"Park](https://github.com/Jueun-Park), [@architjen](https://github.com/" -"architjen), [@PratikGarai](https://github.com/PratikGarai), [@mrinaald]" -"(https://github.com/mrinaald), [@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), [@sancarlim](https://github." 
-"com/sancarlim), [@gubertoli](https://github.com/gubertoli), [@Vingt100]" -"(https://github.com/Vingt100), [@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), [@jafermarq](https://github.com/" -"jafermarq), [@sisco0](https://github.com/sisco0), [@akhilmathurs](https://" -"github.com/akhilmathurs), [@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), [@pedropgusmao]" -"(https://github.com/pedropgusmao), [@tanertopal](https://github.com/" -"tanertopal), [@danieljanes](https://github.com/danieljanes)." +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." msgstr "" -#: ../../source/ref-changelog.md:714 -msgid "" -"**All arguments must be passed as keyword arguments** ([#1338](https://" -"github.com/adap/flower/pull/1338))" +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +msgid "``server.py``, import Flower and start the server:" msgstr "" -#: ../../source/ref-changelog.md:716 +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not longer " -"supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword for " -"each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", client=FlowerClient())`)." +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." 
msgstr "" -#: ../../source/ref-changelog.md:718 +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` ([#1317](https://github.com/adap/flower/" -"pull/1317))" +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." msgstr "" -#: ../../source/ref-changelog.md:720 +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": 600.0}" -"`, `start_server` and `start_simulation` now expect a configuration object " -"of type `flwr.server.ServerConfig`. `ServerConfig` takes the same arguments " -"that as the previous config dict, but it makes writing type-safe code easier " -"and the default parameters values more transparent." +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" msgstr "" -#: ../../source/ref-changelog.md:722 +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 msgid "" -"**Rename built-in strategy parameters for clarity** ([#1334](https://github." -"com/adap/flower/pull/1334))" +"Once the server is running we can start the clients in different " +"terminals. 
Open a new terminal and start the first client:" msgstr "" -#: ../../source/ref-changelog.md:724 +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "Open another terminal and start the second client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" msgstr "" -#: ../../source/ref-changelog.md:726 -msgid "`fraction_eval` --> `fraction_evaluate`" +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." msgstr "" -#: ../../source/ref-changelog.md:727 -msgid "`min_eval_clients` --> `min_evaluate_clients`" +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." msgstr "" -#: ../../source/ref-changelog.md:728 -msgid "`eval_fn` --> `evaluate_fn`" +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/ref-changelog.md:730 +#: ../../source/tutorial-quickstart-tensorflow.rst:6 msgid "" -"**Update default arguments of built-in strategies** ([#1278](https://github." -"com/adap/flower/pull/1278))" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." 
msgstr "" -#: ../../source/ref-changelog.md:732 +#: ../../source/tutorial-quickstart-tensorflow.rst:11 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently available " -"clients for training and evaluation. Projects that relied on the previous " -"default values can get the previous behaviour by initializing the strategy " -"in the following way:" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/ref-changelog.md:734 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +#: ../../source/tutorial-quickstart-tensorflow.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" msgstr "" -#: ../../source/ref-changelog.md:736 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` ([#1334](https://github." -"com/adap/flower/pull/1334))" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -#: ../../source/ref-changelog.md:738 +#: ../../source/tutorial-quickstart-tensorflow.rst:141 msgid "" -"The `Strategy` method `evaluate` now receives the current round of federated " -"learning/evaluation as the first parameter." +"Next, we need a model. 
We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -#: ../../source/ref-changelog.md:740 +#: ../../source/tutorial-quickstart-tensorflow.rst:170 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" msgstr "" -#: ../../source/ref-changelog.md:742 +#: ../../source/tutorial-quickstart-tensorflow.rst:203 msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), and " -"(3) a config dictionary (`config`)." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/ref-changelog.md:744 +#: ../../source/tutorial-quickstart-tensorflow.rst:234 msgid "" -"**Rename** `rnd` **to** `server_round` ([#1321](https://github.com/adap/" -"flower/pull/1321))" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." msgstr "" -#: ../../source/ref-changelog.md:746 +#: ../../source/tutorial-quickstart-tensorflow.rst:270 msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. To " -"improve reaability and avoid confusion with *random*, this parameter has " -"been renamed from `rnd` to `server_round`." +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/ref-changelog.md:748 +#: ../../source/tutorial-quickstart-tensorflow.rst:282 msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` ([#1273](https://github.com/" -"adap/flower/pull/1273))" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/ref-changelog.md:750 +#: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" -"The experimental package `flwr.dataset` was migrated to Flower Baselines." +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." 
msgstr "" -#: ../../source/ref-changelog.md:752 -msgid "" -"**Remove experimental strategies** ([#1280](https://github.com/adap/flower/" -"pull/1280))" +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" msgstr "" -#: ../../source/ref-changelog.md:754 +#: ../../source/tutorial-quickstart-xgboost.rst:13 +msgid "Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:15 msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." msgstr "" -#: ../../source/ref-changelog.md:756 +#: ../../source/tutorial-quickstart-xgboost.rst:21 msgid "" -"**Rename** `Weights` **to** `NDArrays` ([#1258](https://github.com/adap/" -"flower/pull/1258), [#1259](https://github.com/adap/flower/pull/1259))" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." msgstr "" -#: ../../source/ref-changelog.md:758 -msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "Why federated XGBoost?" msgstr "" -#: ../../source/ref-changelog.md:760 +#: ../../source/tutorial-quickstart-xgboost.rst:27 msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** `start_server` " -"([#1258](https://github.com/adap/flower/pull/1258), [#1259](https://github." 
-"com/adap/flower/pull/1259))" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." msgstr "" -#: ../../source/ref-changelog.md:762 +#: ../../source/tutorial-quickstart-xgboost.rst:31 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been a " -"historic artefact, in this release it is finally gone for good." +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." msgstr "" -#: ../../source/ref-changelog.md:764 +#: ../../source/tutorial-quickstart-xgboost.rst:36 msgid "" -"**Make** `get_parameters` **configurable** ([#1242](https://github.com/adap/" -"flower/pull/1242))" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." msgstr "" -#: ../../source/ref-changelog.md:766 -msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." 
+#: ../../source/tutorial-quickstart-xgboost.rst:46 +msgid "Environment Setup" msgstr "" -#: ../../source/ref-changelog.md:768 +#: ../../source/tutorial-quickstart-xgboost.rst:48 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." msgstr "" -#: ../../source/ref-changelog.md:770 +#: ../../source/tutorial-quickstart-xgboost.rst:51 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the consistency " -"between `start_simulation` and `start_server` and makes transitioning " -"between the two easier." +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" msgstr "" -#: ../../source/ref-changelog.md:774 +#: ../../source/tutorial-quickstart-xgboost.rst:57 msgid "" -"**Support Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" msgstr "" -#: ../../source/ref-changelog.md:776 +#: ../../source/tutorial-quickstart-xgboost.rst:67 msgid "" -"The previous Flower release introduced experimental support for Python 3.10, " -"this release declares Python 3.10 support as stable." +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." 
msgstr "" -#: ../../source/ref-changelog.md:778 +#: ../../source/tutorial-quickstart-xgboost.rst:71 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** ([#1260]" -"(https://github.com/adap/flower/pull/1260), [#1277](https://github.com/adap/" -"flower/pull/1277))" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" msgstr "" -#: ../../source/ref-changelog.md:780 -msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that " -"implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +#: ../../source/tutorial-quickstart-xgboost.rst:99 +msgid "Dataset partition and hyper-parameter selection" msgstr "" -#: ../../source/ref-changelog.md:782 +#: ../../source/tutorial-quickstart-xgboost.rst:101 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` ([#1281]" -"(https://github.com/adap/flower/pull/1281))" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" msgstr "" -#: ../../source/ref-changelog.md:784 +#: ../../source/tutorial-quickstart-xgboost.rst:115 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server` " -"instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the " -"Virtual Client Engine." +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (``IidPartitioner(num_partitions=30)``). 
Then, we load the " +"partition for the given client based on ``partition_id``:" msgstr "" -#: ../../source/ref-changelog.md:786 +#: ../../source/tutorial-quickstart-xgboost.rst:135 msgid "" -"**Update code examples** ([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), [#1282](https://github." -"com/adap/flower/pull/1282))" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for ``xgboost`` package." msgstr "" -#: ../../source/ref-changelog.md:788 +#: ../../source/tutorial-quickstart-xgboost.rst:149 msgid "" -"Many code examples received small or even large maintenance updates, among " -"them are" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" msgstr "" -#: ../../source/ref-changelog.md:790 -msgid "`scikit-learn`" +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "Finally, we define the hyper-parameters used for XGBoost training." msgstr "" -#: ../../source/ref-changelog.md:791 -msgid "`simulation_pytorch`" +#: ../../source/tutorial-quickstart-xgboost.rst:190 +msgid "" +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." msgstr "" -#: ../../source/ref-changelog.md:792 -msgid "`quickstart_pytorch`" +#: ../../source/tutorial-quickstart-xgboost.rst:195 +msgid "Flower client definition for XGBoost" msgstr "" -#: ../../source/ref-changelog.md:793 -msgid "`quickstart_simulation`" +#: ../../source/tutorial-quickstart-xgboost.rst:197 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." 
msgstr "" -#: ../../source/ref-changelog.md:794 -msgid "`quickstart_tensorflow`" +#: ../../source/tutorial-quickstart-xgboost.rst:219 +msgid "" +"All required parameters defined above are passed to ``XgbClient``'s " +"constructor." msgstr "" -#: ../../source/ref-changelog.md:795 -msgid "`advanced_tensorflow`" +#: ../../source/tutorial-quickstart-xgboost.rst:221 +msgid "" +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." msgstr "" -#: ../../source/ref-changelog.md:797 +#: ../../source/tutorial-quickstart-xgboost.rst:236 msgid "" -"**Remove the obsolete simulation example** ([#1328](https://github.com/adap/" -"flower/pull/1328))" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." msgstr "" -#: ../../source/ref-changelog.md:799 +#: ../../source/tutorial-quickstart-xgboost.rst:278 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" msgstr "" -#: ../../source/ref-changelog.md:801 +#: ../../source/tutorial-quickstart-xgboost.rst:298 msgid "" -"**Update documentation** ([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), [#1251](https://github." 
-"com/adap/flower/pull/1251), [#1257](https://github.com/adap/flower/" -"pull/1257), [#1267](https://github.com/adap/flower/pull/1267), [#1268]" -"(https://github.com/adap/flower/pull/1268), [#1300](https://github.com/adap/" -"flower/pull/1300), [#1304](https://github.com/adap/flower/pull/1304), [#1305]" -"(https://github.com/adap/flower/pull/1305), [#1307](https://github.com/adap/" -"flower/pull/1307))" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." msgstr "" -#: ../../source/ref-changelog.md:803 +#: ../../source/tutorial-quickstart-xgboost.rst:330 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the `flwr." -"common` module in the API reference, includes support for markdown-based " -"documentation, migrates the changelog from `.rst` to `.md`, and fixes a " -"number of smaller details!" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." msgstr "" -#: ../../source/ref-changelog.md:805 ../../source/ref-changelog.md:860 -#: ../../source/ref-changelog.md:929 ../../source/ref-changelog.md:968 -msgid "**Minor updates**" +#: ../../source/tutorial-quickstart-xgboost.rst:333 +msgid "" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" msgstr "" -#: ../../source/ref-changelog.md:807 +#: ../../source/tutorial-quickstart-xgboost.rst:350 msgid "" -"Add round number to fit and evaluate log messages ([#1266](https://github." -"com/adap/flower/pull/1266))" +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. 
The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." msgstr "" -#: ../../source/ref-changelog.md:808 +#: ../../source/tutorial-quickstart-xgboost.rst:360 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example ([#847]" -"(https://github.com/adap/flower/pull/847))" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." msgstr "" -#: ../../source/ref-changelog.md:809 +#: ../../source/tutorial-quickstart-xgboost.rst:364 msgid "" -"Update developer tooling ([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), [#1301](https://github." -"com/adap/flower/pull/1301), [#1310](https://github.com/adap/flower/" -"pull/1310))" +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." msgstr "" -#: ../../source/ref-changelog.md:810 +#: ../../source/tutorial-quickstart-xgboost.rst:367 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:401 msgid "" -"Rename ProtoBuf messages to improve consistency ([#1214](https://github.com/" -"adap/flower/pull/1214), [#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. 
The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." msgstr "" -#: ../../source/ref-changelog.md:812 -msgid "v0.19.0 (2022-05-18)" +#: ../../source/tutorial-quickstart-xgboost.rst:406 +msgid "Then, we start the server:" msgstr "" -#: ../../source/ref-changelog.md:816 -msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** ([#919](https://" -"github.com/adap/flower/pull/919), [#1127](https://github.com/adap/flower/" -"pull/1127), [#914](https://github.com/adap/flower/pull/914))" +#: ../../source/tutorial-quickstart-xgboost.rst:418 +msgid "Tree-based bagging aggregation" msgstr "" -#: ../../source/ref-changelog.md:818 +#: ../../source/tutorial-quickstart-xgboost.rst:420 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how to " -"use [Flower Baselines](https://flower.ai/docs/using-baselines.html). With " -"this first preview release we're also inviting the community to [contribute " -"their own baselines](https://flower.ai/docs/baselines/how-to-contribute-" -"baselines.html)." +"You must be curious about how bagging aggregation works. Let's look into " +"the details." msgstr "" -#: ../../source/ref-changelog.md:820 +#: ../../source/tutorial-quickstart-xgboost.rst:422 msgid "" -"**C++ client SDK (preview) and code example** ([#1111](https://github.com/" -"adap/flower/pull/1111))" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" msgstr "" -#: ../../source/ref-changelog.md:822 +#: ../../source/tutorial-quickstart-xgboost.rst:519 msgid "" -"Preview support for Flower clients written in C++. 
The C++ preview includes " -"a Flower client SDK and a quickstart code example that demonstrates a simple " -"C++ client using the SDK." +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" msgstr "" -#: ../../source/ref-changelog.md:824 +#: ../../source/tutorial-quickstart-xgboost.rst:579 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** ([#1135]" -"(https://github.com/adap/flower/pull/1135))" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." msgstr "" -#: ../../source/ref-changelog.md:826 +#: ../../source/tutorial-quickstart-xgboost.rst:584 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due to " -"be released in October. This Flower release adds experimental support for " -"both Python versions." +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." msgstr "" -#: ../../source/ref-changelog.md:828 -msgid "" -"**Aggregate custom metrics through user-provided functions** ([#1144]" -"(https://github.com/adap/flower/pull/1144))" +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "Launch Federated XGBoost!" msgstr "" -#: ../../source/ref-changelog.md:830 +#: ../../source/tutorial-quickstart-xgboost.rst:664 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to " -"customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. 
The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." msgstr "" -#: ../../source/ref-changelog.md:832 +#: ../../source/tutorial-quickstart-xgboost.rst:668 msgid "" -"**User-configurable round timeout** ([#1162](https://github.com/adap/flower/" -"pull/1162))" +"The full `source code `_ for this example can be found in ``examples" +"/xgboost-quickstart``." msgstr "" -#: ../../source/ref-changelog.md:834 -msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary contains a " -"`round_timeout` key (with a `float` value in seconds), the server will wait " -"*at least* `round_timeout` seconds before it closes the connection." +#: ../../source/tutorial-quickstart-xgboost.rst:673 +msgid "Comprehensive Federated XGBoost" msgstr "" -#: ../../source/ref-changelog.md:836 +#: ../../source/tutorial-quickstart-xgboost.rst:675 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used at " -"the same time in all built-in strategies** ([#1091](https://github.com/adap/" -"flower/pull/1091))" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" msgstr "" -#: ../../source/ref-changelog.md:838 -msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., client-" -"side) and centralized evaluation (i.e., server-side) in the same round. " -"Federated evaluation can be disabled by setting `fraction_eval` to `0.0`." 
+#: ../../source/tutorial-quickstart-xgboost.rst:685 +msgid "Cyclic training" msgstr "" -#: ../../source/ref-changelog.md:840 +#: ../../source/tutorial-quickstart-xgboost.rst:687 msgid "" -"**Two new Jupyter Notebook tutorials** ([#1141](https://github.com/adap/" -"flower/pull/1141))" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." msgstr "" -#: ../../source/ref-changelog.md:842 -msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain basic " -"and intermediate Flower features:" +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" msgstr "" -#: ../../source/ref-changelog.md:844 +#: ../../source/tutorial-quickstart-xgboost.rst:733 msgid "" -"*An Introduction to Federated Learning*: [Open in Colab](https://colab." -"research.google.com/github/adap/flower/blob/main/tutorials/Flower-1-Intro-to-" -"FL-PyTorch.ipynb)" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." msgstr "" -#: ../../source/ref-changelog.md:846 +#: ../../source/tutorial-quickstart-xgboost.rst:775 msgid "" -"*Using Strategies in Federated Learning*: [Open in Colab](https://colab." 
-"research.google.com/github/adap/flower/blob/main/tutorials/Flower-2-" -"Strategies-in-FL-PyTorch.ipynb)" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." msgstr "" -#: ../../source/ref-changelog.md:848 +#: ../../source/tutorial-quickstart-xgboost.rst:778 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** ([#1076]" -"(https://github.com/adap/flower/pull/1076))" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" msgstr "" -#: ../../source/ref-changelog.md:850 -msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +#: ../../source/tutorial-quickstart-xgboost.rst:840 +msgid "Customised data partitioning" msgstr "" -#: ../../source/ref-changelog.md:852 +#: ../../source/tutorial-quickstart-xgboost.rst:842 msgid "" -"**New advanced PyTorch code example** ([#1007](https://github.com/adap/" -"flower/pull/1007))" +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." msgstr "" -#: ../../source/ref-changelog.md:854 -msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." 
+#: ../../source/tutorial-quickstart-xgboost.rst:873 +msgid "Customised centralised/distributed evaluation" msgstr "" -#: ../../source/ref-changelog.md:856 +#: ../../source/tutorial-quickstart-xgboost.rst:875 msgid "" -"**New JAX code example** ([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"To facilitate centralised evaluation, we define a function in " +"``server_utils.py``:" msgstr "" -#: ../../source/ref-changelog.md:858 +#: ../../source/tutorial-quickstart-xgboost.rst:907 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +"This function returns a evaluation function which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." msgstr "" -#: ../../source/ref-changelog.md:862 +#: ../../source/tutorial-quickstart-xgboost.rst:911 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." 
msgstr "" -#: ../../source/ref-changelog.md:863 -msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +#: ../../source/tutorial-quickstart-xgboost.rst:916 +msgid "Flower simulation" msgstr "" -#: ../../source/ref-changelog.md:864 +#: ../../source/tutorial-quickstart-xgboost.rst:918 msgid "" -"New documentation for [implementing strategies](https://flower.ai/docs/" -"framework/how-to-implement-strategies.html) ([#1097](https://github.com/adap/" -"flower/pull/1097), [#1175](https://github.com/adap/flower/pull/1175))" +"We also provide an example code (``sim.py``) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." msgstr "" -#: ../../source/ref-changelog.md:865 +#: ../../source/tutorial-quickstart-xgboost.rst:954 msgid "" -"New mobile-friendly documentation theme ([#1174](https://github.com/adap/" -"flower/pull/1174))" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" msgstr "" -#: ../../source/ref-changelog.md:866 +#: ../../source/tutorial-quickstart-xgboost.rst:1010 msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) ([#1205](https://github.com/adap/" -"flower/pull/1205))" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." 
msgstr "" -#: ../../source/ref-changelog.md:870 -msgid "" -"**Remove deprecated support for Python 3.6** ([#871](https://github.com/adap/" -"flower/pull/871))" +#: ../../source/tutorial-quickstart-xgboost.rst:1014 +msgid "Then, we define the strategies and other hyper-parameters:" msgstr "" -#: ../../source/ref-changelog.md:871 +#: ../../source/tutorial-quickstart-xgboost.rst:1065 msgid "" -"**Remove deprecated KerasClient** ([#857](https://github.com/adap/flower/" -"pull/857))" +"After that, we start the simulation by calling " +"``fl.simulation.start_simulation``:" msgstr "" -#: ../../source/ref-changelog.md:872 +#: ../../source/tutorial-quickstart-xgboost.rst:1085 msgid "" -"**Remove deprecated no-op extra installs** ([#973](https://github.com/adap/" -"flower/pull/973))" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" msgstr "" -#: ../../source/ref-changelog.md:873 -msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +#: ../../source/tutorial-quickstart-xgboost.rst:1126 +msgid "Arguments parser" msgstr "" -#: ../../source/ref-changelog.md:874 +#: ../../source/tutorial-quickstart-xgboost.rst:1128 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** ([#1107]" -"(https://github.com/adap/flower/pull/1107))" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. 
" +"Let's first see the sever side:" msgstr "" -#: ../../source/ref-changelog.md:875 +#: ../../source/tutorial-quickstart-xgboost.rst:1175 msgid "" -"**Remove deprecated DefaultStrategy strategy** ([#1142](https://github.com/" -"adap/flower/pull/1142))" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." msgstr "" -#: ../../source/ref-changelog.md:876 -msgid "" -"**Remove deprecated support for eval_fn accuracy return value** ([#1142]" -"(https://github.com/adap/flower/pull/1142))" +#: ../../source/tutorial-quickstart-xgboost.rst:1180 +msgid "Then, the argument parser on client side:" msgstr "" -#: ../../source/ref-changelog.md:877 +#: ../../source/tutorial-quickstart-xgboost.rst:1234 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." 
msgstr "" -#: ../../source/ref-changelog.md:879 -msgid "v0.18.0 (2022-02-28)" +#: ../../source/tutorial-quickstart-xgboost.rst:1239 +msgid "We also have an argument parser for simulation:" msgstr "" -#: ../../source/ref-changelog.md:883 -msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), [#872]" -"(https://github.com/adap/flower/pull/872), [#833](https://github.com/adap/" -"flower/pull/833), [#1036](https://github.com/adap/flower/pull/1036))" +#: ../../source/tutorial-quickstart-xgboost.rst:1317 +msgid "This integrates all arguments for both client and server sides." msgstr "" -#: ../../source/ref-changelog.md:885 -msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) now " -"work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." +#: ../../source/tutorial-quickstart-xgboost.rst:1320 +msgid "Example commands" msgstr "" -#: ../../source/ref-changelog.md:887 +#: ../../source/tutorial-quickstart-xgboost.rst:1322 msgid "" -"**New Jupyter Notebook code example** ([#833](https://github.com/adap/flower/" -"pull/833))" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" msgstr "" -#: ../../source/ref-changelog.md:889 -msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower simulations " -"using the Virtual Client Engine through Jupyter Notebook (incl. Google " -"Colab)." 
+#: ../../source/tutorial-quickstart-xgboost.rst:1329 +msgid "Then, on each client terminal, we start the clients:" msgstr "" -#: ../../source/ref-changelog.md:891 -msgid "" -"**Client properties (feature preview)** ([#795](https://github.com/adap/" -"flower/pull/795))" +#: ../../source/tutorial-quickstart-xgboost.rst:1335 +msgid "To run the same experiment with Flower simulation:" msgstr "" -#: ../../source/ref-changelog.md:893 +#: ../../source/tutorial-quickstart-xgboost.rst:1341 msgid "" -"Clients can implement a new method `get_properties` to enable server-side " -"strategies to query client properties." +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive``." msgstr "" -#: ../../source/ref-changelog.md:895 -msgid "" -"**Experimental Android support with TFLite** ([#865](https://github.com/adap/" -"flower/pull/865))" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" msgstr "" -#: ../../source/ref-changelog.md:897 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has become a " -"lot easier." +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." msgstr "" -#: ../../source/ref-changelog.md:899 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 msgid "" -"The example uses TFLite on the client side, along with a new `FedAvgAndroid` " -"strategy. 
The Android client and `FedAvgAndroid` are still experimental, but " -"they are a first step towards a fully-fledged Android SDK and a unified " -"`FedAvg` implementation that integrated the new functionality from " -"`FedAvgAndroid`." +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." msgstr "" -#: ../../source/ref-changelog.md:901 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy msgid "" -"**Make gRPC keepalive time user-configurable and decrease default keepalive " -"time** ([#1069](https://github.com/adap/flower/pull/1069))" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." msgstr "" +"`Star Flower on GitHub `__ ⭐️ Slack의 오픈소스" +" Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 있습니다: `Slack 가입`__ 🌼 ``#introductions``채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 " +"있으시면``#questions`` 채널로 방문해 주시기 바랍니다." -#: ../../source/ref-changelog.md:903 -msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, Microsoft " -"Azure). Users can configure the keepalive time to customize the gRPC stack " -"based on specific requirements." 
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +msgid "Let's build a new ``Strategy`` from scratch! 🌼" msgstr "" -#: ../../source/ref-changelog.md:905 -msgid "" -"**New differential privacy example using Opacus and PyTorch** ([#805]" -"(https://github.com/adap/flower/pull/805))" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" msgstr "" -#: ../../source/ref-changelog.md:907 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 msgid "" -"A new code example (`opacus`) demonstrates differentially-private federated " -"learning with Opacus, PyTorch, and Flower." +"Before we begin with the actual code, let's make sure that we have " +"everything we need." msgstr "" -#: ../../source/ref-changelog.md:909 -msgid "" -"**New Hugging Face Transformers code example** ([#863](https://github.com/" -"adap/flower/pull/863))" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" msgstr "" -#: ../../source/ref-changelog.md:911 -msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of Hugging " -"Face Transformers with Flower." 
+#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" msgstr "" -#: ../../source/ref-changelog.md:913 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 msgid "" -"**New MLCube code example** ([#779](https://github.com/adap/flower/" -"pull/779), [#1034](https://github.com/adap/flower/pull/1034), [#1065]" -"(https://github.com/adap/flower/pull/1065), [#1090](https://github.com/adap/" -"flower/pull/1090))" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" msgstr "" -#: ../../source/ref-changelog.md:915 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube with " -"Flower." +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." 
msgstr "" -#: ../../source/ref-changelog.md:917 -msgid "" -"**SSL-enabled server and client** ([#842](https://github.com/adap/flower/" -"pull/842), [#844](https://github.com/adap/flower/pull/844), [#845](https://" -"github.com/adap/flower/pull/845), [#847](https://github.com/adap/flower/" -"pull/847), [#993](https://github.com/adap/flower/pull/993), [#994](https://" -"github.com/adap/flower/pull/994))" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" msgstr "" -#: ../../source/ref-changelog.md:919 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 msgid "" -"SSL enables secure encrypted connections between clients and servers. This " -"release open-sources the Flower secure gRPC implementation to make encrypted " -"communication channels accessible to all Flower users." +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." 
msgstr "" -#: ../../source/ref-changelog.md:921 -msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** ([#885](https://" -"github.com/adap/flower/pull/885), [#895](https://github.com/adap/flower/" -"pull/895))" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" msgstr "" -#: ../../source/ref-changelog.md:923 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive Federated " -"Optimization paper." +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" msgstr "" -#: ../../source/ref-changelog.md:925 -msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** ([#860]" -"(https://github.com/adap/flower/pull/860))" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" msgstr "" -#: ../../source/ref-changelog.md:927 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." 
+"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." msgstr "" -#: ../../source/ref-changelog.md:931 -msgid "" -"Update `num_examples` calculation in PyTorch code examples in ([#909]" -"(https://github.com/adap/flower/pull/909))" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" msgstr "" -#: ../../source/ref-changelog.md:932 -msgid "" -"Expose Flower version through `flwr.__version__` ([#952](https://github.com/" -"adap/flower/pull/952))" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" msgstr "" -#: ../../source/ref-changelog.md:933 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 msgid "" -"`start_server` in `app.py` now returns a `History` object containing metrics " -"from training ([#974](https://github.com/adap/flower/pull/974))" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." 
msgstr "" -#: ../../source/ref-changelog.md:934 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable ([#978]" -"(https://github.com/adap/flower/pull/978))" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" msgstr "" -#: ../../source/ref-changelog.md:935 -msgid "" -"Increase sleep time after server start to three seconds in all code examples " -"([#1086](https://github.com/adap/flower/pull/1086))" +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" msgstr "" -#: ../../source/ref-changelog.md:936 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 msgid "" -"Added a new FAQ section to the documentation ([#948](https://github.com/adap/" -"flower/pull/948))" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." 
msgstr "" -#: ../../source/ref-changelog.md:937 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." msgstr "" -#: ../../source/ref-changelog.md:941 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" msgstr "" -#: ../../source/ref-changelog.md:943 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in an " -"upcoming release." +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." 
msgstr "" -#: ../../source/ref-changelog.md:945 -msgid "v0.17.0 (2021-09-24)" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." msgstr "" -#: ../../source/ref-changelog.md:949 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 msgid "" -"**Experimental virtual client engine** ([#781](https://github.com/adap/" -"flower/pull/781) [#790](https://github.com/adap/flower/pull/790) [#791]" -"(https://github.com/adap/flower/pull/791))" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." msgstr "" -#: ../../source/ref-changelog.md:951 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 msgid "" -"One of Flower's goals is to enable research at scale. This release enables a " -"first (experimental) peek at a major new feature, codenamed the virtual " -"client engine. Virtual clients enable simulations that scale to a (very) " -"large number of clients on a single machine or compute cluster. 
The easiest " -"way to test the new functionality is to look at the two new code examples " -"called `quickstart_simulation` and `simulation_pytorch`." +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" msgstr "" -#: ../../source/ref-changelog.md:953 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 msgid "" -"The feature is still experimental, so there's no stability guarantee for the " -"API. It's also not quite ready for prime time and comes with a few known " -"caveats. However, those who are curious are encouraged to try it out and " -"share their thoughts." +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" msgstr "" -#: ../../source/ref-changelog.md:955 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 msgid "" -"**New built-in strategies** ([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." msgstr "" -#: ../../source/ref-changelog.md:957 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. 
" -"Implementation based on https://arxiv.org/abs/2003.00295" +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." msgstr "" -#: ../../source/ref-changelog.md:958 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" msgstr "" -#: ../../source/ref-changelog.md:960 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 msgid "" -"**New PyTorch Lightning code example** ([#617](https://github.com/adap/" -"flower/pull/617))" +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" msgstr "" -#: ../../source/ref-changelog.md:962 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 msgid "" -"**New Variational Auto-Encoder code example** ([#752](https://github.com/" -"adap/flower/pull/752))" +"This works as expected, ten clients are training for three rounds of " +"federated learning." msgstr "" -#: ../../source/ref-changelog.md:964 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 msgid "" -"**New scikit-learn code example** ([#748](https://github.com/adap/flower/" -"pull/748))" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. 
Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." msgstr "" -#: ../../source/ref-changelog.md:966 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 msgid "" -"**New experimental TensorBoard strategy** ([#789](https://github.com/adap/" -"flower/pull/789))" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." msgstr "" -#: ../../source/ref-changelog.md:970 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 msgid "" -"Improved advanced TensorFlow code example ([#769](https://github.com/adap/" -"flower/pull/769))" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." msgstr "" -#: ../../source/ref-changelog.md:971 -msgid "" -"Warning when `min_available_clients` is misconfigured ([#830](https://github." -"com/adap/flower/pull/830))" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" msgstr "" -#: ../../source/ref-changelog.md:972 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 msgid "" -"Improved gRPC server docs ([#841](https://github.com/adap/flower/pull/841))" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." 
msgstr "" -#: ../../source/ref-changelog.md:973 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 msgid "" -"Improved error message in `NumPyClient` ([#851](https://github.com/adap/" -"flower/pull/851))" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 msgid "" -"Improved PyTorch quickstart code example ([#852](https://github.com/adap/" -"flower/pull/852))" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" msgstr "" -#: ../../source/ref-changelog.md:978 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 msgid "" -"**Disabled final distributed evaluation** ([#800](https://github.com/adap/" -"flower/pull/800))" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." 
msgstr "" -#: ../../source/ref-changelog.md:980 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on " -"all connected clients, which is often not required (e.g., when using server-" -"side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." msgstr "" -#: ../../source/ref-changelog.md:982 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 msgid "" -"**Renamed q-FedAvg strategy** ([#802](https://github.com/adap/flower/" -"pull/802))" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." msgstr "" -#: ../../source/ref-changelog.md:984 -msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect the " -"notation given in the original paper (q-FFL is the optimization objective, q-" -"FedAvg is the proposed solver). 
Note the original (now deprecated) " -"`QffedAvg` class is still available for compatibility reasons (it will be " -"removed in a future release)." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" msgstr "" -#: ../../source/ref-changelog.md:986 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` ([#791](https://github.com/adap/flower/pull/791))" +"Here we will explore how to implement custom serialization with a simple " +"example." msgstr "" -#: ../../source/ref-changelog.md:988 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 msgid "" -"This example has been replaced by a new example. The new example is based on " -"the experimental virtual client engine, which will become the new default " -"way of doing most types of large-scale simulations in Flower. The existing " -"example was kept for reference purposes, but it might be removed in the " -"future." +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." msgstr "" -#: ../../source/ref-changelog.md:990 -msgid "v0.16.0 (2021-05-11)" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." 
msgstr "" -#: ../../source/ref-changelog.md:994 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 msgid "" -"**New built-in strategies** ([#549](https://github.com/adap/flower/pull/549))" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." msgstr "" -#: ../../source/ref-changelog.md:996 -msgid "(abstract) FedOpt" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" msgstr "" -#: ../../source/ref-changelog.md:999 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 msgid "" -"**Custom metrics for server and strategies** ([#717](https://github.com/adap/" -"flower/pull/717))" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." msgstr "" -#: ../../source/ref-changelog.md:1001 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." 
msgstr "" -#: ../../source/ref-changelog.md:1003 -msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are " -"returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and they " -"enable evaluation functions passed to built-in strategies (via `eval_fn`) to " -"return more than two evaluation metrics. Strategies can even return " -"*aggregated* metrics dictionaries for the server to keep track of." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" msgstr "" -#: ../../source/ref-changelog.md:1005 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate " -"from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." msgstr "" -#: ../../source/ref-changelog.md:1007 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." msgstr "" -#: ../../source/ref-changelog.md:1009 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 msgid "" -"**Migration warnings for deprecated functionality** ([#690](https://github." 
-"com/adap/flower/pull/690))" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." msgstr "" -#: ../../source/ref-changelog.md:1011 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces detailed " -"warning messages if usage of deprecated APIs is detected. The new warning " -"messages often provide details on how to migrate to more recent APIs, thus " -"easing the transition from one release to another." +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" msgstr "" -#: ../../source/ref-changelog.md:1013 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 msgid "" -"Improved docs and docstrings ([#691](https://github.com/adap/flower/" -"pull/691) [#692](https://github.com/adap/flower/pull/692) [#713](https://" -"github.com/adap/flower/pull/713))" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." 
msgstr "" -#: ../../source/ref-changelog.md:1015 -msgid "MXNet example and documentation" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" msgstr "" -#: ../../source/ref-changelog.md:1017 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) [#702](https://github.com/" -"adap/flower/pull/702) [#705](https://github.com/adap/flower/pull/705))" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" msgstr "" -#: ../../source/ref-changelog.md:1021 -msgid "" -"**Serialization-agnostic server** ([#721](https://github.com/adap/flower/" -"pull/721))" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" msgstr "" -#: ../../source/ref-changelog.md:1023 -msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of class " -"`Weights` (which represents parameters as deserialized NumPy ndarrays) was " -"replaced by class `Parameters` (e.g., in `Strategy`). `Parameters` objects " -"are fully serialization-agnostic and represents parameters as byte arrays, " -"the `tensor_type` attributes indicates how these byte arrays should be " -"interpreted (e.g., for serialization/deserialization)." +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" msgstr "" -#: ../../source/ref-changelog.md:1025 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 msgid "" -"Built-in strategies implement this approach by handling serialization and " -"deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. 
Strategy authors can consult PR [#721](https://github.com/adap/" -"flower/pull/721) to see how strategies can easily migrate to the new format." +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." msgstr "" -#: ../../source/ref-changelog.md:1027 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use `flwr.server.Server." -"evaluate_round` instead ([#717](https://github.com/adap/flower/pull/717))" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" msgstr "" -#: ../../source/ref-changelog.md:1029 -msgid "v0.15.0 (2021-03-12)" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" msgstr "" -#: ../../source/ref-changelog.md:1033 -msgid "" -"**Server-side parameter initialization** ([#658](https://github.com/adap/" -"flower/pull/658))" +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +msgid "`Check out Flower Code Examples `__" msgstr "" -#: ../../source/ref-changelog.md:1035 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." 
+"`Use Flower Baselines for your research " +"`__" msgstr "" -#: ../../source/ref-changelog.md:1037 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies will " -"provide these initial parameters to the server on startup and then delete " -"them to free the memory afterwards." +"`Watch Flower AI Summit 2024 videos `__" msgstr "" -#: ../../source/ref-changelog.md:1056 -msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" msgstr "" -#: ../../source/ref-changelog.md:1060 -msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to `flwr.server." -"strategy.FedAvg`, which is equivalent)" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" msgstr "" -#: ../../source/ref-changelog.md:1062 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." msgstr "" -#: ../../source/ref-changelog.md:1066 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +#, fuzzy +msgid "Let's get started! 
🌼" +msgstr "시작하기" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) [#572](https://github.com/" -"adap/flower/pull/572) [#633](https://github.com/adap/flower/pull/633))" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." msgstr "" -#: ../../source/ref-changelog.md:1068 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +#, fuzzy +msgid "Install dependencies" +msgstr "사전 릴리즈 설치" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to values " -"of the following types: `bool`, `bytes`, `float`, `int`, `str`. This means " -"one can return almost arbitrary values from `fit`/`evaluate` and make use of " -"them on the server side!" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" msgstr "" -#: ../../source/ref-changelog.md:1070 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 msgid "" -"This improvement also allowed for more consistent return types between `fit` " -"and `evaluate`: `evaluate` should now return a tuple `(float, int, dict)` " -"representing the loss, number of examples, and a dictionary holding " -"arbitrary problem-specific values like accuracy." +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. 
If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." msgstr "" -#: ../../source/ref-changelog.md:1072 -msgid "" -"In case you wondered: this feature is compatible with existing projects, the " -"additional dictionary return value is optional. New code should however " -"migrate to the new return types to be compatible with upcoming Flower " -"releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, `evaluate`: " -"`float, int, Dict[str, Scalar]`). See the example below for details." +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +msgid "Load the data" msgstr "" -#: ../../source/ref-changelog.md:1074 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." msgstr "" -#: ../../source/ref-changelog.md:1089 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** `Client." -"evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. 
We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." msgstr "" -#: ../../source/ref-changelog.md:1091 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means that " -"dictionary values were expected to be strings. The new release generalizes " -"this to enable values of the following types: `bool`, `bytes`, `float`, " -"`int`, `str`." +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." msgstr "" -#: ../../source/ref-changelog.md:1093 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-side " -"and `int(config[\"epochs\"])` on the client side!" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" msgstr "" -#: ../../source/ref-changelog.md:1095 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. 
There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." msgstr "" -#: ../../source/ref-changelog.md:1112 -msgid "v0.13.0 (2021-01-08)" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" msgstr "" -#: ../../source/ref-changelog.md:1116 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 msgid "" -"New example: PyTorch From Centralized To Federated ([#549](https://github." -"com/adap/flower/pull/549))" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." msgstr "" -#: ../../source/ref-changelog.md:1117 -msgid "Improved documentation" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" msgstr "" -#: ../../source/ref-changelog.md:1118 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 msgid "" -"New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." 
msgstr "" -#: ../../source/ref-changelog.md:1119 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +msgid "Define the model" msgstr "" -#: ../../source/ref-changelog.md:1120 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 msgid "" -"Updated examples documentation ([#549](https://github.com/adap/flower/" -"pull/549))" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" msgstr "" -#: ../../source/ref-changelog.md:1121 -msgid "" -"Removed obsolete documentation ([#548](https://github.com/adap/flower/" -"pull/548))" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" msgstr "" -#: ../../source/ref-changelog.md:1123 -msgid "Bugfix:" -msgstr "" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#, fuzzy +msgid "Train the model" +msgstr "릴리즈 동안에" -#: ../../source/ref-changelog.md:1125 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the " -"clients is now handled in `flwr.server.start_server` ([#553](https://github." -"com/adap/flower/pull/553) [#540](https://github.com/adap/flower/issues/540))." +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). 
This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" msgstr "" -#: ../../source/ref-changelog.md:1127 -msgid "v0.12.0 (2020-12-07)" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" msgstr "" -#: ../../source/ref-changelog.md:1129 ../../source/ref-changelog.md:1145 -msgid "Important changes:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" msgstr "" -#: ../../source/ref-changelog.md:1131 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 msgid "" -"Added an example for embedded devices ([#507](https://github.com/adap/flower/" -"pull/507))" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." msgstr "" -#: ../../source/ref-changelog.md:1132 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "모델 매개변수." 
+ +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) ([#504]" -"(https://github.com/adap/flower/pull/504) [#508](https://github.com/adap/" -"flower/pull/508))" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." msgstr "" -#: ../../source/ref-changelog.md:1133 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into the " -"top-level `examples` directory ([#494](https://github.com/adap/flower/" -"pull/494) [#512](https://github.com/adap/flower/pull/512))" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." msgstr "" -#: ../../source/ref-changelog.md:1135 -msgid "v0.11.0 (2020-11-30)" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
+" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" msgstr "" -#: ../../source/ref-changelog.md:1137 -msgid "Incompatible changes:" -msgstr "" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Flower 클라이언트 앱을 실행합니다." -#: ../../source/ref-changelog.md:1139 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 msgid "" -"Renamed strategy methods ([#486](https://github.com/adap/flower/pull/486)) " -"to unify the naming of Flower's public APIs. Other public methods/functions " -"(e.g., every method in `Client`, but also `Strategy.evaluate`) do not use " -"the `on_` prefix, which is why we're removing it from the four methods in " -"Strategy. To migrate rename the following `Strategy` methods accordingly:" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." msgstr "" -#: ../../source/ref-changelog.md:1140 -msgid "`on_configure_evaluate` => `configure_evaluate`" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. 
To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" msgstr "" -#: ../../source/ref-changelog.md:1141 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" msgstr "" -#: ../../source/ref-changelog.md:1142 -msgid "`on_configure_fit` => `configure_fit`" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" msgstr "" -#: ../../source/ref-changelog.md:1143 -msgid "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" msgstr "" -#: ../../source/ref-changelog.md:1147 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 msgid "" -"Deprecated `DefaultStrategy` ([#479](https://github.com/adap/flower/" -"pull/479)). To migrate use `FedAvg` instead." +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" msgstr "" -#: ../../source/ref-changelog.md:1148 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 msgid "" -"Simplified examples and baselines ([#484](https://github.com/adap/flower/" -"pull/484))." +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. 
Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." msgstr "" -#: ../../source/ref-changelog.md:1149 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface ([#483]" -"(https://github.com/adap/flower/pull/483))." +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." msgstr "" -#: ../../source/ref-changelog.md:1150 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. 
We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Flower 서버앱" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 ([#471](https://github." -"com/adap/flower/pull/471))." +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). 
Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" msgstr "" -#: ../../source/ref-changelog.md:1151 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 msgid "" -"Improved `Strategy` docstrings ([#470](https://github.com/adap/flower/" -"pull/470))." +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." msgstr "" -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +msgid "Run the training" msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate how " -"Flower can be used to federate different kinds of existing machine learning " -"pipelines, usually leveraging popular machine learning frameworks such as " -"`PyTorch `_ or `TensorFlow `_." +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." 
msgstr "" -#: ../../source/ref-example-projects.rst:10 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" msgstr "" -#: ../../source/ref-example-projects.rst:14 -msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image classification " -"with MobileNetV2:" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" msgstr "" -#: ../../source/ref-example-projects.rst:17 -msgid "" -"`Quickstart TensorFlow (Code) `_" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" msgstr "" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, python-format msgid "" -":doc:`Quickstart TensorFlow (Tutorial) `" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." 
msgstr "" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." msgstr "" -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" msgstr "" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a " -"simple Convolutional Neural Network:" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" msgstr "" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 msgid "" -"`Quickstart PyTorch (Code) `_" -msgstr "" - -#: ../../source/ref-example-projects.rst:29 -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). 
Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." msgstr "" -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." msgstr "" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" msgstr "" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 msgid "" -"`PyTorch: From Centralized To Federated (Code) `_" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." msgstr "" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. 
Part two of the Flower tutorial " +"will cover centralized evaluation." msgstr "" -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" msgstr "" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) `_" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." msgstr "" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." 
msgstr "" -#: ../../source/ref-faq.rst:4 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +msgid "Let's move beyond FedAvg with Flower strategies! 🌼" msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to " -"make it work even better on Colab. Here's a quickstart example:" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." 
msgstr "" -#: ../../source/ref-faq.rst:10 -msgid "" -"`Flower simulation PyTorch `_" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 msgid "" -"`Flower simulation TensorFlow/Keras `_" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" msgstr "" -#: ../../source/ref-faq.rst:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_ " -"and the corresponding `GitHub code example `_." +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" msgstr "" -#: ../../source/ref-faq.rst +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 msgid "" -":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." msgstr "" -#: ../../source/ref-faq.rst:19 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 msgid "" -"Yes, it does. 
Please take a look at our `blog post `_ or " -"check out the code examples:" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 msgid "" -"`Android Kotlin example `_" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." msgstr "" -#: ../../source/ref-faq.rst:26 -msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" msgstr "" -#: ../../source/ref-faq.rst:28 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"We've seen the function ``run_simulation`` before. 
It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 msgid "" -"`Flower meets Nevermined YouTube video `_." +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" msgstr "" -#: ../../source/ref-faq.rst:30 -msgid "" -"`Flower meets KOSMoS `_." +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 msgid "" -"`Flower meets Talan blog post `_ ." +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 msgid "" -"`Flower meets Talan GitHub Repository `_ ." +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." 
msgstr "" -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to make " -"well-informed decisions to improve Flower. Doing this enables the Flower " -"team to understand how Flower is used and what challenges users might face." +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). 
Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" msgstr "" -#: ../../source/ref-telemetry.md:5 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 msgid "" -"**Flower is a friendly framework for collaborative AI and data science.** " -"Staying true to this statement, Flower makes it easy to disable telemetry " -"for users that do not want to share anonymous usage metrics." +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." msgstr "" -#: ../../source/ref-telemetry.md:7 -msgid "Principles" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#, fuzzy +msgid "Finally, we run the simulation." +msgstr "Flower 시뮬레이션." + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" msgstr "" -#: ../../source/ref-telemetry.md:9 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 msgid "" -"We follow strong principles guarding anonymous usage metrics collection:" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. 
In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" msgstr "" -#: ../../source/ref-telemetry.md:11 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to learn " -"“[How to opt-out](#how-to-opt-out)”." +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" msgstr "" -#: ../../source/ref-telemetry.md:12 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not contain " -"any personally identifiable information (PII). See “[Collected metrics]" -"(#collected-metrics)” to understand what metrics are being reported." +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-to-" -"inspect-what-is-being-reported)”" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." 
msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 msgid "" -"**Open for feedback:** You can always reach out to us if you have feedback; " -"see the section “[How to contact us](#how-to-contact-us)” for details." +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." msgstr "" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" msgstr "" -#: ../../source/ref-telemetry.md:18 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example, `." -"bashrc` (or whatever configuration file applies to your environment) to " -"disable Flower telemetry permanently." +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." 
msgstr "" -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" msgstr "" -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" msgstr "" -#: ../../source/ref-telemetry.md:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 msgid "" -"**Flower version.** Understand which versions of Flower are currently being " -"used. This helps us to decide whether we should invest effort into releasing " -"a patch version for an older version of Flower or instead use the bandwidth " -"to build new features." +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. 
" +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." msgstr "" -#: ../../source/ref-telemetry.md:34 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "연합 학습이란 무엇입니까?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." msgstr "" +"이 튜토리얼에서 연합 학습이 무엇인지 배우고 Flower로 첫 번째 시스템을 구축하고 점진적으로 확장해 나갈 것입니다. 본 " +"튜토리얼의 모든 부분을 완성할 수 있다면, 당신은 고급 연합 학습 시스템을 구축하여 그 분야의 현재 최고 기술 수준에 접근할 수 " +"있을 것입니다." -#: ../../source/ref-telemetry.md:36 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 msgid "" -"**Hardware properties.** Understanding the hardware environment that Flower " -"is being used in helps to decide whether we should, for example, put more " -"effort into supporting low-resource environments." 
+"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." msgstr "" +"🧑‍🏫이 튜토리얼은 사전 지식을 많이 필요로 하지 않으며 연합 학습에 대해 상세히알 필요는 없습니다. 데이터 과학과 파이썬 " +"프로그래밍에 대한 기본적인 이해만 가정합니다." -#: ../../source/ref-telemetry.md:38 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables us " -"to understand how heavily certain features are being used and better " -"prioritize based on that." +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." msgstr "" +"`Star Flower on GitHub `__ ⭐️ Slack의 오픈소스" +" Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 있습니다: `Slack 가입`__ 🌼 ``#introductions``채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 " +"있으시면``#questions`` 채널로 방문해 주시기 바랍니다." -#: ../../source/ref-telemetry.md:40 -msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete them." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" msgstr "" -#: ../../source/ref-telemetry.md:42 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "전통적인 머신러닝(기계학습)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in `~/.flwr/" -"source` the first time a telemetry event is generated. 
The source ID is " -"important to identify whether an issue is recurring or whether an issue is " -"triggered by multiple clusters running concurrently (which often happens in " -"simulation). For example, if a device runs multiple workloads at the same " -"time, and this results in an issue, then, in order to reproduce the issue, " -"multiple workloads must be started at the same time." -msgstr "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "연합 학습에 대해 논의하기 전에 현재 대부분의 머신러닝이 어떻게 작동하는지 간략히 요약하겠습니다." -#: ../../source/ref-telemetry.md:44 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 msgid "" -"You may delete the source ID at any time. If you wish for all events logged " -"under a specific source ID to be deleted, you can send a deletion request " -"mentioning the source ID to `telemetry@flower.ai`. All events related to " -"that source ID will then be permanently deleted." +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." msgstr "" +"머신러닝에서 우리는 모델과 데이터를 가지고 있습니다. 모델은 신경망(그림과 같이)일 수도 있고 고전적인 선형 회귀와 같은 다른 것일" +" 수도 있습니다." -#: ../../source/ref-telemetry.md:46 -msgid "" -"We will not collect any personally identifiable information. If you think " -"any of the metrics collected could be misused in any way, please [get in " -"touch with us](#how-to-contact-us). We will update this page to reflect any " -"changes to the metrics collected and publish changes in the changelog." 
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|ac0a9766e26044d6aea222a829859b20|" msgstr "" -#: ../../source/ref-telemetry.md:48 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "모델과 데이터" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." msgstr "" +"우리는 유용한 작업을 수행하기 위해 데이터를 사용하여 모델을 훈련합니다. 작업은 이미지 속 물체를 감지하거나 음성 녹음을 기록하거나" +" 바둑과 같은 게임을 하는 것일 수 있습니다." -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|36cd6e248b1443ce8a82b5a025bba368|" msgstr "" -#: ../../source/ref-telemetry.md:52 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "데이터를 이용한 모델 훈련" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information by " -"setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging is " -"disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." -msgstr "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "실제로 우리가 사용하는 훈련 데이터는 모델을 훈련시키는 기계에서 비롯된 것이 아닙니다. 그 데이터는 다른 곳에서 만들어졌습니다." 
-#: ../../source/ref-telemetry.md:58
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61
 msgid ""
-"The inspect Flower telemetry without sending any anonymous usage metrics, "
-"use both environment variables:"
+"It originates on a smartphone by the user interacting with an app, a car "
+"collecting sensor data, a laptop receiving input via the keyboard, or a "
+"smart speaker listening to someone trying to sing a song."
 msgstr ""
+"스마트폰에서 사용자와 앱의 상호 작용, 센서 데이터를 수집하는 자동차, 키보드를 통해 입력을 받는 노트북 또는 누군가 노래를 "
+"부르려는 것을 듣는 스마트 스피커에서 비롯됩니다."

-#: ../../source/ref-telemetry.md:64
-msgid "How to contact us"
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67
+msgid "|bf4fb057f4774df39e1dcb5c71fd804a|"
 msgstr ""

-#: ../../source/ref-telemetry.md:66
-msgid ""
-"We want to hear from you. If you have any feedback or ideas on how to "
-"improve the way we handle anonymous usage metrics, reach out to us via "
-"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email "
-"(`telemetry@flower.ai`)."
-msgstr ""
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113
+msgid "Data on a phone"
+msgstr "핸드폰에 있는 데이터"

-#: ../../source/tutorial-quickstart-android.rst:-1
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73
 msgid ""
-"Read this Federated Learning quickstart tutorial for creating an Android app "
-"using Flower."
+"What's also important to mention, this \"somewhere else\" is usually not "
+"just one place, it's many places. It could be several devices all running"
+" the same app. But it could also be several organizations, all generating"
+" data for the same task."
 msgstr ""
+"또한 중요한 것은 이 \"다른 곳\"이 보통 한 곳만 아니라 여러 곳이라는 것입니다. 같은 앱을 실행하는 여러 기기일 수도 "
+"있습니다. 하지만 여러 조직이 모두 같은 작업을 위해 데이터를 생성하는 것일 수도 있습니다."
-#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "데이터가 여러 장치에 있습니다" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 msgid "" -"Let's build a federated learning system using TFLite and Flower on Android!" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." msgstr "" +"따라서 머신러닝이나 어떤 종류의 데이터 분석을 이용하려면 과거에는 중앙 서버에서 모든 데이터를 수집하는 방법이 사용되었습니다. 이 " +"서버는 데이터 센터 어딘가에 있을 수도 있고 클라우드 어딘가에 있을 수도 있습니다." -#: ../../source/tutorial-quickstart-android.rst:12 -msgid "" -"Please refer to the `full code example `_ to learn more." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|7605632e1b0f49599ffacf841491fcfb|" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "중앙 데이터 수집" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"FastAI to train a vision model on CIFAR-10." +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." msgstr "" +"모든 데이터가 한 곳에 모이면, 우리는 궁극적으로 머신러닝 알고리즘을 사용하여 데이터에서 모델을 훈련시킬 수 있습니다. 이것이 바로" +" 우리가 기본적으로 사용해 온 머신러닝 방법입니다." 
-#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|91b1b5a7d3484eb7a2350c1923f18307|" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "중앙 데이터 훈련" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "클래식 머신러닝의 어려움" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 msgid "" -"Please refer to the `full code example `_ to learn more." +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." msgstr "" +"우리가 방금 본 전통적 머신러닝의 접근 방식은 경우에 따라 다르게 사용될 수 있습니다. 좋은 예로는 휴일 사진을 분류하거나 웹 " +"트래픽을 분석하는 것이 있습니다. 이러한 사례에서 모든 데이터는 자연스럽게 중앙 서버에 존재합니다." -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|5405ed430e4746e28b083b146fb71731|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "집중화 가능" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower with " -"HuggingFace Transformers in order to fine-tune an LLM." +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." msgstr "" +"그러나 이 방법은 다른 많은 경우에 적용되지 않을 수 있습니다. 
예를 들어, 중앙 집중식 서버에 데이터가 없거나 서버의 데이터가 " +"좋은 모델을 훈련하기에 충분하지 않을 수 있습니다." -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|a389e87dab394eb48a8949aa2397687b|" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "집중화 불가능" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 msgid "" -"Let's build a federated learning system using Hugging Face Transformers and " -"Flower!" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. Those reasons include:" msgstr "" +"전통적인 중앙 집중식 머신러닝 방법이 현실 세계에서 매우 중요한 수많은 사용 사례를 충족시킬 수 없는 이유가 있습니다. 이유는 " +"다음과 같은 여러 가지가 있습니다:" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 msgid "" -"We will leverage Hugging Face to federate the training of language models " -"over multiple clients using Flower. More specifically, we will fine-tune a " -"pre-trained Transformer model (distilBERT) for sequence classification over " -"a dataset of IMDB ratings. The end goal is to detect if a movie rating is " -"positive or negative." +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. 
In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." msgstr "" +"**규정**: GDPR (유럽), CCPA (캘리포니아), PIPEDA (캐나다), LGPD (브라질), PDPL (아르헨티나), " +"KVKK (터키), POPI (남아프리카공화국), FSS (러시아), CDPR (중국), PDPB (인도), PIPA (한국), " +"APPI (일본), PDP (인도네시아), PDPA (싱가포르), APP (호주)등의 법규로 민감한 데이터가 이동하지 않도록 " +"보호하고 있습니다. 실제로 이러한 규정은 사용자가 세계의 다른 지역에 살고 데이터가 다른 데이터 보호 규정에 의해 통제되기 때문에 " +"단일 조직이 자체 사용자 데이터를 인공 지능 학습에 사용하는 것을 방지하기도 합니다." -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." msgstr "" +"**사용자 선호도**: 규정 외에도 일부 사용 사례에서 사용자는 데이터가 자기 장치를 떠나지 않기를 예상합니다. 휴대폰의 디지털 " +"키보드에 비밀번호와 신용카드 정보를 입력하면 비밀번호가 해당 키보드를 개발한 회사의 서버에 뜨길 원하지는 않겠죠? 사실, 이 사용 " +"사례가 애당초 연합 학습이 발명된 이유였습니다." -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, " -"and :code:`transformers`. 
This can be done using :code:`pip`:" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." msgstr "" +"**데이터 볼륨**: 일부 센서(예:카메라)는 너무 많은 데이터 볼륨을 생성하여 모든 데이터를 수집하는 것이 실현 가능하지도 않고 " +"경제적이지도 않습니다(예: 대역폭 또는 통신 효율로 인해). 전국에 수백 개 기차역이 있는 국가 철도 서비스를 생각해 보세요. 각 " +"기차역에 수 많은 보안 카메라가 설치되어 있다면, 그들이 생산하는 대량의 미가공 된 온디바이스 데이터는 처리 및 저장을 위해 " +"엄청나게 강력하고 매우 비싼기반 구조를 필요로 합니다. 그런데 대부분의 데이터는 유용하지도 않습니다." -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "중앙 집중식 머신러닝이 작동하지 않는 예는 다음과 같습니다:" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "여러 병원의 민감한 의료기록으로 암 검진 모델 훈련" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. 
We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" -msgstr "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "금융 사기를 탐지하기 위한 다양한 조직의 금융 정보" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "더 나은 범위 예측을 위해 전기 자동차의 위치 데이터" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "더 나은 자동 완성 모델을 훈련시키기 위한 엔드 투 엔드 암호화 된 메시지" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 msgid "" -"Once we have a way of creating our trainloader and testloader, we can take " -"care of the training and testing. This is very similar to any :code:" -"`PyTorch` training or testing loop:" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." msgstr "" +"`Brave `__ 브라우저나 `Signal `__ " +"메신저와 같은 개인 정보 보호 시스템의 인기는 사용자들이 개인 정보 보호에 신경 쓴다는 것을 보여줍니다. 실제로 그러한 대안이 " +"존재하는 경우 다른 대안보다 개인 정보 보호 강화 버전을 선택합니다. 그런데 이러한 사례에 머신러닝 및 데이터 과학을 적용하여 " +"프라이버시 데이터를 활용하려면 어떻게 해야 합니까? 이 모든 분야는 최근 AI의 발전으로 상당한 이익을 얻을 수 있는 분야입니다." 
+ +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "연합 학습" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" msgstr "" +"연합 학습은 이 방법을 쉽게 뒤집었습니다. 데이터를 컴퓨팅 센터로 옮기는 대신 컴퓨팅 능력을 데이터가 생성되는 장소로 이동 " +"시킴으로써 분산된 데이터에서 머신러닝을 실현합니다. 요약하자면:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "중앙 집중식 머신러닝: 데이터를 컴퓨팅 센터로 이동" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "연합(기계)학습: 컴퓨팅을 데이터로 옮김" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" +"이를 통해 이전에는 불가능했던 분야에서 머신러닝(및 기타 데이터 과학 방법)을 사용할 수 있습니다. 이제 다양한 병원이 협력할 수 " +"있도록 함으로써 우수한 의료 AI 모델을 훈련할 수 있습니다. 
다양한 금융 기관의 데이터에 대한 AI 모델을 훈련하여 금융 사기를 " +"해결할 수 있습니다. 개인 정보 보호를 강화하지 않는 대안보다 더 나은 AI가 내장된 새로운 개인 정보 보호 강화 애플리케이션(예:" +" 보안 메시징)을 구축할 수 있습니다. 그것들은 떠오르는 몇 가지 예에 불과합니다. 연합 학습을 구축함에 따라 이전에 액세스할 수 " +"없었던 많은 데이터에 액세스할 수 있게 되었기 때문에 갑자기 재생될 수 있는 영역이 점점 더 많아지고 있습니다." -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT " -"model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" -msgstr "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "그렇다면 연합 학습은 어떻게 작동합니까? 직관적인 설명부터 시작하겠습니다." -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "연합 학습의 5단계" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "0단계: 글로벌 모델 초기화" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). This " -"is very easy, as our model is a standard :code:`PyTorch` model:" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." msgstr "" +"서버에서 모델을 초기화하는 것으로 시작합니다. 이것은 전통적인 중앙 집중식 학습과도 동일합니다: 임의로 또는 이전에 저장된 " +"체크포인트에서 모델 매개변수를 초기화합니다." 
-#: ../../source/tutorial-quickstart-huggingface.rst:169 -msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the server " -"to send its parameters to the client. Finally, the :code:`fit` function " -"trains the model locally for the client, and the :code:`evaluate` function " -"tests the model locally and returns the relevant metrics." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|89c412136a5146ec8dc32c0973729f12|" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "글로벌 모델 초기화" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 msgid "" -"Now that we have a way to instantiate clients, we need to create our server " -"in order to aggregate the results. Using Flower, this can be done very " -"easily by first choosing a strategy (here, we are using :code:`FedAvg`, " -"which will define the global weights as the average of all the clients' " -"weights at each round) and then using the :code:`flwr.server.start_server` " -"function:" -msgstr "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "1단계: 연결된 여러 조직/장치(클라이언트 노드)에 모델 전송" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 msgid "" -"The :code:`weighted_average` function is there to provide a way to aggregate " -"the metrics distributed amongst the clients (basically this allows us to " -"display a nice average accuracy and loss for every round)." +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). 
This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." msgstr "" +"다음으로 글로벌 모델의 파라미터를 연결된 클라이언트 노드(예: 스마트폰과 같은 에지 디바이스 또는 조직에 속한 서버)로 보냅니다. " +"이것은 각 참여 노드가 동일한 모델 매개변수를 사용하여 로컬 훈련을 시작하도록 하기 위함입니다. 일반적으로 모든 노드가 아닌 몇 " +"개의 연결 노드만 사용합니다. 그 이유는 점점 더 많은 클라이언트 노드를 선택하면 학습의 효율성이 감소하기 때문입니다." -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|9503d3dc3a144e8aa295f8800cd8a766|" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "글로벌 모델 전송" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "2단계: 각 조직/장치(클라이언트 노드)의 데이터에 대해 로컬로 모델 훈련" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 msgid "" -"If you want to check out everything put together, you should check out the " -"`full code example `_ ." +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." 
msgstr "" +"이제 모든(선택된) 클라이언트 노드에는 최신 버전의 글로벌 모델 파라미터가 있으며 로컬 훈련을 시작합니다. 그들은 자신의 로컬 " +"데이터 세트를 사용하여 자신의 로컬 모델을 훈련합니다. 모델이 완전히 수렴할 때까지 훈련하지 않고 잠시만 훈련합니다. 이는 로컬 " +"데이터에서 한 단계 정도로 짧거나 몇 단계(mini-batches)에 불과할 수 있습니다." -#: ../../source/tutorial-quickstart-huggingface.rst:226 -msgid "" -"Of course, this is a very basic example, and a lot can be added or modified, " -"it was just to showcase how simply we could federate a Hugging Face workflow " -"using Flower." +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:229 -msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very " -"well used :code:`TensorFlow`." -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "로컬 데이터에 대한 훈련" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "3단계: 모델 파라미터를 업데이트하여 서버로 되돌리기" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." msgstr "" +"로컬 훈련 후에는 클라이언트 노드마다 원래 받은 모델 파라미터의 버전이 조금씩 다릅니다. 파라미터가 다른 이유는 각 클라이언트 " +"노드의 로컬 데이터 세트에 다른 데이터가 있기 때문입니다. 그런 다음 클라이언트 노드는 이러한 모델 업데이트를 서버로 다시 " +"보냅니다. 
보내는 모델 업데이트는 전체 모델 파라미터거나 로컬 교육 중에 누적된 그레디언트(gradient)일 수 있습니다." -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a7579ad7734347508e959d9e14f2f53d|" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 -msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST using " -"Flower and CoreML on iOS devices." -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "모델 업데이트 전송" -#: ../../source/tutorial-quickstart-ios.rst:12 -msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv " -"`. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "4단계: 모델 업데이트를 새 글로벌 모델로 집계" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that " -"all have the same model." +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" msgstr "" +"서버는 선택된 클라이언트 노드들로부터 모델 업데이트들을 수신합니다. 서버가 100개의 클라이언트 노드를 선택했다면 이제 각각 " +"클라이언트의 로컬 데이터를 기반으로 훈련된 100개의 조금씩 다른 원래 글로벌 모델 버전을 갖게 됩니다. 하지만 우리는 100개의 " +"모든 클라이언트 노드의 데이터에서 학습한 내용을 포함하는 모델을 하나만 갖고 싶지 않았습니까?" 
-#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 msgid "" -"*Clients* are responsible for generating individual weight updates for the " -"model based on their local datasets. These updates are then sent to the " -"*server* which will aggregate them to produce a better model. Finally, the " -"*server* sends this improved version of the model back to each *client*. A " -"complete cycle of weight updates is called a *round*." -msgstr "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" +"단일 모델 하나를 얻으려면 클라이언트 노드에서 받은 모든 모델 업데이트를 결합해야 합니다. 이 과정이 *집합*라고 하며 여러 가지 " +"방법이 있습니다. 가장 기본적인 방법은*Federated Averaging* (`McMahan et al., 2016 " +"`__)이라고 하고 보통 줄여서 *FedAvg*로 표기합니다. " +"*FedAvg* 는 100개의 모델 업데이트를 받아 이름에서 알 수 있듯이 모델 업데이트를 평균화합니다. 더 정확히 말하면, 모델 " +"업데이트의 *가중 평균* 을 각 클라이언트가 훈련에 사용한 예제 수에 따라 가중치를 부여합니다. 가중치는 각 데이터 예제가 결과 " +"글로벌 모델에 동일한 \"영향\" 을 미치는지 확인하는 데 중요합니다. 한 클라이언트에 10개의 데이터 포인트가 있고 다른 " +"클라이언트에 100개의 데이터 포인트가 있다면 가중치를 부여하지 않고 10개의 예가 100개의 사례보다 글로벌 모델에 10배 더 " +"많은 영향을 미칩니다." 
-#: ../../source/tutorial-quickstart-ios.rst:21 -msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. You " -"can do this by using pip:" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|73d15dd1d4fc41678b2d54815503fbe8|" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "모델 업데이트 집계" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "5단계: 모델이 수렴할 때까지 1~4단계를 반복합니다" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and MNIST " -"as our dataset. For simplicity reasons we will use the complete Flower " -"client with CoreML, that has been implemented and stored inside the Swift " -"SDK. The client implementation can be seen below:" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." msgstr "" +"단계 1에서 4는 우리가 말하는 단일 라운드 연합 학습입니다. 
글로벌 모델 파라미터는 참여하는 클라이언트 노드에 전송되고(1단계)," +" 클라이언트 노드는 로컬 데이터에 대한 훈련을 받고(2단계), 업데이트된 모델을 서버에 전송하고(3단계), 서버는 모델 업데이트를 " +"집계하여 글로벌 모델의 새로운 버전을 얻습니다(4단계)." -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a " -"dependency in your project. For our application, we will store the logic of " -"our app in :code:`FLiOSModel.swift` and the UI elements in :code:" -"`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift` in this " -"quickstart. Please refer to the `full code example `_ to learn more about the app." +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." msgstr "" +"한 라운드의 반복에서 해당 반복에 참여하는 각 클라이언트 노드는 짧은 시간 동안만 훈련합니다. 집계 단계(4단계) 이후 우리 모델이" +" 관련된 모든 클라이언트 노드의 모든 데이터에 대해 잠시 동안만 훈련되었음을 의미합니다. 그런 다음 모든 클라이언트 노드의 " +"데이터에서 잘 작동하는 완전히 훈련된 모델에 도달하려면 이 훈련 과정을 계속 반복해야 합니다." -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS device. " -"We need to pass the url to access mlmodel and run CoreML machine learning " -"processes, it can be retrieved by calling the function :code:`Bundle.main." -"url`. 
For the MNIST dataset, we need to preprocess it into :code:" -"`MLBatchProvider` object. The preprocessing is done inside :code:`DataLoader." -"swift`." -msgstr "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" +"축하합니다, 이제 연합 학습의 기초에 대해 알게 되었습니다. 물론 아직 논의해야 할 내용이 많지만 이는 연합 학습의 축소판일 " +"뿐입니다. 본 튜토리얼의 후반부에는 좀 더 자세히 설명하겠습니다. 흥미로운 질문은 다음과 같습니다: 다음 라운드에 참여해야 할 가장" +" 좋은 클라이언트 노드를 어떻게 선택할 수 있을까요? 모델 업데이트를 집계하는 가장 좋은 방법은 무엇일까요? 실패한 클라이언트 " +"노드(낙오자)를 어떻게 처리할 수 있을까요?" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 msgid "" -"Since CoreML does not allow the model parameters to be seen before training, " -"and accessing the model parameters during or after the training can only be " -"done by specifying the layer name, we need to know this information " -"beforehand, through looking at the model specification, which are written as " -"proto files. The implementation can be seen in :code:`MLModelInspect`." +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." msgstr "" +"다양한 클라이언트 노드의 분산된 데이터에 대해 모델을 훈련할 수 있는 것처럼 해당 데이터에 대한 모델을 평가하여 가치 있는 " +"메트릭(metrics)을 받을 수도 있습니다. 이를 연합 평가라고 하며 FE라고 약칭하기도 합니다. 사실 연합 평가는 대부분의 연합" +" 학습 시스템에서 필수적인 부분입니다." 
-#: ../../source/tutorial-quickstart-ios.rst:102
-msgid ""
-"After we have all of the necessary information, let's create our Flower "
-"client."
-msgstr ""
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297
+msgid "Federated analytics"
+msgstr "연합 분석"

-#: ../../source/tutorial-quickstart-ios.rst:117
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299
 msgid ""
-"Then start the Flower gRPC client and start communicating to the server by "
-"passing our Flower client to the function :code:`startFlwrGRPC`."
+"In many cases, machine learning isn't necessary to derive value from "
+"data. Data analysis can yield valuable insights, but again, there's often"
+" not enough data to get a clear answer. What's the average age at which "
+"people develop a certain type of health condition? Federated analytics "
+"enables such queries over multiple client nodes. It is usually used in "
+"conjunction with other privacy-enhancing technologies like secure "
+"aggregation to prevent the server from seeing the results submitted by "
+"individual client nodes."
 msgstr ""
+"많은 경우 머신러닝은 데이터로부터 가치를 얻기 위한 필수 조건이 아닙니다. 데이터 분석을 통해 귀중한 통찰력을 얻을 수 있지만, "
+"명확한 답변을 얻기에는 데이터가 충분하지 않은 경우가 많습니다. 특정 유형의 건강 상태가 발생하는 평균 연령은 몇 살입니까? 연합 "
+"분석을 사용하면 여러 클라이언트 노드에서 이러한 쿼리(query)를 실행할 수 있습니다. 서버가 개별 클라이언트 노드에서 제출한 "
+"결과를 보지 못하도록 보안 집계(secure aggregation)와 같은 다른 프라이버시 향상 기술과 함께 자주 사용됩니다."

-#: ../../source/tutorial-quickstart-ios.rst:124
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305
 msgid ""
-"That's it for the client. We only have to implement :code:`Client` or call "
-"the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. The "
-"attribute :code:`hostname` and :code:`port` tells the client which server to "
-"connect to. This can be done by entering the hostname and port in the "
-"application before clicking the start button to start the federated learning "
-"process."
-msgstr "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" +"차분 프라이버시(Differential Privacy)는 연합 학습의 맥락에서 종종 언급됩니다. 통계 데이터를 분석하고 공유할 때 " +"사용하는 프라이버시 보호 방식으로, 참가자 개인의 프라이버시를 보장합니다. 차분 프라이버시는 모델 업데이트에 통계적 " +"잡음(noise)를 추가하여 개별 참가자의 정보를 구별하거나 재식별할 수 없도록 함으로써 이를 달성합니다. 이 기술은 정량적 개인 " +"정보 보호 조치를 제공하는 최적화라고 볼 수 있습니다." -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "Flower" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named :code:" -"`server.py`, import Flower and start the server:" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. 
In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." msgstr "" +"연합 학습, 연합 평가 및 연합 분석은 머신러닝 모델을 앞뒤로 이동하고 로컬 데이터에 대해 훈련 및 평가한 다음 업데이트된 모델을 " +"통합하기 위한 기본 프레임워크가 필요합니다. Flower가 제공하는 기반 구조는 간단하고 확장 가능하며 안전한 방식으로 이러한 " +"목표를 달성합니다. 간단히 말해서, Flower는 연합 학습, 분석 및 평가를 위한 통합 접근 방식을 제공합니다. 이를 통해 " +"사용자는 모든 워크로드, 머신러닝 프레임워크 및 모든 프로그래밍 언어를 통합할 수 있습니다." -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|55472eef61274ba1b739408607e109df|" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and multiple " -"clients. We therefore have to start the server first:" -msgstr "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "Flower 연합 학습 서버 및 클라이언트 노드(자동차, 스쿠터, 개인용 컴퓨터, 룸바, 전화)" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 msgid "" -"Once the server is running we can start the clients in different terminals. " -"Build and run the client through your Xcode, one through Xcode Simulator and " -"the other by deploying it to your iPhone. To see more about how to deploy " -"your app to iPhone or Simulator visit `here `_." 
-msgstr "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "축하합니다, 지금까지 당신은 연합 학습의 기본 지식과 그것이 어떻게 전통적 (중앙 집중식) 머신러닝과 관련되는지 배웠습니다!" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code `_ for this example can be found in :" -"code:`examples/ios`." -msgstr "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "이 튜토리얼의 다음 부분에서는 Flower와 함께 첫 번째 연합 학습 시스템을 구축할 것입니다." -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"Jax to train a linear regression model on a scikit-learn dataset." +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" -msgstr "" - -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"Pandas to perform Federated Analytics." -msgstr "" - -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." msgstr "" +"`Flower 연합 학습 튜토리얼- 1부 `__ PyTorch와 Flower를 사용하여 간단한 연합 학습 시스템을" +" 구축하는 방법을 보여줍니다." -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" 
-msgstr "" - -#: ../../source/tutorial-quickstart-pandas.rst:12 -msgid "" -"Please refer to the `full code example `_ to learn more." -msgstr "" +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" +#~ "현재, Flower는 \"base\" 이미지 그리고 " +#~ "\"superlink\" 이미지를 제공합니다. base 이미지는 이름에서" +#~ " 알 수 있듯이 SuperLink가 필요로 하는 기본" +#~ " dependencies를 포함하고 있습니다. 여기에는 시스템 " +#~ "dependencies, Python 및 Python 도구가 포함됩니다." +#~ " SuperLink 이미지는 base 이미지를 기반으로 하지만" +#~ " \"pip\"을 사용하여 SuperLink를 추가로 설치합니다." -#: ../../source/tutorial-quickstart-pytorch.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"PyTorch to train a CNN model on MNIST." -msgstr "" +#~ msgid "``3.11``" +#~ msgstr "``3.11``" -#: ../../source/tutorial-quickstart-pytorch.rst:13 -msgid "" -"In this tutorial we will learn how to train a Convolutional Neural Network " -"on CIFAR10 using Flower and PyTorch." -msgstr "" +#~ msgid "Defaults to ``22.04``." +#~ msgstr "``22.04``이 기본값." -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 -msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." -msgstr "" +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "``flwr/base``이 기본값." -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 -msgid "" -"Our example consists of one *server* and two *clients* all having the same " -"model." -msgstr "" +#~ msgid "The Python version of the base image." +#~ msgstr "base 이미지의 Python 버전." 
-#: ../../source/tutorial-quickstart-pytorch.rst:19 -msgid "" -"*Clients* are responsible for generating individual weight-updates for the " -"model based on their local datasets. These updates are then sent to the " -"*server* which will aggregate them to produce a better model. Finally, the " -"*server* sends this improved version of the model back to each *client*. A " -"complete cycle of weight updates is called a *round*." -msgstr "" +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "``py3.11``이 기본값." -#: ../../source/tutorial-quickstart-pytorch.rst:23 -msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" -msgstr "" +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "``ubuntu22.04``이 기본값." -#: ../../source/tutorial-quickstart-pytorch.rst:29 -msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go ahead " -"and install PyTorch and the **torchvision** library:" -msgstr "" +#~ msgid "Defaults to ``flwr``." +#~ msgstr "``flwr``이 기본값." -#: ../../source/tutorial-quickstart-pytorch.rst:39 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training procedure " -"and network architecture are based on PyTorch's `Deep Learning with PyTorch " -"`_." -msgstr "" +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "" +#~ "이미지의 이름은 ``flwr_superlink``이고 태그는 " +#~ "``0.1.0``입니다. 필요에 따라 빌드 argument들 뿐만 " +#~ "아니라 이름과 태그도 정할 수 있습니다. 이 값들은" +#~ " 예시일 뿐입니다." 
-#: ../../source/tutorial-quickstart-pytorch.rst:41 -msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" -msgstr "" +#~ msgid "Edge Client Engine" +#~ msgstr "엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-pytorch.rst:62 -msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads the " -"training and test data that are then normalized." -msgstr "" +#~ msgid "Virtual Client Engine" +#~ msgstr "가상 클라이언트 엔진" -#: ../../source/tutorial-quickstart-pytorch.rst:78 -msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset is " -"done by looping over the dataset, measure the corresponding loss and " -"optimize it." -msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 클라이언트 엔진" -#: ../../source/tutorial-quickstart-pytorch.rst:94 -msgid "" -"Define then the validation of the machine learning network. We loop over " -"the test set and measure the loss and accuracy of the test set." -msgstr "" +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "동일 작업에서 가상 클라이언트 엔진과 엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-pytorch.rst:113 -msgid "" -"After defining the training and testing of a PyTorch machine learning model, " -"we use the functions for the Flower clients." 
-msgstr "" +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "`Flower `_의 핵심 프레임워크 아키텍처와 가상 및 엣지 클라이언트 엔진" -#: ../../source/tutorial-quickstart-pytorch.rst:115 -msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 Minute " -"Blitz':" -msgstr "" +#~ msgid "Clone the flower repository." +#~ msgstr "Flower 레포지토리를 복제합니다." -#: ../../source/tutorial-quickstart-pytorch.rst:142 -msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." -msgstr "" +#~ msgid "" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." +#~ msgstr "" +#~ ":doc:Run Flower using Docker 의 첫 번째 섹션을" +#~ " 따라 주십시오. 해당 부분을 더 자세히 설명해 " +#~ "줍니다." -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 -msgid "" -"The Flower server interacts with clients through an interface called :code:" -"`Client`. When the server selects a particular client for training, it sends " -"training instructions over the network. The client receives those " -"instructions and calls one of the :code:`Client` methods to run your code (i." -"e., to train the neural network we defined earlier)." -msgstr "" +#~ msgid "``22.04``" +#~ msgstr "``22.04``" -#: ../../source/tutorial-quickstart-pytorch.rst:150 -msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which makes " -"it easier to implement the :code:`Client` interface when your workload uses " -"PyTorch. 
Implementing :code:`NumPyClient` usually means defining the " -"following methods (:code:`set_parameters` is optional though):" -msgstr "" +#~ msgid "``23.0.1``" +#~ msgstr "``23.0.1``" -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "" +#~ msgid "``69.0.2``" +#~ msgstr "``69.0.2``" -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" -msgstr "" +#~ msgid "``1.8.0``" +#~ msgstr "``1.8.0``" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid "" -"update the local model weights with the parameters received from the server" -msgstr "" +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "SuperLink/SuperNode 또는 ServerApp 이미지 빌드" -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" -msgstr "" +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgstr "``1.8.0-py3.10-ubuntu22.04``" -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" -msgstr "" +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "다음 예시에서는 공식 Flower 기본 이미지로 SuperLink/SuperNode 또는 ServerApp이미지를 만듭니다:" -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" -msgstr "" +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Docker 이미지 빌드를 위해 CI를 트리거합니다." 
-#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" -msgstr "" +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." +#~ msgstr "" +#~ "워크플로우를 트리거하려면 공동 작업자가 GitHub CI에서 " +#~ "``workflow_dispatch``를 생성해야 합니다. 이 작업은 " +#~ "UI 또는 GitHub CLI 를 통해 수행할 수" +#~ " 있습니다. 이벤트는 Flower 버전 한 가지 입력만" +#~ " 필요합니다." -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" -msgstr "" +#~ msgid "**Via the UI**" +#~ msgstr "**UI를 통해서**" -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 -msgid "" -"We can now create an instance of our class :code:`CifarClient` and add one " -"line to actually run this client:" -msgstr "" +#~ msgid "" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." +#~ msgstr "" +#~ "``Build docker images`` 워크플로우 `페이지 " +#~ "`_로 이동합니다." -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 -msgid "" -"That's it for the client. We only have to implement :code:`Client` or :code:" -"`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a " -"client of type :code:`NumPyClient` you'll need to first call its :code:" -"`to_client()` method. The string :code:`\"[::]:8080\"` tells the client " -"which server to connect to. In our case we can run the server and the client " -"on the same machine, therefore we use :code:`\"[::]:8080\"`. If we run a " -"truly federated workload with the server and clients running on different " -"machines, all that needs to change is the :code:`server_address` we point " -"the client at." 
-msgstr "" +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "``Run workflow`` 버튼을 누르고 ``Version of Flower``에 Flower의 새버전을 입력합니다." -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 -msgid "" -"Once the server is running we can start the clients in different terminals. " -"Open a new terminal and start the first client:" -msgstr "" +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "**초록색**의 ``Run workflow``버튼을 클릭합니다." -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" -msgstr "" +#~ msgid "**Via the GitHub CI**" +#~ msgstr "**GitHub CI를 통해서**" -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 -msgid "" -"Each client will have its own dataset. You should now see how the training " -"does in the very first terminal (the one that started the server):" -msgstr "" +#~ msgid "" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." +#~ msgstr "``gh auth login``을 통해 로그인 했는지, 현재 작업 디렉토리가 Flower 리포지토리의 root인지 확인하세요." -#: ../../source/tutorial-quickstart-pytorch.rst:271 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code `_ for this example can be found " -"in :code:`examples/quickstart-pytorch`." 
-msgstr "" +#~ msgid "" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." +#~ msgstr "" +#~ "``gh workflow run docker-images.yml -f" +#~ " flwr-version=``을 통해 워크플로우 를" +#~ " 트리거합니다." -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"PyTorch Lightning to train an Auto Encoder model on MNIST." -msgstr "" +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "예시: JAX - JAX Federated 실행" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" -msgstr "" +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." +#~ msgstr "" +#~ "Flower를 시작하는 가장 간단한 방법은 `Docker " +#~ "Hub `__에서 찾을 수 " +#~ "있는 미리 만들어진 Docker 이미지를 사용하는 것입니다." +#~ " 지원되는 아키텍처는 ``amd64`` 및 ``arm64v8``입니다." -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 -msgid "" -"Let's build a horizontal federated learning system using PyTorch Lightning " -"and Flower!" -msgstr "" +#~ msgid "" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" +#~ "전이 표시되지 않고 대신 명령을 찾을 수 없다는" +#~ " 오류가 표시되는 경우 먼저 Docker를 설치해야 " +#~ "합니다. `여기 `_에서" +#~ " 설치 지침을 찾을 수 있습니다." -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 -msgid "" -"Please refer to the `full code example `_ to learn more." -msgstr "" +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." 
+#~ msgstr "" +#~ "Linux에서 Docker 명령을 실행하려면 ``sudo`` 권한이" +#~ " 필요합니다. ``sudo`` 를 사용하지 않으려면 공식 " +#~ "Docker 웹사이트의 `Post-installation steps " +#~ "`_를" +#~ " 따르세요." -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"scikit-learn to train a linear regression model." -msgstr "" +#~ msgid "" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." +#~ msgstr "" +#~ "최적의 성능과 호환성을 보장하려면 SuperLink, SuperNode" +#~ " 및 ServerApp 이미지를 함께 실행할 때 버전이" +#~ " 동일해야 합니다. 이렇게 하면 원활한 통합을 보장하고" +#~ " 서로 다른 버전을 사용할 때 발생할 수 있는" +#~ " 잠재적인 충돌이나 문제를 방지할 수 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" -msgstr "" +#~ msgid "Flower SuperLink" +#~ msgstr "Flower SuperLink" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 -msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic Regression` " -"model on MNIST using Flower and scikit-learn." -msgstr "" +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "Flower를 사용해보고 싶다면 다음 명령을 사용하면 됩니다:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 -msgid "" -"It is recommended to create a virtual environment and run everything within " -"this :doc:`virtualenv `." -msgstr "" +#~ msgid "" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" +#~ "이 명령은 Docker Hub에서 ``1.8.0`` 태그가 " +#~ "있는 Docker 이미지를 가져옵니다. 이 태그는 Flower" +#~ " 버전을 지정합니다. 이 경우, Flower 1.8.0입니다." 
+#~ " '`--rm`` 플래그는 컨테이너가 종료된 후 컨테이너를 " +#~ "제거하도록 Docker에 지시합니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 -msgid "" -"*Clients* are responsible for generating individual model parameter updates " -"for the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce an updated global model. " -"Finally, the *server* sends this improved version of the model back to each " -"*client*. A complete cycle of parameters updates is called a *round*." -msgstr "" +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" +#~ "``-p :`` 플래그는 호스트의 포트 " +#~ "``9091``/``9092``를 컨테이너의 ``9091``/``9092``에 매핑하여 " +#~ "``http://localhost:9091``의 드라이버 API와 " +#~ "``http://localhost:9092``의 Fleet API에 액세스할 수" +#~ " 있도록 Docker에 지시합니다. 마지막으로, 태그 뒤에 " +#~ "오는 모든 플래그는 Flower SuperLink에 전달됩니다. " +#~ "여기서는 ``--insecure``플래그를 전달합니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 -msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" -msgstr "" +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." +#~ msgstr "" +#~ "``--insecure`` 플래그는 안전하지 않은 통신(HTTPS가 아닌" +#~ " HTTP 사용)을 활성화하며 테스트 목적으로만 사용해야 " +#~ "합니다. 프로덕션 환경에 배포할 때는 `SSL " +#~ "`__을 활성화할 것을 강력히 권장합니다." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikit-learn, let's go ahead and install it:" -msgstr "" +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "'`--help``을 사용하면 SuperLink가 지원하는 모든 플래그를 볼 수 있습니다:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" -msgstr "" +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "호스트 시스템에 상태를 저장할 볼륨 마운트하기" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that we " -"need for our federated learning setup within :code:`utils.py`. The :code:" -"`utils.py` contains different functions defining all the machine learning " -"basics:" -msgstr "" +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" +#~ "호스트 시스템에서 SuperLink의 상태를 유지하려면 호스트 " +#~ "시스템에서 파일을 저장할 디렉터리와 데이터베이스 파일의 이름을" +#~ " 지정하기만 하면 됩니다. 기본적으로 SuperLink 컨테이너는" +#~ " 사용자 ID가 ``49999``인 ``app``이라는 루트가 아닌" +#~ " 사용자로 실행됩니다. 
마운트된 디렉터리에 적절한 권한이 " +#~ "있는지 확인하려면 새 디렉터리를 생성하고 디렉터리의 사용자" +#~ " ID를 ``49999``로 변경하는 것이 좋습니다. 나중에 " +#~ "디렉터리를 삭제하려면 ``sudo chown -R $USER:$(id" +#~ " -gn) state``를 실행하여 사용자 ID를 현재 " +#~ "사용자 ID로 다시 변경할 수 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" -msgstr "" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" +#~ "필요한 모든 파일이 로컬``certificates`` 디렉터리에 있다고" +#~ " 가정하면, ``--volume``플래그를 사용하여 로컬 디렉터리를 " +#~ "컨테이너의 ``/app/certificates/`` 디렉터리에 마운트할 수 " +#~ "있습니다. 이렇게 하면 SuperLink 가 컨테이너 내의" +#~ " 파일에 액세스할 수 있습니다. ``ro``는 ``read-" +#~ "only``을 의미합니다. Docker 볼륨은 기본적으로 " +#~ "``read-write``로 설정되어 있는데, 이 옵션을 사용하면" +#~ " 볼륨을 ``read-only``으로 만들 수 있습니다. " +#~ "마지막으로 인증서 및 키 파일의 이름을 ``--ssl-" +#~ "ca-certfile``, ``--ssl-certfile`` 및 " +#~ "``--ssl-keyfile`` 플래그와 함께 SuperLink에 전달합니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "" +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "" +#~ "SuperNode Docker 이미지는 Flower의 사전 설치된 " +#~ "버전과 함께 제공되며, 자체 SuperNode 이미지를 " +#~ "구축하기 위한 기반 역할을 합니다." 
-#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" -msgstr "" +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." +#~ msgstr "" +#~ "Flower 레포지토리에서 찾을 수 있는 ``quickstart-" +#~ "pytorch`` 예제를 사용하여 ClientApp을 도커라이즈하는 " +#~ "방법을 설명하겠습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "" +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" +#~ "시작하기 전에 로컬 개발 환경에서 몇 가지 전제" +#~ " 조건을 충족해야 합니다. 'quickstart-pytorch' " +#~ "예제 대신 ClientApp을 실행하려는 경우 첫 번째 " +#~ "부분을 건너뛸 수 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" -msgstr "" +#~ msgid "Let's assume the following project layout:" +#~ msgstr "다음과 같은 프로젝트 레이아웃을 가정해 보겠습니다:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "" +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." +#~ msgstr "" +#~ "먼저 ``ClientApp`` 코드가 있는 디렉토리에 " +#~ "``requirements.txt`` 파일을 만들어야 합니다. 이 " +#~ "파일에는 클라이언트 앱에 필요한 모든 의존성을 나열합니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 -msgid "" -"Please check out :code:`utils.py` `here `_ for more details. The pre-" -"defined functions are used in the :code:`client.py` and imported. 
The :code:" -"`client.py` also requires to import several packages such as Flower and " -"scikit-learn:" -msgstr "" +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" +#~ "`flwr `__ 는 이미 " +#~ "``flwr/supernode`` 기본 이미지에 설치되어 있으므로, " +#~ "``torch``, ``tensorflow`` 등과 같은 다른 패키지" +#~ " dependencies만 ``requirements.txt``에 포함시키면 됩니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 -msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular image " -"classification dataset of handwritten digits for machine learning, and " -"partition the dataset for FL. This can be conveniently achieved using " -"`Flower Datasets `_. The :code:" -"`FederatedDataset.load_partition()` method loads the partitioned training " -"set for each partition ID defined in the :code:`--partition-id` argument." -msgstr "" +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "다음으로, Dockerfile을 생성합니다.``quickstart-pytorch`` " +#~ "예제를 사용하는 경우 ``examples/quickstart-pytorch``에" +#~ " ``Dockerfile.supernode``라는 새 파일을 생성합니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 -msgid "" -"Next, the logistic regression model is defined and initialized with :code:" -"`utils.set_initial_params()`." -msgstr "" +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "``Dockerfile.supernode``에는 SuperNode 이미지를 조립하는 지침이 포함되어 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 -msgid "" -"The Flower server interacts with clients through an interface called :code:" -"`Client`. 
When the server selects a particular client for training, it sends " -"training instructions over the network. The client receives those " -"instructions and calls one of the :code:`Client` methods to run your code (i." -"e., to fit the logistic regression we defined earlier)." -msgstr "" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" +#~ "처음 두 줄에서는 ``nightly`` 태그가 붙은 " +#~ "SuperNode 이미지를 기본 이미지로 사용하고 작업 " +#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" +#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 다음으로," +#~ " ``requirements.txt`` 파일을 이미지에 복사하여 " +#~ "ClientApp dependencies 요소를 설치하고 ``pip " +#~ "install``을 실행합니다. 마지막 두 줄에서 " +#~ "``client.py`` 모듈을 이미지에 복사하고 ``client:app`` " +#~ "인수를 사용하여 진입점을 ``flower-client-app``로 " +#~ "설정합니다. 인수는 클라이언트앱 내부에서 실행될 클라이언트앱의 " +#~ "객체 참조 (``:``) 입니다." + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "SuperNode Docker 이미지 빌드" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 -msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which makes " -"it easier to implement the :code:`Client` interface when your workload uses " -"scikit-learn. 
Implementing :code:`NumPyClient` usually means defining the " -"following methods (:code:`set_parameters` is optional though):" -msgstr "" +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" +#~ "이미지에 ``flwr_supernode``라는 이름을 붙이고 ``0.0.1``" +#~ " 태그를 붙였습니다. 여기서 선택한 값은 예시일 뿐이라는" +#~ " 점을 기억하세요. 필요에 따라 변경할 수 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" -msgstr "" +#~ msgid "Running the SuperNode Docker image" +#~ msgstr "SuperNode Docker 이미지 실행" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" -msgstr "" +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "이제 SuperNode 이미지를 빌드했으니 이제 실행할 수 있습니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 -msgid "" -"We can now create an instance of our class :code:`MnistClient` and add one " -"line to actually run this client:" -msgstr "" +#~ msgid "Let's break down each part of this command:" +#~ msgstr "이 명령의 각 부분을 자세히 살펴보겠습니다:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 -msgid "" -"That's it for the client. We only have to implement :code:`Client` or :code:" -"`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a " -"client of type :code:`NumPyClient` you'll need to first call its :code:" -"`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells the client " -"which server to connect to. In our case we can run the server and the client " -"on the same machine, therefore we use :code:`\"0.0.0.0:8080\"`. If we run a " -"truly federated workload with the server and clients running on different " -"machines, all that needs to change is the :code:`server_address` we pass to " -"the client." 
-msgstr "" +#~ msgid "" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "``--rm``: 이 옵션은 컨테이너가 중지될 때 자동으로 제거되도록 지정합니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 -msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import again " -"all required libraries such as Flower and scikit-learn." -msgstr "" +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "``--insecure``: 이 옵션은 보안되지 않은 통신을 활성화합니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" -msgstr "" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "``--superlink 192.168.1.100:9092``: 이 옵션은 SuperLinks Fleet의 주소를 지정합니다" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 -msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and " -"the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower " -"Datasets here to load the test split of the MNIST dataset for server-side " -"evaluation." -msgstr "" +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "API에 연결할 수 있습니다. SuperLink IP로 업데이트하는 것을 잊지 마세요." -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 -msgid "" -"The :code:`main` contains the server-side parameter initialization :code:" -"`utils.set_initial_params()` as well as the aggregation strategy :code:`fl." -"server.strategy:FedAvg()`. The strategy is the default one, federated " -"averaging (or FedAvg), with two clients and evaluation after each federated " -"learning round. 
The server can be started with the command :code:`fl.server." -"start_server(server_address=\"0.0.0.0:8080\", strategy=strategy, config=fl." -"server.ServerConfig(num_rounds=3))`." -msgstr "" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" +#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge " +#~ "network `__를 생성하고 ``--network`` argument를 " +#~ "사용하고 SuperNodes를 실행할 Docker 네트워크의 이름을" +#~ " 전달하면 됩니다." -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 -msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server first:" -msgstr "" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "" +#~ "태그 뒤에 오는 모든 argument는 Flower " +#~ "SuperNode 바이너리에 전달됩니다. SuperNode가 지원하는 " +#~ "사용 가능한 모든 플래그를 보려면 실행하세요:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code `_ for this example can be found in :code:" -"`examples/sklearn-logreg-mnist`." -msgstr "" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 SuperNode 컨테이너에 마운트해야 합니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"TensorFlow to train a MobilNetV2 model on CIFAR-10." 
-msgstr "" +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." +#~ msgstr "" +#~ "SuperNode 이미지와 마찬가지로 ServerApp Docker " +#~ "이미지는 Flower의 사전 설치된 버전과 함께 제공되며," +#~ " 자체 ServerApp 이미지를 구축하기 위한 기본 " +#~ "역할을 합니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" -msgstr "" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." +#~ msgstr "" +#~ "여기서는 Flower SuperNode 섹션에서와 동일한`quickstart-" +#~ "pytorch`` 예제를 사용하겠습니다. 아직 수행하지 않았다면 " +#~ "계속 진행하기 전에 `SuperNode Prerequisites`_ 을" +#~ " 따르세요." -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "" +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "ServerApp Dockerfile 만들기" -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "" +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "먼저, ``ServerApp`` 코드가 있는 디렉토리에 Docker파일을" +#~ " 생성해야 합니다. ``quickstart-pytorch`` 예제를 " +#~ "사용하는 경우 ``examples/quickstart-pytorch``에 " +#~ "``Dockerfile.serverapp``이라는 새 파일을 생성합니다." 
-#: ../../source/tutorial-quickstart-tensorflow.rst:21 -msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install TF " -"as well:" -msgstr "" +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "``Dockerfile.serverapp``에는 ServerApp 이미지를 합치는 지침이 포함되어 있습니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" -msgstr "" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." +#~ msgstr "" +#~ "처음 두 줄에서는 ``1.8.0`` 태그가 붙은 " +#~ "ServerApp 이미지를 기본 이미지로 사용하고 작업 " +#~ "디렉터리를 ``/app``로 설정하도록 Docker에 지시합니다. 이제" +#~ " ``/app`` 디렉토리에서 다음 명령이 실행됩니다. 마지막" +#~ " 두 줄에서는 ``server.py`` 모듈을 이미지에 복사하고" +#~ " ``server:app`` argument를 사용하여 진입점을 " +#~ "``flower-server-app``로 설정합니다. 인수는 ServerApp" +#~ " 컨테이너 내에서 실행될 ServerApp의 객체 " +#~ "참조(``:``)입니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:38 -msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image " -"classification dataset for machine learning. The call to :code:`tf.keras." -"datasets.cifar10.load_data()` downloads CIFAR10, caches it locally, and then " -"returns the entire training and test set as NumPy ndarrays." 
-msgstr "" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "ServerApp Docker 이미지 빌드" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 -msgid "" -"Next, we need a model. For the purpose of this tutorial, we use MobilNetV2 " -"with 10 output classes:" -msgstr "" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "ServerApp Docker 이미지 실행" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 -msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which makes " -"it easier to implement the :code:`Client` interface when your workload uses " -"Keras. The :code:`NumPyClient` interface defines three methods which can be " -"implemented in the following way:" -msgstr "" +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "이제 ServerApp 이미지를 빌드했으니 이제 실행할 수 있습니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." -msgstr "" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "``--superlink 192.168.1.100:9091``: 이 옵션은 SuperLinks 드라이버의 주소를 지정합니다" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 -msgid "" -"You should now see how the training does in the very first terminal (the one " -"that started the server):" -msgstr "" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." +#~ msgstr "" +#~ "로컬에서 Flower를 실행하는 것을 테스트하려면 `bridge " +#~ "network `__,를 생성하고 ``--network`` argument를 " +#~ "사용하여 ServerApp을 실행할 Docker 네트워크의 이름을 " +#~ "전달하면 됩니다." -#: ../../source/tutorial-quickstart-tensorflow.rst:169 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. 
The full `source code `_ for this can be found in :" -"code:`examples/quickstart-tensorflow/client.py`." -msgstr "" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "" +#~ "태그 뒤에 오는 모든 argument는 Flower " +#~ "ServerApp 바이너리에 전달됩니다. ServerApp에서 지원하는 " +#~ "사용 가능한 모든 플래그를 보려면 실행하세요:" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower with " -"XGBoost to train classification models on trees." -msgstr "" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "SSL을 사용하려면 PEM 인코딩된 루트 인증서를 ServerApp 컨테이너에 마운트해야 합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" -msgstr "" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" +#~ "인증서가 이미 로컬에 존재한다고 가정하면, ``--volume`` " +#~ "플래그를 사용하여 로컬 인증서를 컨테이너의 ``/app/`` " +#~ "디렉터리에 마운트할 수 있습니다. 이렇게 하면 " +#~ "ServerApp이 컨테이너 내의 인증서에 액세스할 수 " +#~ "있습니다. 컨테이너를 시작할 때 ``--root-" +#~ "certificates`` 플래그를 사용하세요." -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" -msgstr "" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" -#: ../../source/tutorial-quickstart-xgboost.rst:16 -msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that maximises " -"the computational boundaries for boosted tree methods. 
It's primarily " -"designed to enhance both the performance and computational speed of machine " -"learning models. In XGBoost, trees are constructed concurrently, unlike the " -"sequential approach taken by GBDT." -msgstr "" +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr ":py:obj:`run_supernode `\\ \\(\\)" -#: ../../source/tutorial-quickstart-xgboost.rst:20 -msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning techniques." -msgstr "" +#~ msgid "d defaults to None." +#~ msgstr "d는 기본값이 None입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" -msgstr "" +#~ msgid "Update R from dict/iterable E and F." +#~ msgstr "dict/iterable E 및 F에서 R을 업데이트합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:25 -msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems for " -"specialised applications, like survival analysis and financial fraud " -"detection." -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" -#: ../../source/tutorial-quickstart-xgboost.rst:27 -msgid "" -"Federated learning ensures that raw data remains on the local device, making " -"it an attractive approach for sensitive domains where data security and " -"privacy are paramount. Given the robustness and efficiency of XGBoost, " -"combining it with federated learning offers a promising solution for these " -"specific challenges." -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" -#: ../../source/tutorial-quickstart-xgboost.rst:30 -msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. 
We use a simple " -"example (`full code xgboost-quickstart `_) with two *clients* and one *server* to " -"demonstrate how federated XGBoost works, and then we dive into a more " -"complex example (`full code xgboost-comprehensive `_) to run various " -"experiments." -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" -msgstr "" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" -#: ../../source/tutorial-quickstart-xgboost.rst:41 -msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" -msgstr "" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" -#: ../../source/tutorial-quickstart-xgboost.rst:47 -msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" -msgstr "" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" -#: ../../source/tutorial-quickstart-xgboost.rst:57 -msgid "" -"*Clients* are responsible for generating individual weight-updates for the " -"model based on their local datasets. Now that we have all our dependencies " -"installed, let's run a simple distributed training with two clients and one " -"server." 
-msgstr "" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" -#: ../../source/tutorial-quickstart-xgboost.rst:60 -msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets " -"and other related functions:" -msgstr "" +#~ msgid "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" -msgstr "" +#~ msgid "" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." +#~ msgstr "" +#~ "클라이언트 앱의 특정 데이터 파티션을 로드할 때 " +#~ "사용할 수 있는 식별자입니다. 시뮬레이션을 수행할 때 " +#~ "이 식별자를 사용하는 것이 더 적절합니다." -#: ../../source/tutorial-quickstart-xgboost.rst:89 -msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower " -"Datasets and conduct data partitioning for FL:" -msgstr "" +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr ":py:obj:`partition_id `\\" -#: ../../source/tutorial-quickstart-xgboost.rst:102 -msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load the " -"partition for the given client based on :code:`node_id`:" -msgstr "" +#~ msgid "An identifier telling which data partition a ClientApp should use." +#~ msgstr "클라이언트앱이 사용해야 하는 데이터 파티션을 알려주는 식별자입니다." -#: ../../source/tutorial-quickstart-xgboost.rst:121 -msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." 
-msgstr "" +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 -msgid "" -"The functions of :code:`train_test_split` and :code:" -"`transform_dataset_to_dmatrix` are defined as below:" -msgstr "" +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." -msgstr "" +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "Flower SuperLink(Driver API 및 Fleet API)를 실행하세요." -#: ../../source/tutorial-quickstart-xgboost.rst:174 -msgid "" -"The :code:`num_local_round` represents the number of iterations for local " -"tree boost. We use CPU for the training in default. One can shift it to GPU " -"by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as evaluation " -"metric." -msgstr "" +#~ msgid "" +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" -msgstr "" +#~ msgid ":py:obj:`flwr.server.strategy `\\" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 -msgid "" -"After loading the dataset we define the Flower client. We follow the general " -"rule to define :code:`XgbClient` class inherited from :code:`fl.client." -"Client`." -msgstr "" +#~ msgid ":py:obj:`flwr.server.workflow `\\" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:193 -msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures " -"for training." 
-msgstr "" +#~ msgid "run\\_driver\\_api" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:196 -msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` " -"methods insides :code:`XgbClient` class as follows." -msgstr "" +#~ msgid "run\\_fleet\\_api" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:210 -msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use :code:`get_parameters` " -"and :code:`set_parameters` to initialise model parameters for XGBoost. As a " -"result, let's return an empty tensor in :code:`get_parameters` when it is " -"called by the server at the first round." -msgstr "" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:251 -msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build up " -"the first set of trees. the returned Booster object and config are stored " -"in :code:`self.bst` and :code:`self.config`, respectively. From the second " -"round, we load the global model sent from server to :code:`self.bst`, and " -"then update model weights on local training data with function :code:" -"`local_boost` as follows:" -msgstr "" +#~ msgid "key shares." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:269 -msgid "" -"Given :code:`num_local_round`, we update trees by calling :code:`self.bst." -"update` method. After training, the last :code:`N=num_local_round` trees " -"will be extracted to send to the server." -msgstr "" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. 
- 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:291 -msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to conduct " -"evaluation on valid set. The AUC value will be returned." -msgstr "" +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:294 -msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one " -"line to actually run this client:" -msgstr "" +#~ msgid "" +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:300 -msgid "" -"That's it for the client. We only have to implement :code:`Client`and call :" -"code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` tells the " -"client which server to connect to. In our case we can run the server and the " -"client on the same machine, therefore we use :code:`\"[::]:8080\"`. If we " -"run a truly federated workload with the server and clients running on " -"different machines, all that needs to change is the :code:`server_address` " -"we point the client at." -msgstr "" +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. 
Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:311 -msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version of " -"the model back to each *client* to finish a complete FL round." -msgstr "" +#~ msgid "" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:314 -msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from :" -"code:`flwr.server.strategy`." -msgstr "" +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." -msgstr "" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:339 -msgid "" -"We use two clients for this example. An :code:`evaluate_metrics_aggregation` " -"function is defined to collect and wighted average the AUC values from " -"clients." 
-msgstr "" +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" -msgstr "" +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" -msgstr "" +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:356 -msgid "" -"You must be curious about how bagging aggregation works. Let's look into the " -"details." -msgstr "" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:358 -msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define :code:" -"`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`. 
Then, we " -"override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :code:" -"`evaluate` methods as follows:" -msgstr "" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:454 -msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" -msgstr "" +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:513 -msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling :code:" -"`_get_tree_nums`. Then, the fetched information will be aggregated. After " -"that, the trees (containing model weights) are aggregated to generate a new " -"tree model." -msgstr "" +#~ msgid "" +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:518 -msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "" +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" -msgstr "" +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." 
+#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:585 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in :code:" -"`metrics_distributed`. One can see that the average AUC increases over FL " -"rounds." -msgstr "" +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:590 -msgid "" -"The full `source code `_ for this example can be found in :code:`examples/" -"xgboost-quickstart`." -msgstr "" +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" -msgstr "" +#~ msgid "" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:596 -msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time to " -"run some more comprehensive experiments by customising the experimental " -"settings. In the xgboost-comprehensive example (`full code `_), we provide " -"more options to define various experimental setups, including aggregation " -"strategies, data partitioning and centralised/distributed evaluation. We " -"also support :doc:`Flower simulation ` making it " -"easy to simulate large client cohorts in a resource-aware manner. Let's take " -"a look!" -msgstr "" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. 
Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:603 -msgid "Cyclic training" -msgstr "" +#~ msgid "receive the updated local model weights" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:605 -msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, which " -"performs FL in a client-by-client fashion. Instead of aggregating multiple " -"clients, there is only one single client participating in the training per " -"round in the cyclic training scenario. The trained local XGBoost trees will " -"be passed to the next client as an initialised model for next round's " -"boosting." -msgstr "" +#~ msgid "which can be implemented in the following way:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:609 -msgid "" -"To do this, we first customise a :code:`ClientManager` in :code:" -"`server_utils.py`:" -msgstr "" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:649 -msgid "" -"The customised :code:`ClientManager` samples all available clients in each " -"FL round based on the order of connection to the server. Then, we define a " -"new strategy :code:`FedXgbCyclic` in :code:`flwr.server.strategy." -"fedxgb_cyclic.py`, in order to sequentially select only one client in given " -"round and pass the received model to next client." -msgstr "" +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). 
" +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:690 -msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model by " -"overriding :code:`aggregate_fit`." -msgstr "" +#~ msgid "" +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 -msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate` " -"methods ensure the clients to be sequentially selected given FL round:" -msgstr "" +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" -msgstr "" +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." 
+#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:759 -msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner` to " -"instantiate the data partitioner based on the given :code:`num_partitions` " -"and :code:`partitioner_type`. Currently, we provide four supported " -"partitioner type to simulate the uniformity/non-uniformity in data quantity " -"(uniform, linear, square, exponential)." -msgstr "" +#~ msgid "" +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" -msgstr "" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client`and call" +#~ " :code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:792 -msgid "" -"To facilitate centralised evaluation, we define a function in :code:" -"`server_utils.py`:" -msgstr "" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:824 -msgid "" -"This function returns a evaluation function which instantiates a :code:" -"`Booster` object and loads the global model weights to it. 
The evaluation is " -"conducted by calling :code:`eval_set()` method, and the tested AUC value is " -"reported." -msgstr "" +#~ msgid "" +#~ "Welcome to the third part of the" +#~ " Flower federated learning tutorial. In " +#~ "previous parts of this tutorial, we " +#~ "introduced federated learning with PyTorch " +#~ "and Flower (`part 1 " +#~ "`__) and we " +#~ "learned how strategies can be used " +#~ "to customize the execution on both " +#~ "the server and the clients (`part " +#~ "2 `__)." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:827 -msgid "" -"As for distributed evaluation on the clients, it's same as the quick-start " -"example by overriding the :code:`evaluate()` method insides the :code:" -"`XgbClient` class in :code:`client_utils.py`." -msgstr "" +#~ msgid "" +#~ "In this notebook, we'll continue to " +#~ "customize the federated learning system " +#~ "we built previously by creating a " +#~ "custom version of FedAvg (again, using" +#~ " `Flower `__ and `PyTorch " +#~ "`__)." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:831 -msgid "Flower simulation" -msgstr "" +#~ msgid "" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the Flower community on Slack to " +#~ "connect, ask questions, and get help:" +#~ " `Join Slack `__" +#~ " 🌼 We'd love to hear from you" +#~ " in the ``#introductions`` channel! And " +#~ "if anything is unclear, head over " +#~ "to the ``#questions`` channel." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:832 -msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a " -"single machine or a cluster of machines." -msgstr "" +#~ msgid "Let's build a new ``Strategy`` from scratch!" 
+#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:866 -msgid "" -"After importing all required packages, we define a :code:`main()` function " -"to perform the simulation process:" -msgstr "" +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap everything in their own " +#~ "``DataLoader``. We introduce a new " +#~ "parameter ``num_clients`` which allows us " +#~ "to call ``load_datasets`` with different " +#~ "numbers of clients." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:921 -msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, the " -"clients won't need to pre-process their partitions again." -msgstr "" +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "(again) create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``. Here, we also pass the" +#~ " ``cid`` to the client and use " +#~ "it log additional details:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" -msgstr "" +#~ msgid "" +#~ "Let's go deeper and see what it" +#~ " takes to move from ``NumPyClient`` " +#~ "to ``Client``!" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:975 -msgid "" -"After that, we start the simulation by calling :code:`fl.simulation." -"start_simulation`:" -msgstr "" +#~ msgid "" +#~ "So far, we've implemented our client " +#~ "by subclassing ``flwr.client.NumPyClient``. The " +#~ "three methods we implemented are " +#~ "``get_parameters``, ``fit``, and ``evaluate``. 
" +#~ "Finally, we wrap the creation of " +#~ "instances of this class in a " +#~ "function called ``client_fn``:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:995 -msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" -msgstr "" +#~ msgid "" +#~ "We've seen this before, there's nothing" +#~ " new so far. The only *tiny* " +#~ "difference compared to the previous " +#~ "notebook is naming, we've changed " +#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " +#~ "``client_fn`` to ``numpyclient_fn``. Let's run" +#~ " it to see the output we get:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" -msgstr "" +#~ msgid "" +#~ "This works as expected, two clients " +#~ "are training for three rounds of " +#~ "federated learning." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 -msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server and " -"simulation, allowing users to specify different experimental settings. Let's " -"first see the sever side:" -msgstr "" +#~ msgid "" +#~ "Let's dive a little bit deeper and" +#~ " discuss how Flower executes this " +#~ "simulation. Whenever a client is " +#~ "selected to do some work, " +#~ "``start_simulation`` calls the function " +#~ "``numpyclient_fn`` to create an instance " +#~ "of our ``FlowerNumPyClient`` (along with " +#~ "loading the model and the data)." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 -msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and " -"evaluation fashion. Note that with :code:`--centralised-eval`, the sever " -"will do centralised evaluation and all functionalities for client evaluation " -"will be disabled." 
-msgstr "" +#~ msgid "" +#~ "`Check out Flower Code Examples " +#~ "`__" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" -msgstr "" +#~ msgid "" +#~ "`Watch Flower Summit 2023 videos " +#~ "`__" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 -msgid "" -"This defines various options for client data partitioning. Besides, clients " -"also have an option to conduct evaluation on centralised test set by " -"setting :code:`--centralised-eval`, as well as an option to perform scaled " -"learning rate based on the number of clients by setting :code:`--scaled-lr`." -msgstr "" +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower, " +#~ "`Flower Datasets `__ " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" -msgstr "" +#~ msgid "Loading the data" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." -msgstr "" +#~ msgid "" +#~ "We simulate having multiple datasets " +#~ "from multiple organizations (also called " +#~ "the \"cross-silo\" setting in federated" +#~ " learning) by splitting the original " +#~ "CIFAR-10 dataset into multiple partitions. " +#~ "Each partition will represent the data" +#~ " from a single organization. We're " +#~ "doing this purely for experimentation " +#~ "purposes, in the real world there's " +#~ "no need for data splitting because " +#~ "each organization already has their own" +#~ " data (so the data is naturally " +#~ "partitioned)." 
+#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" -msgstr "" +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server." +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 -msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 clients " -"with exponential distribution for 50 rounds, we first start the server as " -"below:" -msgstr "" +#~ msgid "" +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" -msgstr "" +#~ msgid "" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. Each ``trainloader``/``valloader`` " +#~ "pair contains 4000 training examples and" +#~ " 1000 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." 
+#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" -msgstr "" +#~ msgid "" +#~ "Let's take a look at the first " +#~ "batch of images and labels in the" +#~ " first training set (i.e., " +#~ "``trainloaders[0]``) before we move on:" +#~ msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -msgid "" -"The full `code `_ for this comprehensive example can be found in :code:" -"`examples/xgboost-comprehensive`." -msgstr "" +#~ msgid "" +#~ "The output above shows a random " +#~ "batch of images from the first " +#~ "``trainloader`` in our list of ten " +#~ "``trainloaders``. It also prints the " +#~ "labels associated with each image (i.e.," +#~ " one of the ten possible labels " +#~ "we've seen above). If you run the" +#~ " cell again, you should see another" +#~ " batch of images." +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" -msgstr "" +#~ msgid "Defining the model" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 -msgid "" -"Welcome to the third part of the Flower federated learning tutorial. In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies can be " -"used to customize the execution on both the server and the clients (`part 2 " -"`__)." -msgstr "" +#~ msgid "Training the model" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll continue to customize the federated learning system " -"we built previously by creating a custom version of FedAvg (again, using " -"`Flower `__ and `PyTorch `__)." -msgstr "" +#~ msgid "" +#~ "We now have all the basic building" +#~ " blocks we need: a dataset, a " +#~ "model, a training function, and a " +#~ "test function. 
Let's put them together" +#~ " to train the model on the " +#~ "dataset of one of our organizations " +#~ "(``trainloaders[0]``). This simulates the " +#~ "reality of most machine learning " +#~ "projects today: each organization has " +#~ "their own data and trains models " +#~ "only on this internal data:" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 -msgid "" -"`Star Flower on GitHub `__ ⭐️ and join the " -"Flower community on Slack to connect, ask questions, and get help: `Join " -"Slack `__ 🌼 We'd love to hear from you in the " -"``#introductions`` channel! And if anything is unclear, head over to the " -"``#questions`` channel." -msgstr "" +#~ msgid "" +#~ "Training the simple CNN on our " +#~ "CIFAR-10 split for 5 epochs should " +#~ "result in a test set accuracy of" +#~ " about 41%, which is not good, " +#~ "but at the same time, it doesn't" +#~ " really matter for the purposes of" +#~ " this tutorial. The intent was just" +#~ " to show a simplistic centralized " +#~ "training pipeline that sets the stage" +#~ " for what comes next - federated " +#~ "learning!" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "" +#~ msgid "Updating model parameters" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "" +#~ msgid "" +#~ "In federated learning, the server sends" +#~ " the global model parameters to the" +#~ " client, and the client updates the" +#~ " local model with the parameters " +#~ "received from the server. 
It then " +#~ "trains the model on the local data" +#~ " (which changes the model parameters " +#~ "locally) and sends the updated/changed " +#~ "model parameters back to the server " +#~ "(or, alternatively, it sends just the" +#~ " gradients back to the server, not" +#~ " the full model parameters)." +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 -msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." -msgstr "" +#~ msgid "" +#~ "The details of how this works are" +#~ " not really important here (feel free" +#~ " to consult the PyTorch documentation " +#~ "if you want to learn more). In " +#~ "essence, we use ``state_dict`` to access" +#~ " PyTorch model parameter tensors. The " +#~ "parameter tensors are then converted " +#~ "to/from a list of NumPy ndarray's " +#~ "(which Flower knows how to " +#~ "serialize/deserialize):" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "" +#~ msgid "Implementing a Flower client" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "" +#~ msgid "" +#~ "With that out of the way, let's" +#~ " move on to the interesting part. " +#~ "Federated learning systems consist of a" +#~ " server and multiple clients. 
In " +#~ "Flower, we create clients by " +#~ "implementing subclasses of ``flwr.client.Client``" +#~ " or ``flwr.client.NumPyClient``. We use " +#~ "``NumPyClient`` in this tutorial because " +#~ "it is easier to implement and " +#~ "requires us to write less boilerplate." +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 -msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" -msgstr "" +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 -msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled (on " -"Google Colab: ``Runtime > Change runtime type > Hardware acclerator: GPU > " -"Save``). Note, however, that Google Colab is not always able to offer GPU " -"acceleration. If you see an error related to GPU availability in one of the " -"following sections, consider switching back to CPU-based execution by " -"setting ``DEVICE = torch.device(\"cpu\")``. If the runtime has GPU " -"acceleration enabled, you should see the output ``Training on cuda``, " -"otherwise it'll say ``Training on cpu``." 
-msgstr "" +#~ msgid "" +#~ "``fit``: Receive model parameters from " +#~ "the server, train the model parameters" +#~ " on the local data, and return " +#~ "the (updated) model parameters to the" +#~ " server" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "" +#~ msgid "" +#~ "``evaluate``: Receive model parameters from" +#~ " the server, evaluate the model " +#~ "parameters on the local data, and " +#~ "return the evaluation result to the " +#~ "server" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 -msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into ten " -"smaller datasets (each split into training and validation set), and wrap " -"everything in their own ``DataLoader``. We introduce a new parameter " -"``num_clients`` which allows us to call ``load_datasets`` with different " -"numbers of clients." -msgstr "" +#~ msgid "" +#~ "Our class ``FlowerClient`` defines how " +#~ "local training/evaluation will be performed" +#~ " and allows Flower to call the " +#~ "local training/evaluation through ``fit`` and" +#~ " ``evaluate``. Each instance of " +#~ "``FlowerClient`` represents a *single client*" +#~ " in our federated learning system. " +#~ "Federated learning systems have multiple " +#~ "clients (otherwise, there's not much to" +#~ " federate), so each client will be" +#~ " represented by its own instance of" +#~ " ``FlowerClient``. If we have, for " +#~ "example, three clients in our workload," +#~ " then we'd have three instances of" +#~ " ``FlowerClient``. 
Flower calls " +#~ "``FlowerClient.fit`` on the respective " +#~ "instance when the server selects a " +#~ "particular client for training (and " +#~ "``FlowerClient.evaluate`` for evaluation)." +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "" +#~ msgid "Using the Virtual Client Engine" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 -msgid "" -"Let's continue with the usual model definition (including ``set_parameters`` " -"and ``get_parameters``), training and test functions:" -msgstr "" +#~ msgid "" +#~ "In this notebook, we want to " +#~ "simulate a federated learning system " +#~ "with 10 clients on a single " +#~ "machine. This means that the server " +#~ "and all 10 clients will live on" +#~ " a single machine and share resources" +#~ " such as CPU, GPU, and memory. " +#~ "Having 10 clients would mean having " +#~ "10 instances of ``FlowerClient`` in " +#~ "memory. Doing this on a single " +#~ "machine can quickly exhaust the " +#~ "available memory resources, even if only" +#~ " a subset of these clients " +#~ "participates in a single round of " +#~ "federated learning." 
+#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "" +#~ msgid "" +#~ "In addition to the regular capabilities" +#~ " where server and clients run on " +#~ "multiple machines, Flower, therefore, provides" +#~ " special simulation capabilities that " +#~ "create ``FlowerClient`` instances only when" +#~ " they are actually necessary for " +#~ "training or evaluation. To enable the" +#~ " Flower framework to create clients " +#~ "when necessary, we need to implement " +#~ "a function called ``client_fn`` that " +#~ "creates a ``FlowerClient`` instance on " +#~ "demand. Flower calls ``client_fn`` whenever" +#~ " it needs an instance of one " +#~ "particular client to call ``fit`` or " +#~ "``evaluate`` (those instances are usually " +#~ "discarded after use, so they should " +#~ "not keep any local state). Clients " +#~ "are identified by a client ID, or" +#~ " short ``cid``. The ``cid`` can be" +#~ " used, for example, to load different" +#~ " local data partitions for different " +#~ "clients, as can be seen below:" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 -msgid "" -"To implement the Flower client, we (again) create a subclass of ``flwr." -"client.NumPyClient`` and implement the three methods ``get_parameters``, " -"``fit``, and ``evaluate``. 
Here, we also pass the ``cid`` to the client and " -"use it log additional details:" -msgstr "" +#~ msgid "Starting the training" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "" +#~ msgid "" +#~ "We now have the class ``FlowerClient``" +#~ " which defines client-side " +#~ "training/evaluation and ``client_fn`` which " +#~ "allows Flower to create ``FlowerClient`` " +#~ "instances whenever it needs to call " +#~ "``fit`` or ``evaluate`` on one " +#~ "particular client. The last step is " +#~ "to start the actual simulation using " +#~ "``flwr.simulation.start_simulation``." +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr "" +#~ msgid "" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 -msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher " -"learning rate (potentially also other hyperparameters) to the optimizer of a " -"fraction of the clients. We will keep the sampling of the clients as it is " -"in ``FedAvg`` and then change the configuration dictionary (one of the " -"``FitIns`` attributes)." -msgstr "" +#~ msgid "" +#~ "Flower has a number of built-in" +#~ " strategies, but we can also use " +#~ "our own strategy implementations to " +#~ "customize nearly all aspects of the " +#~ "federated learning approach. 
For this " +#~ "example, we use the built-in " +#~ "``FedAvg`` implementation and customize it " +#~ "using a few basic parameters. The " +#~ "last step is the actual call to" +#~ " ``start_simulation`` which - you guessed" +#~ " it - starts the simulation:" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 -msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" -msgstr "" +#~ msgid "" +#~ "When we call ``start_simulation``, we " +#~ "tell Flower that there are 10 " +#~ "clients (``num_clients=10``). Flower then goes" +#~ " ahead an asks the ``FedAvg`` " +#~ "strategy to select clients. ``FedAvg`` " +#~ "knows that it should select 100% " +#~ "of the available clients " +#~ "(``fraction_fit=1.0``), so it goes ahead " +#~ "and selects 10 random clients (i.e., " +#~ "100% of 10)." +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "" +#~ msgid "" +#~ "Flower then asks the selected 10 " +#~ "clients to train the model. When " +#~ "the server receives the model parameter" +#~ " updates from the clients, it hands" +#~ " those updates over to the strategy" +#~ " (*FedAvg*) for aggregation. The strategy" +#~ " aggregates those updates and returns " +#~ "the new global model, which then " +#~ "gets used in the next round of " +#~ "federated learning." +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 -msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom " -"strategy enables granular control over client node configuration, result " -"aggregation, and more. 
To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``. To " -"make custom strategies even more powerful, you can pass custom functions to " -"the constructor of your new class (``__init__``) and then call these " -"functions whenever needed." -msgstr "" +#~ msgid "" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 -msgid "" -"Before you continue, make sure to join the Flower community on Slack: `Join " -"Slack `__" -msgstr "" +#~ msgid "" +#~ "In this notebook, we'll begin to " +#~ "customize the federated learning system " +#~ "we built in the introductory notebook" +#~ " (again, using `Flower `__" +#~ " and `PyTorch `__)." +#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 -msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd also " -"love to hear who you are in ``#introductions``!" -msgstr "" +#~ msgid "Let's move beyond FedAvg with Flower strategies!" 
+#~ msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 -msgid "" -"The `Flower Federated Learning Tutorial - Part 4 `__ introduces " -"``Client``, the flexible API underlying ``NumPyClient``." -msgstr "" +#~ msgid "" +#~ "Flower, by default, initializes the " +#~ "global model by asking one random " +#~ "client for the initial parameters. In" +#~ " many cases, we want more control " +#~ "over parameter initialization though. Flower" +#~ " therefore allows you to directly " +#~ "pass the initial parameters to the " +#~ "Strategy:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" -msgstr "" +#~ msgid "" +#~ "Passing ``initial_parameters`` to the " +#~ "``FedAvg`` strategy prevents Flower from " +#~ "asking one of the clients for the" +#~ " initial parameters. If we look " +#~ "closely, we can see that the logs" +#~ " do not show any calls to the" +#~ " ``FlowerClient.get_parameters`` method." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In the " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__), we learned how strategies can be used " -"to customize the execution on both the server and the clients (`part 2 " -"`__), and we built our own custom strategy from scratch (`part " -"3 `__)." -msgstr "" +#~ msgid "" +#~ "We've seen the function ``start_simulation``" +#~ " before. It accepts a number of " +#~ "arguments, amongst them the ``client_fn`` " +#~ "used to create ``FlowerClient`` instances, " +#~ "the number of clients to simulate " +#~ "``num_clients``, the number of rounds " +#~ "``num_rounds``, and the strategy." 
+#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 -msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new baseclass " -"for building clients, simply named ``Client``. In previous parts of this " -"tutorial, we've based our client on ``NumPyClient``, a convenience class " -"which makes it easy to work with machine learning libraries that have good " -"NumPy interoperability. With ``Client``, we gain a lot of flexibility that " -"we didn't have before, but we'll also have to do a few things the we didn't " -"have to do before." -msgstr "" +#~ msgid "" +#~ "Next, we'll just pass this function " +#~ "to the FedAvg strategy before starting" +#~ " the simulation:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 -msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" -msgstr "" +#~ msgid "" +#~ "We now have 1000 partitions, each " +#~ "holding 45 training and 5 validation " +#~ "examples. Given that the number of " +#~ "training examples on each client is " +#~ "quite small, we should probably train" +#~ " the model a bit longer, so we" +#~ " configure the clients to perform 3" +#~ " local training epochs. 
We should " +#~ "also adjust the fraction of clients " +#~ "selected for training during each round" +#~ " (we don't want all 1000 clients " +#~ "participating in every round), so we " +#~ "adjust ``fraction_fit`` to ``0.05``, which " +#~ "means that only 5% of available " +#~ "clients (so 50 clients) will be " +#~ "selected for training each round:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgstr "|93b02017c78049bbbd5ae456dcb2c91b|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 -msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into ten " -"smaller datasets (each split into training and validation set), and wrap " -"everything in their own ``DataLoader``." -msgstr "" +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "|01471150fd5144c080a176b43e92a3ff|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "" +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgstr "|9bc21c7dbd17444a8f070c60786e3484|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 -msgid "" -"So far, we've implemented our client by subclassing ``flwr.client." -"NumPyClient``. The three methods we implemented are ``get_parameters``, " -"``fit``, and ``evaluate``. Finally, we wrap the creation of instances of " -"this class in a function called ``client_fn``:" -msgstr "" +#~ msgid "|3047bbce54b34099ae559963d0420d79|" +#~ msgstr "|3047bbce54b34099ae559963d0420d79|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 -msgid "" -"We've seen this before, there's nothing new so far. 
The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" -msgstr "" +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgstr "|e9f8ce948593444fb838d2f354c7ec5d|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 -msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." -msgstr "" +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgstr "|c24c1478b30e4f74839208628a842d1e|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 -msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and the " -"data)." -msgstr "" +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgstr "|1b3613d7a58847b59e1d3180802dbc09|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 -msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to makes " -"it look like a subclass of ``flwr.client.Client``, not ``flwr.client." -"NumPyClient``. In fact, the Flower core framework doesn't know how to handle " -"``NumPyClient``'s, it only knows how to handle ``Client``'s. ``NumPyClient`` " -"is just a convenience abstraction built on top of ``Client``." -msgstr "" +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "|9980b5213db547d0b8024a50992b9e3f|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 -msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on top " -"of ``Client``." 
-msgstr "" +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "|c7afb4c92d154bfaa5e8cb9a150e17f1|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "" +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "|032eb6fed6924ac387b9f13854919196|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 -msgid "" -"Let's try to do the same thing using ``Client`` instead of ``NumPyClient``." -msgstr "" +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "|fbf225add7fd4df5a9bf25a95597d954|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 -msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta make " -"sure our new ``Client``-based client works, right?" -msgstr "" +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgstr "|7efbe3d29d8349b89594e8947e910525|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 -msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" -msgstr "" +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgstr "|329fb3c04c744eda83bb51fa444c2266|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 -msgid "" -"First of all, it's more code. But why? The difference comes from the fact " -"that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the network, " -"it eventually needs to turn these parameters into ``bytes``. Turning " -"parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. 
Flower needs to do both: it needs " -"to serialize parameters on the server-side and send them to the client, the " -"client needs to deserialize them to use them for local training, and then " -"serialize the updated parameters again to send them back to the server, " -"which (finally!) deserializes them again in order to aggregate them with the " -"updates received from other clients." -msgstr "" +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgstr "|c00bf2750bc24d229737a0fe1395f0fc|" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 -msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It can " -"do so because it expects you to return parameters as NumPy ndarray's, and it " -"knows how to handle these. This makes working with machine learning " -"libraries that have good NumPy support (most of them) a breeze." -msgstr "" +#~ msgid "run\\_client\\_app" +#~ msgstr "run\\_client\\_app" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 -msgid "" -"In terms of API, there's one major difference: all methods in Client take " -"exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return exactly " -"one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return " -"values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These ``*Ins`` " -"and ``*Res`` objects in ``Client`` wrap all the individual values you're " -"used to from ``NumPyClient``." -msgstr "" +#~ msgid "run\\_supernode" +#~ msgstr "run\\_supernode" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" -msgstr "" +#~ msgid "Retrieve the corresponding layout by the string key." 
+#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 -msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." -msgstr "" +#~ msgid "" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 -msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an object. " -"This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." -msgstr "" +#~ msgid "the string key as the query for the layout." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 -msgid "" -"Federated Learning relies heavily on internet communication for training by " -"sending Python objects back and forth between the clients and the server. " -"This means that serialization is an essential part of Federated Learning." -msgstr "" +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 -msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." 
-msgstr "" +#~ msgid "run\\_server\\_app" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" -msgstr "" +#~ msgid "run\\_superlink" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 -msgid "" -"This is where the real serialization/deserialization will happen, especially " -"in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." -msgstr "" +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 -msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert our " -"arrays." -msgstr "" +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "" +#~ msgid "The total number of clients in this simulation." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 -msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we will " -"just have to call our custom functions in our ``flwr.client.Client``." 
-msgstr "" +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 -msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` defined " -"above." -msgstr "" +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 -msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we need " -"to serialize our local results with ``ndarrays_to_sparse_parameters``." -msgstr "" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 -msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters with " -"our custom function." -msgstr "" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." 
+#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 -msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change the " -"serialization and deserialization here, we only need to reimplement the " -"``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other " -"functions of the strategy will be inherited from the super class ``FedAvg``." -msgstr "" +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "" +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. 
If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 -msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" -msgstr "" +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "" +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" -msgstr "" +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 -msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine learning " -"libraries that have good NumPy interoperability. ``Client`` is a more " -"flexible abstraction that allows us to do things that are not possible in " -"``NumPyClient``. In order to do so, it requires us to handle parameter " -"serialization and deserialization ourselves." -msgstr "" +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 -msgid "" -"This is the final part of the Flower tutorial (for now!), congratulations! 
" -"You're now well equipped to understand the rest of the documentation. There " -"are many topics we didn't cover in the tutorial, we recommend the following " -"resources:" -msgstr "" +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "" +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 -msgid "" -"`Check out Flower Code Examples `__" -msgstr "" +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 -msgid "" -"`Use Flower Baselines for your research `__" -msgstr "" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 -msgid "" -"`Watch Flower Summit 2023 videos `__" -msgstr "" +#~ msgid "Let's build a federated learning system using fastai and Flower!" 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" -msgstr "" +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "" +#~ msgid "" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In part " -"1, we use PyTorch for the model training pipeline and data loading. In part " -"2, we continue to federate the PyTorch-based pipeline using Flower." -msgstr "" +#~ msgid "" +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. " +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" -msgstr "" +#~ msgid "Dependencies" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 -msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." 
-msgstr "" +#~ msgid "" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower (``flwr``):" -msgstr "" +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled (on " -"Google Colab: ``Runtime > Change runtime type > Hardware accelerator: GPU > " -"Save``). Note, however, that Google Colab is not always able to offer GPU " -"acceleration. If you see an error related to GPU availability in one of the " -"following sections, consider switching back to CPU-based execution by " -"setting ``DEVICE = torch.device(\"cpu\")``. If the runtime has GPU " -"acceleration enabled, you should see the output ``Training on cuda``, " -"otherwise it'll say ``Training on cpu``." -msgstr "" +#~ msgid "Handling the data" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "" +#~ msgid "" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -msgid "" -"Federated learning can be applied to many different types of tasks across " -"different domains. 
In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular CIFAR-10 " -"dataset. CIFAR-10 can be used to train image classifiers that distinguish " -"between images from ten different classes: 'airplane', 'automobile', 'bird', " -"'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and 'truck'." -msgstr "" +#~ msgid "Training and testing the model" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 -msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the " -"original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely for " -"experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the data " -"is naturally partitioned)." -msgstr "" +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -msgid "" -"Each organization will act as a client in the federated learning system. So " -"having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." -msgstr "" +#~ msgid "Creating the model itself" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 -msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. 
We will create small training and " -"test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" -msgstr "" +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten different " -"organizations. Each ``trainloader``/``valloader`` pair contains 4000 " -"training examples and 1000 validation examples. There's also a single " -"``testloader`` (we did not split the test set). Again, this is only " -"necessary for building research or educational systems, actual federated " -"learning systems have their data naturally distributed across multiple " -"partitions." -msgstr "" +#~ msgid "Federating the example" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 -msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" -msgstr "" +#~ msgid "Creating the IMDBClient" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 -msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another batch " -"of images." -msgstr "" +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). 
This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "" +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 -msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ `__." -msgstr "" +#~ msgid "Starting the server" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "" +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. 
Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 -msgid "" -"We use the simple CNN described in the `PyTorch tutorial `__:" -msgstr "" +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" -msgstr "" +#~ msgid "Putting everything together" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "" +#~ msgid "We can now start client instances using:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 -msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train the " -"model on the dataset of one of our organizations (``trainloaders[0]``). This " -"simulates the reality of most machine learning projects today: each " -"organization has their own data and trains models only on this internal data:" -msgstr "" +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 -msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result in " -"a test set accuracy of about 41%, which is not good, but at the same time, " -"it doesn't really matter for the purposes of this tutorial. The intent was " -"just to show a simplistic centralized training pipeline that sets the stage " -"for what comes next - federated learning!" -msgstr "" +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "" +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 -msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was in " -"one place (i.e., a single ``trainloader`` and a single ``valloader``). Next, " -"we'll simulate a situation where we have multiple datasets in multiple " -"organizations and where we train a model over these organizations using " -"federated learning." -msgstr "" +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 -msgid "" -"In federated learning, the server sends the global model parameters to the " -"client, and the client updates the local model with the parameters received " -"from the server. It then trains the model on the local data (which changes " -"the model parameters locally) and sends the updated/changed model parameters " -"back to the server (or, alternatively, it sends just the gradients back to " -"the server, not the full model parameters)." -msgstr "" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 -msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the " -"local model: ``set_parameters`` and ``get_parameters``. The following two " -"functions do just that for the PyTorch model above." -msgstr "" +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 -msgid "" -"The details of how this works are not really important here (feel free to " -"consult the PyTorch documentation if you want to learn more). In essence, we " -"use ``state_dict`` to access PyTorch model parameter tensors. The parameter " -"tensors are then converted to/from a list of NumPy ndarray's (which Flower " -"knows how to serialize/deserialize):" -msgstr "" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." 
+#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "" +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 -msgid "" -"With that out of the way, let's move on to the interesting part. Federated " -"learning systems consist of a server and multiple clients. In Flower, we " -"create clients by implementing subclasses of ``flwr.client.Client`` or " -"``flwr.client.NumPyClient``. We use ``NumPyClient`` in this tutorial because " -"it is easier to implement and requires us to write less boilerplate." -msgstr "" +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 -msgid "" -"To implement the Flower client, we create a subclass of ``flwr.client." -"NumPyClient`` and implement the three methods ``get_parameters``, ``fit``, " -"and ``evaluate``:" -msgstr "" +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "" +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 -msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters to " -"the server" -msgstr "" +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. 
The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 -msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the model " -"parameters on the local data, and return the evaluation result to the server" -msgstr "" +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 -msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" -msgstr "" +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 -msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through " -"``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a " -"*single client* in our federated learning system. Federated learning systems " -"have multiple clients (otherwise, there's not much to federate), so each " -"client will be represented by its own instance of ``FlowerClient``. 
If we " -"have, for example, three clients in our workload, then we'd have three " -"instances of ``FlowerClient``. Flower calls ``FlowerClient.fit`` on the " -"respective instance when the server selects a particular client for training " -"(and ``FlowerClient.evaluate`` for evaluation)." -msgstr "" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "" +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 -msgid "" -"In this notebook, we want to simulate a federated learning system with 10 " -"clients on a single machine. This means that the server and all 10 clients " -"will live on a single machine and share resources such as CPU, GPU, and " -"memory. Having 10 clients would mean having 10 instances of ``FlowerClient`` " -"in memory. Doing this on a single machine can quickly exhaust the available " -"memory resources, even if only a subset of these clients participates in a " -"single round of federated learning." -msgstr "" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. 
In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 -msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a function " -"called ``client_fn`` that creates a ``FlowerClient`` instance on demand. " -"Flower calls ``client_fn`` whenever it needs an instance of one particular " -"client to call ``fit`` or ``evaluate`` (those instances are usually " -"discarded after use, so they should not keep any local state). Clients are " -"identified by a client ID, or short ``cid``. The ``cid`` can be used, for " -"example, to load different local data partitions for different clients, as " -"can be seen below:" -msgstr "" +#~ msgid "Each client will have its own dataset." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "" +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 -msgid "" -"We now have the class ``FlowerClient`` which defines client-side training/" -"evaluation and ``client_fn`` which allows Flower to create ``FlowerClient`` " -"instances whenever it needs to call ``fit`` or ``evaluate`` on one " -"particular client. 
The last step is to start the actual simulation using " -"``flwr.simulation.start_simulation``." -msgstr "" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 -msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the number " -"of clients to simulate (``num_clients``), the number of federated learning " -"rounds (``num_rounds``), and the strategy. The strategy encapsulates the " -"federated learning approach/algorithm, for example, *Federated Averaging* " -"(FedAvg)." -msgstr "" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 -msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated " -"learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last step " -"is the actual call to ``start_simulation`` which - you guessed it - starts " -"the simulation:" -msgstr "" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" 
-msgstr "" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format -msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 clients " -"(``num_clients=10``). Flower then goes ahead an asks the ``FedAvg`` strategy " -"to select clients. ``FedAvg`` knows that it should select 100% of the " -"available clients (``fraction_fit=1.0``), so it goes ahead and selects 10 " -"random clients (i.e., 100% of 10)." -msgstr "" +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 -msgid "" -"Flower then asks the selected 10 clients to train the model. When the server " -"receives the model parameter updates from the clients, it hands those " -"updates over to the strategy (*FedAvg*) for aggregation. The strategy " -"aggregates those updates and returns the new global model, which then gets " -"used in the next round of federated learning." -msgstr "" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "" +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 -msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` are " -"empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" -msgstr "" +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 -msgid "" -"Flower can automatically aggregate losses returned by individual clients, " -"but it cannot do the same for metrics in the generic metrics dictionary (the " -"one with the ``accuracy`` key). 
Metrics dictionaries can contain very " -"different kinds of metrics and even key/value pairs that are not metrics at " -"all, so the framework does not (and can not) know how to handle these " -"automatically." -msgstr "" +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 -msgid "" -"As users, we need to tell the framework how to handle/aggregate these custom " -"metrics, and we do so by passing metric aggregation functions to the " -"strategy. The strategy will then call these functions whenever it receives " -"fit or evaluate metrics from clients. The two possible functions are " -"``fit_metrics_aggregation_fn`` and ``evaluate_metrics_aggregation_fn``." -msgstr "" +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 -msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" -msgstr "" +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 -msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" -msgstr "" +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 -msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom " -"evaluation metrics and calculates a single ``accuracy`` metric across all " -"clients on the server side." 
-msgstr "" +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 -msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial will " -"cover centralized evaluation." -msgstr "" +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 -msgid "" -"Congratulations, you just trained a convolutional neural network, federated " -"over 10 clients! With that, you understand the basics of federated learning " -"with Flower. The same approach you've seen can be used with other machine " -"learning frameworks (not just PyTorch) and tasks (not just CIFAR-10 images " -"classification), for example NLP with Hugging Face Transformers or speech " -"with SpeechBrain." -msgstr "" +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 -msgid "" -"In the next notebook, we're going to cover some more advanced concepts. Want " -"to customize your strategy? Initialize parameters on the server side? Or " -"evaluate the aggregated model on the server side? We'll cover all this and " -"more in the next tutorial." -msgstr "" +#~ msgid "|cc080a555947492fa66131dc3a967603|" +#~ msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 -msgid "" -"The `Flower Federated Learning Tutorial - Part 2 `__ goes " -"into more depth about strategies and all the advanced things you can build " -"with them." 
-msgstr "" +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" -msgstr "" +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 -msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and " -"Flower (`part 1 `__)." -msgstr "" +#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 -msgid "" -"In this notebook, we'll begin to customize the federated learning system we " -"built in the introductory notebook (again, using `Flower `__ and `PyTorch `__)." -msgstr "" +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" + +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "" +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "" +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 -msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of new " -"features." 
-msgstr "" +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "" +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 -msgid "" -"Flower, by default, initializes the global model by asking one random client " -"for the initial parameters. In many cases, we want more control over " -"parameter initialization though. Flower therefore allows you to directly " -"pass the initial parameters to the Strategy:" -msgstr "" +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 -msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower " -"from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." -msgstr "" +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" -msgstr "" +#~ msgid "" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 -msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number of " -"arguments, amongst them the ``client_fn`` used to create ``FlowerClient`` " -"instances, the number of clients to simulate ``num_clients``, the number of " -"rounds ``num_rounds``, and the strategy." -msgstr "" +#~ msgid "" +#~ "Change the application code. 
For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 -msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different strategy " -"this time:" -msgstr "" +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "" +#~ msgid "" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 -msgid "" -"Flower can evaluate the aggregated model on the server-side or on the client-" -"side. Client-side and server-side evaluation are similar in some ways, but " -"different in others." -msgstr "" +#~ msgid "" +#~ "The following examples are available as" +#~ " standalone projects. Quickstart TensorFlow/Keras" +#~ " ---------------------------" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 -msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. 
We can evaluate the newly aggregated " -"model after each round of training without having to send the model to " -"clients. We're also fortunate in the sense that our entire evaluation " -"dataset is available at all times." -msgstr "" +#~ msgid "" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 -msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, but " -"also more powerful: it doesn't require a centralized dataset and allows us " -"to evaluate models over a larger set of data, which often yields more " -"realistic evaluation results. In fact, many scenarios require us to use " -"**Federated Evaluation** if we want to get representative evaluation results " -"at all. But this power comes at a cost: once we start to evaluate on the " -"client side, we should be aware that our evaluation dataset can change over " -"consecutive rounds of learning if those clients are not always available. " -"Moreover, the dataset held by each client can also change over consecutive " -"rounds. This can lead to evaluation results that are not stable, so even if " -"we would not change the model, we'd see our evaluation results fluctuate " -"over consecutive rounds." 
-msgstr "" +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 -msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see how " -"we can evaluate aggregated model parameters on the server-side:" -msgstr "" +#~ msgid "" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "" +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 -msgid "" -"In some situations, we want to configure client-side execution (training, " -"evaluation) from the server-side. One example for that is the server asking " -"the clients to train for a certain number of local epochs. 
Flower provides a " -"way to send configuration values from the server to the clients using a " -"dictionary. Let's look at an example where the clients receive values from " -"the server through the ``config`` parameter in ``fit`` (``config`` is also " -"available in ``evaluate``). The ``fit`` method receives the configuration " -"dictionary through the ``config`` parameter and can then read values from " -"this dictionary. In this example, it reads ``server_round`` and " -"``local_epochs`` and uses those values to improve the logging and configure " -"the number of local training epochs:" -msgstr "" +#~ msgid "" +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 -msgid "" -"So how can we send this config dictionary from server to clients? The built-" -"in Flower Strategies provide way to do this, and it works similarly to the " -"way server-side evaluation works. We provide a function to the strategy, and " -"the strategy calls this function for every round of federated learning:" -msgstr "" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." 
+#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 -msgid "" -"Next, we'll just pass this function to the FedAvg strategy before starting " -"the simulation:" -msgstr "" +#~ msgid "" +#~ "For simple workloads we can start " +#~ "a Flower server and leave all the" +#~ " configuration possibilities at their " +#~ "default values. In a file named " +#~ ":code:`server.py`, import Flower and start " +#~ "the server:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 -msgid "" -"As we can see, the client logs now include the current round of federated " -"learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second " -"round of federated learning, and then for two epochs during the third round." -msgstr "" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 -msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and used " -"this concept throughout this notebook without mentioning it explicitly: our " -"``FlowerClient`` returns a dictionary containing a custom key/value pair as " -"the third return value in ``evaluate``." -msgstr "" +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Logistic " +#~ "Regression` model on MNIST using Flower" +#~ " and scikit-learn." 
+#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" -msgstr "" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within :code:`utils.py`. The" +#~ " :code:`utils.py` contains different functions" +#~ " defining all the machine learning " +#~ "basics:" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 -msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." -msgstr "" +#~ msgid ":code:`get_model_parameters()`" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format -msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is quite " -"small, we should probably train the model a bit longer, so we configure the " -"clients to perform 3 local training epochs. 
We should also adjust the " -"fraction of clients selected for training during each round (we don't want " -"all 1000 clients participating in every round), so we adjust " -"``fraction_fit`` to ``0.05``, which means that only 5% of available clients " -"(so 50 clients) will be selected for training each round:" -msgstr "" +#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 -msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" -msgstr "" +#~ msgid ":code:`set_model_params()`" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 -msgid "" -"In the later sections, we've seen how we can communicate arbitrary values " -"between server and clients to fully customize client-side execution. With " -"that capability, we built a large-scale Federated Learning simulation using " -"the Flower Virtual Client Engine and ran an experiment involving 1000 " -"clients in the same workload - all in a Jupyter Notebook!" -msgstr "" +#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 -msgid "" -"The `Flower Federated Learning Tutorial - Part 3 `__ shows how " -"to build a fully custom ``Strategy`` from scratch." -msgstr "" +#~ msgid ":code:`set_initial_params()`" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "연합 학습이란 무엇입니까?" +#~ msgid "" +#~ "Please check out :code:`utils.py` `here " +#~ "`_ for more details. 
" +#~ "The pre-defined functions are used " +#~ "in the :code:`client.py` and imported. " +#~ "The :code:`client.py` also requires to " +#~ "import several packages such as Flower" +#~ " and scikit-learn:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 -msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated learning " -"systems that approach the current state of the art in the field." -msgstr "" -"이 튜토리얼에서 연합 학습이 무엇인지 배우고 Flower로 첫 번째 시스템을 " -"구축하고 점진적으로 확장해 나갈 것입니다. 본 튜토리얼의 모든 부분을 완성할 " -"수 있다면, 당신은 고급 연합 학습 시스템을 구축하여 그 분야의 현재 최고 기술 " -"수준에 접근할 수 있을 것입니다." +#~ msgid "" +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. " +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 -msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with federated " -"learning. Only a basic understanding of data science and Python programming " -"is assumed." -msgstr "" -"🧑‍🏫이 튜토리얼은 사전 지식을 많이 필요로 하지 않으며 연합 학습에 대해 " -"상세히알 필요는 없습니다. 데이터 과학과 파이썬 프로그래밍에 대한 기본적인 " -"이해만 가정합니다." +#~ msgid "" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ ":code:`utils.set_initial_params()`." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 -msgid "" -"`Star Flower on GitHub `__ ⭐️ and join the " -"open-source Flower community on Slack to connect, ask questions, and get " -"help: `Join Slack `__ 🌼 We'd love to hear " -"from you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." -msgstr "" -"`Star Flower on GitHub `__ ⭐️ Slack의 " -"오픈소스 Flower 커뮤니티에 가입하여 소통하고 질문하고 도움을 받을 수 " -"있습니다: `Slack 가입`__ 🌼 ``#introductions``" -"채널에서 당신의 목소리를 듣고 싶습니다! 궁금한 점이 있으시면``#questions`` " -"채널로 방문해 주시기 바랍니다." +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to fit the logistic " +#~ "regression we defined earlier)." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "전통적인 머신러닝(기계학습)" +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "scikit-learn. Implementing :code:`NumPyClient` " +#~ "usually means defining the following " +#~ "methods (:code:`set_parameters` is optional " +#~ "though):" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 -msgid "" -"Before we begin to discuss federated learning, let us quickly recap how most " -"machine learning works today." -msgstr "연합 학습에 대해 논의하기 전에 현재 대부분의 머신러닝이 어떻게 작동하는지 " -"간략히 요약하겠습니다." 
+#~ msgid ":code:`set_parameters` (optional)" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 -msgid "" -"In machine learning, we have a model, and we have data. The model could be a " -"neural network (as depicted here), or something else, like classical linear " -"regression." -msgstr "" -"머신러닝에서 우리는 모델과 데이터를 가지고 있습니다. 모델은 신경망(그림과 " -"같이)일 수도 있고 고전적인 선형 회귀와 같은 다른 것일 수도 있습니다." +#~ msgid "is directly imported with :code:`utils.set_model_params()`" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|93b02017c78049bbbd5ae456dcb2c91b|" -msgstr "|93b02017c78049bbbd5ae456dcb2c91b|" +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MnistClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "모델과 데이터" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 -msgid "" -"We train the model using the data to perform a useful task. A task could be " -"to detect objects in images, transcribe an audio recording, or play a game " -"like Go." 
-msgstr "" -"우리는 유용한 작업을 수행하기 위해 데이터를 사용하여 모델을 훈련합니다. " -"작업은 이미지 속 물체를 감지하거나 음성 녹음을 기록하거나 바둑과 같은 게임을 " -"하는 것일 수 있습니다." +#~ msgid ":code:`server.py`, import Flower and start the server:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|01471150fd5144c080a176b43e92a3ff|" -msgstr "|01471150fd5144c080a176b43e92a3ff|" +#~ msgid "" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy. Note that" +#~ " we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "데이터를 이용한 모델 훈련" +#~ msgid "" +#~ "The :code:`main` contains the server-" +#~ "side parameter initialization " +#~ ":code:`utils.set_initial_params()` as well as " +#~ "the aggregation strategy " +#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" +#~ " the default one, federated averaging " +#~ "(or FedAvg), with two clients and " +#~ "evaluation after each federated learning " +#~ "round. The server can be started " +#~ "with the command " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 -msgid "" -"Now, in practice, the training data we work with doesn't originate on the " -"machine we train the model on. It gets created somewhere else." -msgstr "실제로 우리가 사용하는 훈련 데이터는 모델을 훈련시키는 기계에서 비롯된 것이 " -"아닙니다. 그 데이터는 다른 곳에서 만들어졌습니다." +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. 
The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/sklearn-logreg-" +#~ "mnist`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 -msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." -msgstr "" -"스마트폰에서 사용자와 앱의 상호 작용, 센서 데이터를 수집하는 자동차, " -"키보드를 통해 입력을 받는 노트북 또는 누군가 노래를 부르리는 것을 듣는 " -"스마트 스피커에서 비롯됩니다." +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|9bc21c7dbd17444a8f070c60786e3484|" -msgstr "|9bc21c7dbd17444a8f070c60786e3484|" +#~ msgid "" +#~ "Since we want to use :code:`xgboost` " +#~ "package to build up XGBoost trees, " +#~ "let's go ahead and install " +#~ ":code:`xgboost`:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "핸드폰에 있는 데이터" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import xgboost, Flower, Flower Datasets " +#~ "and other related functions:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 -msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running " -"the same app. But it could also be several organizations, all generating " -"data for the same task." 
-msgstr "" -"또한 중요한 것은 이 \"다른 곳\"이 보통 한 곳만 아니라 여러 곳이라는 " -"것입니다. 같은 앱을 실행하는 여러 기기일 수도 있습니다. 하지만 여러 조직이 " -"모두 같은 작업을 위해 데이터를 생성하는 것일 수도 있습니다." +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|3047bbce54b34099ae559963d0420d79|" -msgstr "|3047bbce54b34099ae559963d0420d79|" +#~ msgid "" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "데이터가 여러 장치에 있습니다" +#~ msgid "" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 -msgid "" -"So to use machine learning, or any kind of data analysis, the approach that " -"has been used in the past was to collect all data on a central server. This " -"server can be somewhere in a data center, or somewhere in the cloud." -msgstr "" -"따라서 머신러닝이나 어떤 종류의 데이터 분석을 이용하려면 과거에는 중앙 " -"서버에서 모든 데이터를 수집하는 방법이 사용되었습니다. 이 서버는 데이터 센터 " -"어딘가에 있을 수도 있고 클라우드 어딘가에 있을 수도 있습니다." +#~ msgid "" +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|e9f8ce948593444fb838d2f354c7ec5d|" -msgstr "|e9f8ce948593444fb838d2f354c7ec5d|" +#~ msgid "" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "중앙 데이터 수집" +#~ msgid "" +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 -msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." -msgstr "" -"모든 데이터가 한 곳에 모이면, 우리는 궁극적으로 머신러닝 알고리즘을 사용하여 " -"데이터에서 모델을 훈련시킬 수 있습니다. 이것이 바로 우리가 기본적으로 사용해 " -"온 머신러닝 방법입니다." +#~ msgid "" +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|c24c1478b30e4f74839208628a842d1e|" -msgstr "|c24c1478b30e4f74839208628a842d1e|" +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "중앙 데이터 훈련" +#~ msgid "" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "클래식 머신러닝의 어려움" +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 -msgid "" -"The classic machine learning approach we've just seen can be used in some " -"cases. Great examples include categorizing holiday photos, or analyzing web " -"traffic. Cases, where all the data is naturally available on a centralized " -"server." -msgstr "" -"우리가 방금 본 전통적 머신러닝의 접근 방식은 경우에 따라 다르게 사용될 수 " -"있습니다. 좋은 예로는 휴일 사진을 분류하거나 웹 트래픽을 분석하는 것이 " -"있습니다. 이러한 사례에서 모든 데이터는 자연스럽게 중앙 서버에 존재합니다." +#~ msgid "" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|1b3613d7a58847b59e1d3180802dbc09|" -msgstr "|1b3613d7a58847b59e1d3180802dbc09|" +#~ msgid "" +#~ "Now, we can create an instance of" +#~ " our class :code:`XgbClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "집중화 가능" +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` and " +#~ "call :code:`fl.client.start_client()`. The string" +#~ " :code:`\"[::]:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 -msgid "" -"But the approach can not be used in many other cases. Cases, where the data " -"is not available on a centralized server, or cases where the data available " -"on one server is not enough to train a good model." -msgstr "" -"그러나 이 방법은 다른 많은 경우에 적용되지 않을 수 있습니다. 예를 들어, 중앙 " -"집중식 서버에 데이터가 없거나 서버의 데이터가 좋은 모델을 훈련하기에 " -"충분하지 않을 수 있습니다." +#~ msgid "" +#~ "In a file named :code:`server.py`, " +#~ "import Flower and FedXgbBagging from " +#~ ":code:`flwr.server.strategy`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|9980b5213db547d0b8024a50992b9e3f|" -msgstr "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. 
The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "집중화 불가능" +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 -msgid "" -"There are many reasons why the classic centralized machine learning approach " -"does not work for a large number of highly important real-world use cases. " -"Those reasons include:" -msgstr "" -"전통적인 중앙 집중식 머신러닝 방법이 현실 세계에서 매우 중요한 수많은 사용 " -"사례를 충족시킬 수 없는 이유가 있습니다. 이유는 다음과 같은 여러 가지가 " -"있습니다:" +#~ msgid "" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 -msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own users' " -"data for artificial intelligence training because those users live in " -"different parts of the world, and their data is governed by different data " -"protection regulations." 
-msgstr "" -"**규정**: GDPR (유럽), CCPA (캘리포니아), PIPEDA (캐나다), LGPD (브라질), " -"PDPL (아르헨티나), KVKK (터키), POPI (남아프리카공화국), FSS (러시아), CDPR " -"(중국), PDPB (인도), PIPA (한국), APPI (일본), PDP (인도네시아), PDPA " -"(싱가포르), APP (호주)등의 법규로 민감한 데이터가 이동하지 않도록 보호하고 " -"있습니다. 실제로 이러한 규정은 사용자가 세계의 다른 지역에 살고 데이터가 " -"다른 데이터 보호 규정에 의해 통제되기 때문에 단일 조직이 자체 사용자 " -"데이터를 인공 지능 학습에 사용하는 것을 방지하기도 합니다." +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 -msgid "" -"**User preference**: In addition to regulation, there are use cases where " -"users just expect that no data leaves their device, ever. If you type your " -"passwords and credit card info into the digital keyboard of your phone, you " -"don't expect those passwords to end up on the server of the company that " -"developed that keyboard, do you? In fact, that use case was the reason " -"federated learning was invented in the first place." -msgstr "" -"**사용자 선호도**: 규정 외에도 일부 사용 사례에서 사용자는 데이터가 자기 " -"장치를 떠나지 않기를 예상합니다. 휴대폰의 디지털 키보드에 비밀번호와 " -"신용카드 정보를 입력하면 비밀번호가 해당 키보드를 개발한 회사의 서버에 뜨길 " -"원하지는 않겠죠? 사실, 이 사용 사례가 애당초 연합 학습이 발명된 이유였습니다." +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 -msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data volume " -"that it is neither feasible nor economic to collect all the data (due to, " -"for example, bandwidth or communication efficiency). Think about a national " -"rail service with hundreds of train stations across the country. If each of " -"these train stations is outfitted with a number of security cameras, the " -"volume of raw on-device data they produce requires incredibly powerful and " -"exceedingly expensive infrastructure to process and store. And most of the " -"data isn't even useful." -msgstr "" -"**데이터 볼륨**: 일부 센서(예:카메라)는 너무 많은 데이터 볼륨을 생성하여 " -"모든 데이터를 수집하는 것이 실현 가능하지도 않고 경제적이지도 않습니다(예: " -"대역폭 또는 통신 효율로 인해). 전국에 수백 개 기차역이 있는 국가 철도 " -"서비스를 생각해 보세요. 각 기차역에 수 많은 보안 카메라가 설치되어 있다면, " -"그들이 생산하는 대량의 미가공 된 온디바이스 데이터는 처리 및 저장을 위해 " -"엄청나게 강력하고 매우 비싼기반 구조를 필요로 합니다. 그런데 대부분의 " -"데이터는 유용하지도 않습니다." +#~ msgid "" +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in :code:`examples/xgboost-quickstart`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "중앙 집중식 머신러닝이 작동하지 않는 예는 다음과 같습니다:" +#~ msgid "" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 -msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" -msgstr "여러 병원의 민감한 의료기록으로 암 검진 모델 훈련" +#~ msgid "" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. 
Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 -msgid "" -"Financial information from different organizations to detect financial fraud" -msgstr "금융 사기를 탐지하기 위한 다양한 조직의 금융 정보" +#~ msgid "" +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" -msgstr "더 나은 범위 예측을 위해 전기 자동차의 위치 데이터" +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" -msgstr "더 나은 자동 완성 모델을 훈련시키기 위한 엔드 투 엔드 암호화 된 메시지" +#~ msgid "" +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 -msgid "" -"The popularity of privacy-enhancing systems like the `Brave `__ browser or the `Signal `__ messenger shows " -"that users care about privacy. 
In fact, they choose the privacy-enhancing " -"version over other alternatives, if such an alternative exists. But what can " -"we do to apply machine learning and data science to these cases to utilize " -"private data? After all, these are all areas that would benefit " -"significantly from recent advances in AI." -msgstr "" -"`Brave `__ 브라우저나 `Signal `__ " -"메신저와 같은 개인 정보 보호 시스템의 인기는 사용자들이 개인 정보 보호에 " -"신경 쓴다는 것을 보여줍니다. 실제로 그러한 대안이 존재하는 경우 다른 " -"대안보다 개인 정보 보호 강화 버전을 선택합니다. 그런데 이러한 사례에 " -"머신러닝 및 데이터 과학을 적용하여 프라이버시 데이터를 활용하려면 어떻게 " -"해야 합니까? 이 모든 분야는 최근 AI의 발전으로 상당한 이익을 얻을 수 있는 " -"분야입니다." +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "연합 학습" +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 -msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead of " -"moving the data to the training. Here's the single-sentence explanation:" -msgstr "" -"연합 학습은 이 방법을 쉽게 뒤집었습니다. 데이터를 컴퓨팅 센터로 옮기는 대신 " -"컴퓨팅 능력을 데이터가 생성되는 장소로 이동 시킴으로써 분산된 데이터에서 " -"머신러닝을 실현합니다. 요약하자면:" +#~ msgid "" +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "중앙 집중식 머신러닝: 데이터를 컴퓨팅 센터로 이동" +#~ msgid "" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "연합(기계)학습: 컴퓨팅을 데이터로 옮김" +#~ msgid "" +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 -msgid "" -"By doing so, it enables us to use machine learning (and other data science " -"approaches) in areas where it wasn't possible before. We can now train " -"excellent medical AI models by enabling different hospitals to work " -"together. We can solve financial fraud by training AI models on the data of " -"different financial institutions. We can build novel privacy-enhancing " -"applications (such as secure messaging) that have better built-in AI than " -"their non-privacy-enhancing alternatives. And those are just a few of the " -"examples that come to mind. As we deploy federated learning, we discover " -"more and more areas that can suddenly be reinvented because they now have " -"access to vast amounts of previously inaccessible data." -msgstr "" -"이를 통해 이전에는 불가능했던 분야에서 머신러닝(및 기타 데이터 과학 방법)을 " -"사용할 수 있습니다. 이제 다양한 병원이 협력할 수 있도록 함으로써 우수한 의료 " -"AI 모델을 훈련할 수 있습니다. 다양한 금융 기관의 데이터에 대한 AI 모델을 " -"훈련하여 금융 사기를 해결할 수 있습니다. 개인 정보 보호를 강화하지 않는 " -"대안보다 더 나은 AI가 내장된 새로운 개인 정보 보호 강화 애플리케이션(예: " -"보안 메시징)을 구축할 수 있습니다. 그것들은 떠오르는 몇 가지 예에 " -"불과합니다. 
연합 학습을 구축함에 따라 이전에 액세스할 수 없었던 많은 " -"데이터에 액세스할 수 있게 되었기 때문에 갑자기 재생될 수 있는 영역이 점점 더 " -"많아지고 있습니다." +#~ msgid "" +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 -msgid "" -"So how does federated learning work, exactly? Let's start with an intuitive " -"explanation." -msgstr "그렇다면 연합 학습은 어떻게 작동합니까? 직관적인 설명부터 시작하겠습니다." +#~ msgid "" +#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "연합 학습의 5단계" +#~ msgid "" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "0단계: 글로벌 모델 초기화" +#~ msgid "" +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 -msgid "" -"We start by initializing the model on the server. This is exactly the same " -"in classic centralized learning: we initialize the model parameters, either " -"randomly or from a previously saved checkpoint." -msgstr "" -"서버에서 모델을 초기화하는 것으로 시작합니다. 이것은 전통적인 중앙 집중식 " -"학습과도 동일합니다: 임의로 또는 이전에 저장된 체크포인트에서 모델 " -"매개변수를 초기화합니다." 
+#~ msgid "" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" -msgstr "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "글로벌 모델 초기화" +#~ msgid "" +#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in :code:`examples" +#~ "/xgboost-comprehensive`." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 -msgid "" -"Step 1: Send model to a number of connected organizations/devices (client " -"nodes)" -msgstr "1단계: 연결된 여러 조직/장치(클라이언트 노드)에 모델 전송" +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 -msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts their " -"local training using the same model parameters. We often use only a few of " -"the connected nodes instead of all nodes. 
The reason for this is that " -"selecting more and more client nodes has diminishing returns." -msgstr "" -"다음으로 글로벌 모델의 파라미터를 연결된 클라이언트 노드(예: 스마트폰과 같은 " -"에지 디바이스 또는 조직에 속한 서버)로 보냅니다. 이것은 각 참여 노드가 " -"동일한 모델 매개변수를 사용하여 로컬 훈련을 시작하도록 하기 위함입니다. " -"일반적으로 모든 노드가 아닌 몇 개의 연결 노드만 사용합니다. 그 이유는 점점 " -"더 많은 클라이언트 노드를 선택하면 학습의 효율성이 감소하기 때문입니다." +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|032eb6fed6924ac387b9f13854919196|" -msgstr "|032eb6fed6924ac387b9f13854919196|" +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "글로벌 모델 전송" +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 -msgid "" -"Step 2: Train model locally on the data of each organization/device (client " -"node)" -msgstr "2단계: 각 조직/장치(클라이언트 노드)의 데이터에 대해 로컬로 모델 훈련" +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 -msgid "" -"Now that all (selected) client nodes have the latest version of the global " -"model parameters, they start the local training. They use their own local " -"dataset to train their own local model. They don't train the model until " -"full convergence, but they only train for a little while. This could be as " -"little as one epoch on the local data, or even just a few steps (mini-" -"batches)." -msgstr "" -"이제 모든(선택된) 클라이언트 노드에는 최신 버전의 글로벌 모델 파라미터가 " -"있으며 로컬 훈련을 시작합니다. 그들은 자신의 로컬 데이터 세트를 사용하여 " -"자신의 로컬 모델을 훈련합니다. 모델이 완전히 수렴할 때까지 훈련하지 않고 " -"잠시만 훈련합니다. 이는 로컬 데이터에서 한 단계 정도로 짧거나 몇 단계(mini-" -"batches)에 불과할 수 있습니다." 
+#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|fbf225add7fd4df5a9bf25a95597d954|" -msgstr "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "로컬 데이터에 대한 훈련" +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "3단계: 모델 파라미터를 업데이트하여 서버로 되돌리기" +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 -msgid "" -"After local training, each client node has a slightly different version of " -"the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the server. " -"The model updates they send can either be the full model parameters or just " -"the gradients that were accumulated during local training." -msgstr "" -"로컬 훈련 후에는 클라이언트 노드마다 원래 받은 모델 파라미터의 버전이 조금씩 " -"다릅니다. 파라미터가 다른 이유는 각 클라이언트 노드의 로컬 데이터 세트에 " -"다른 데이터가 있기 때문입니다. 그런 다음 클라이언트 노드는 이러한 모델 " -"업데이트를 서버로 다시 보냅니다. 보내는 모델 업데이트는 전체 모델 " -"파라미터거나 로컬 교육 중에 누적된 그레디언트(gradient)일 수 있습니다." 
+#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|7efbe3d29d8349b89594e8947e910525|" -msgstr "|7efbe3d29d8349b89594e8947e910525|" +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "모델 업데이트 전송" +#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" -msgstr "4단계: 모델 업데이트를 새 글로벌 모델로 집계" +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 -msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of the " -"original global model, each trained on the local data of one client. But " -"didn't we want to have one model that contains the learnings from the data " -"of all 100 client nodes?" -msgstr "" -"서버는 선택된 클라이언트 노드들로부터 모델 업데이트들을 수신합니다. 서버가 " -"100개의 클라이언트 노드를 선택했다면 이제 각각 클라이언트의 로컬 데이터를 " -"기반으로 훈련된 100개의 조금씩 다른 원래 글로벌 모델 버전을 갖게 됩니다. " -"하지만 우리는 100개의 모든 클라이언트 노드의 데이터에서 학습한 내용을 " -"포함하는 모델을 하나만 갖고 싶지 않았습니까?" +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 -msgid "" -"In order to get one single model, we have to combine all the model updates " -"we received from the client nodes. This process is called *aggregation*, and " -"there are many different ways to do it. The most basic way to do it is " -"called *Federated Averaging* (`McMahan et al., 2016 `__), often abbreviated as *FedAvg*. *FedAvg* takes the 100 " -"model updates and, as the name suggests, averages them. 
To be more precise, " -"it takes the *weighted average* of the model updates, weighted by the number " -"of examples each client used for training. The weighting is important to " -"make sure that each data example has the same \"influence\" on the resulting " -"global model. If one client has 10 examples, and another client has 100 " -"examples, then - without weighting - each of the 10 examples would influence " -"the global model ten times as much as each of the 100 examples." -msgstr "" -"단일 모델 하나를 얻으려면 클라이언트 노드에서 받은 모든 모델 업데이트를 " -"결합해야 합니다. 이 과정이 *집합*라고 하며 여러 가지 방법이 있습니다. 가장 " -"기본적인 방법은*Federated Averaging* (`McMahan et al., 2016 `__)이라고 하고 보통 줄여서 *FedAvg*로 표기합니다. " -"*FedAvg* 는 100개의 모델 업데이트를 받아 이름에서 알 수 있듯이 모델 " -"업데이트를 평균화합니다. 더 정확히 말하면, 모델 업데이트의 *가중 평균* 을 각 " -"클라이언트가 훈련에 사용한 예제 수에 따라 가중치를 부여합니다. 가중치는 각 " -"데이터 예제가 결과 글로벌 모델에 동일한 \"영향\" 을 미치는지 확인하는 데 " -"중요합니다. 한 클라이언트에 10개의 데이터 포인트가 있고 다른 클라이언트에 " -"100개의 데이터 포인트가 있다면 가중치를 부여하지 않고 10개의 예가 100개의 " -"사례보다 글로벌 모델에 10배 더 많은 영향을 미칩니다." +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|329fb3c04c744eda83bb51fa444c2266|" -msgstr "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "모델 업데이트 집계" +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "5단계: 모델이 수렴할 때까지 1~4단계를 반복합니다" +#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 -msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. 
The " -"global model parameters get sent to the participating client nodes (step 1), " -"the client nodes train on their local data (step 2), they send their updated " -"models to the server (step 3), and the server then aggregates the model " -"updates to get a new version of the global model (step 4)." -msgstr "" -"단계 1에서 4는 우리가 말하는 단일 라운드 연합 학습입니다. 글로벌 모델 " -"파라미터는 참여하는 클라이언트 노드에 전송되고(1단계), 클라이언트 노드는 " -"로컬 데이터에 대한 훈련을 받고(2단계), 업데이트된 모델을 서버에 " -"전송하고(3단계), 서버는 모델 업데이트를 집계하여 글로벌 모델의 새로운 버전을 " -"얻습니다(4단계)." +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -msgid "" -"During a single round, each client node that participates in that iteration " -"only trains for a little while. This means that after the aggregation step " -"(step 4), we have a model that has been trained on all the data of all " -"participating client nodes, but only for a little while. We then have to " -"repeat this training process over and over again to eventually arrive at a " -"fully trained model that performs well across the data of all client nodes." -msgstr "" -"한 라운드의 반복에서 해당 반복에 참여하는 각 클라이언트 노드는 짧은 시간 " -"동안만 훈련합니다. 집계 단계(4단계) 이후 우리 모델이 관련된 모든 클라이언트 " -"노드의 모든 데이터에 대해 잠시 동안만 훈련되었음을 의미합니다. 그런 다음 " -"모든 클라이언트 노드의 데이터에서 잘 작동하는 완전히 훈련된 모델에 " -"도달하려면 이 훈련 과정을 계속 반복해야 합니다." +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 -msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning in " -"a nutshell. In later parts of this tutorial, we will go into more detail. " -"Interesting questions include: How can we select the best client nodes that " -"should participate in the next round? What's the best way to aggregate model " -"updates? How can we handle failing client nodes (stragglers)?" -msgstr "" -"축하합니다, 이제 연합 학습의 기초에 대해 알게 되었습니다. 
물론 아직 논의해야 " -"할 내용이 많지만 이는 연합 학습의 축소판일 뿐입니다. 본 튜토리얼의 " -"후반부에는 좀 더 자세히 설명하겠습니다. 흥미로운 질문은 다음과 같습니다: " -"다음 라운드에 참여해야 할 가장 좋은 클라이언트 노드를 어떻게 선택할 수 " -"있을까요? 모델 업데이트를 집계하는 가장 좋은 방법은 무엇일까요? 실패한 " -"클라이언트 노드(낙오자)를 어떻게 처리할 수 있을까요?" +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -msgid "" -"Just like we can train a model on the decentralized data of different client " -"nodes, we can also evaluate the model on that data to receive valuable " -"metrics. This is called federated evaluation, sometimes abbreviated as FE. " -"In fact, federated evaluation is an integral part of most federated learning " -"systems." -msgstr "" -"다양한 클라이언트 노드의 분산된 데이터에 대해 모델을 훈련할 수 있는 것처럼 " -"해당 데이터에 대한 모델을 평가하여 가치 있는 메트릭(metrics)을 받을 수도 " -"있습니다. 이를 연합 평가라고 하며 FE라고 약칭하기도 합니다. 사실 연합 평가는 " -"대부분의 연합 학습 시스템에서 필수적인 부분입니다." +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "연합 분석" +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from data. " -"Data analysis can yield valuable insights, but again, there's often not " -"enough data to get a clear answer. What's the average age at which people " -"develop a certain type of health condition? Federated analytics enables such " -"queries over multiple client nodes. It is usually used in conjunction with " -"other privacy-enhancing technologies like secure aggregation to prevent the " -"server from seeing the results submitted by individual client nodes." -msgstr "" -"많은 경우 머신러닝은 데이터로부터 가치를 얻기 위한 필수 조건이 아닙니다. " -"데이터 분석을 통해 귀중한 통찰력을 얻을 수 있지만, 명확한 답변을 얻기에는 " -"데이터가 충분하지 않은 경우가 많습니다. 특정 유형의 건강 상태가 발생하는 " -"평균 연령은 몇 살입니까? 연합 분석을 사용하면 여러 클라이언트 노드에서 " -"이러한 쿼리(query)를 실행할 수 있습니다. 
서버가 단일 클라이언트 노드에서 " -"제출한 결과를 보지 못하도록 보안을 강화한 집합 방식과 같은 다른 프라이버시 " -"향상 기술과 함께 자주 사용됩니다." +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and sharing " -"statistical data, ensuring the privacy of individual participants. DP " -"achieves this by adding statistical noise to the model updates, ensuring any " -"individual participants’ information cannot be distinguished or re-" -"identified. This technique can be considered an optimization that provides a " -"quantifiable privacy protection measure." -msgstr "" -"차분 프라이버시(Differential Privacy)는 연합 학습의 맥락에서 종종 " -"언급됩니다. 통계 데이터를 분석하고 공유할 때 사용하는 프라이버시 보호 " -"방식으로, 참가자 개인의 프라이버시를 보장합니다. 차분 프라이버시는 모델 " -"업데이트에 통계적 잡음(noise)를 추가하여 개별 참가자의 정보를 구별하거나 " -"재식별할 수 없도록 함으로써 이를 달성합니다. 이 기술은 정량적 개인 정보 보호 " -"조치를 제공하는 최적화라고 볼 수 있습니다." +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "Flower" +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require " -"infrastructure to move machine learning models back and forth, train and " -"evaluate them on local data, and then aggregate the updated models. Flower " -"provides the infrastructure to do exactly that in an easy, scalable, and " -"secure way. In short, Flower presents a unified approach to federated " -"learning, analytics, and evaluation. It allows the user to federate any " -"workload, any ML framework, and any programming language." -msgstr "" -"연합 학습, 연합 평가 및 연합 분석은 머신러닝 모델을 앞뒤로 이동하고 로컬 " -"데이터에 대해 훈련 및 평가한 다음 업데이트된 모델을 통합하기 위한 기본 " -"프레임워크가 필요합니다. 
Flower가 제공하는 기반 구조는 간단하고 확장 " -"가능하며 안전한 방식으로 이러한 목표를 달성합니다. 간단히 말해서, Flower는 " -"연합 학습, 분석 및 평가를 위한 통합 접근 방식을 제공합니다. 이를 통해 " -"사용자는 모든 워크로드, 머신러닝 프레임워크 및 모든 프로그래밍 언어를 통합할 " -"수 있습니다." +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|c00bf2750bc24d229737a0fe1395f0fc|" -msgstr "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal " -"computer, roomba, and phone)" -msgstr "Flower 연합 학습 서버 및 클라이언트 노드(자동차, 스쿠터, 개인용 컴퓨터, " -"룸바, 전화)" +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and how " -"it relates to the classic (centralized) machine learning!" -msgstr "축하합니다, 지금까지 당신은 연합 학습의 기본 지식과 그것이 어떻게 전통적 (" -"중앙 집중식) 머신러닝과 관련되는지 배웠습니다!" +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first federated " -"learning system with Flower." -msgstr "이 튜토리얼의 다음 부분에서는 Flower와 함께 첫 번째 연합 학습 시스템을 " -"구축할 것입니다." +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 `__ shows how to " -"build a simple federated learning system with PyTorch and Flower." -msgstr "" -"`Flower 연합 학습 튜토리얼- 1부 `__ PyTorch와 Flower를 사용하여 간단한 " -"연합 학습 시스템을 구축하는 방법을 보여줍니다." +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgstr "" -#~ msgid "" -#~ "Currently, Flower provides two images, a ``base`` image and a " -#~ "``superlink`` image. 
The base image, as the name suggests, contains basic " -#~ "dependencies that the SuperLink needs. This includes system dependencies, " -#~ "Python and Python tools. The SuperLink image is based on the base image, " -#~ "but it additionally installs the SuperLink using ``pip``." -#~ msgstr "" -#~ "현재, Flower는 \"base\" 이미지 그리고 \"superlink\" 이미지를 제공합니다. " -#~ "base 이미지는 이름에서 알 수 있듯이 SuperLink가 필요로 하는 기본 " -#~ "dependencies를 포함하고 있습니다. 여기에는 시스템 dependencies, Python 및 " -#~ "Python 도구가 포함됩니다. SuperLink 이미지는 base 이미지를 기반으로 하지" -#~ "만 \"pip\"을 사용하여 SuperLink를 추가로 설치합니다." +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" +#~ msgstr "" -#~ msgid "``3.11``" -#~ msgstr "``3.11``" +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgstr "" -#~ msgid "Defaults to ``22.04``." -#~ msgstr "``22.04``이 기본값." +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgstr "" -#~ msgid "Defaults to ``flwr/base``." -#~ msgstr "``flwr/base``이 기본값." +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgstr "" -#~ msgid "The Python version of the base image." -#~ msgstr "base 이미지의 Python 버전." +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgstr "" -#~ msgid "Defaults to ``py3.11``." -#~ msgstr "``py3.11``이 기본값." +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgstr "" -#~ msgid "Defaults to ``ubuntu22.04``." -#~ msgstr "``ubuntu22.04``이 기본값." +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgstr "" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "``flwr``이 기본값." +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" +#~ msgstr "" -#~ msgid "" -#~ "The name of image is ``flwr_superlink`` and the tag ``0.1.0``. Remember " -#~ "that the build arguments as well as the name and tag can be adapted to " -#~ "your needs. These values serve as examples only." +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgstr "" + +#~ msgid "|e7cec00a114b48359935c6510595132e|" #~ msgstr "" -#~ "이미지의 이름은 ``flwr_superlink``이고 태그는 ``0.1.0``입니다. 필요에 따" -#~ "라 빌드 argument들 뿐만 아니라 이름과 태그도 정할 수 있습니다. 
이 값들은 " -#~ "예시일 뿐입니다." + diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index e50c290432cc..393c04bb0b13 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-06-17 16:09+0200\n" +"POT-Creation-Date: 2024-10-10 00:29+0000\n" "PO-Revision-Date: 2024-05-25 11:09+0000\n" "Last-Translator: Gustavo Bertoli \n" "Language: pt_BR\n" @@ -17,52 +17,198 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Arquitetura do Flower" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 +msgid "" +"Everything that is not part of the public API is part of the private API." +" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? 
Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 +msgid "Flower public API" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. 
Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "Engine do Edge Client" +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 +msgid "" +"What about components that are nested deeper in the hierarchy? 
Let's look" +" at Flower strategies to see another typical pattern. Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." 
+msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 +msgid "Flower public API of private packages" msgstr "" -"`Flower `_ arquitetura principal do framework com " -"Engine do Edge Client" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "Engine do Virtual Client" +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" msgstr "" -"`Flower `_ arquitetura principal do framework com " -"Engine do Virtual Client" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." 
msgstr "" -"Engine do Virtual Client e do Edge Client no mesma carga de trabalho " -"(workload)" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." msgstr "" -"`Flower `_ arquitetura principal do framework com " -"ambas engines do Virtual Client e do Edge Client" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +#, fuzzy +msgid "How to Build Docker Flower Images Locally" msgstr "Como construir imagens Docker do Flower localmente" #: ../../source/contributor-how-to-build-docker-images.rst:4 @@ -91,25 +237,16 @@ msgstr "" "Antes de começarmos, precisamos encontrar alguns pré-requisitos em nosso " "ambiente de desenvolvimento local." -#: ../../source/contributor-how-to-build-docker-images.rst:12 -msgid "Clone the flower repository." +#: ../../source/contributor-how-to-build-docker-images.rst:13 +#, fuzzy +msgid "Clone the ``flower`` repository." msgstr "Clone o repositório do flower." -#: ../../source/contributor-how-to-build-docker-images.rst:18 -#: ../../source/how-to-run-flower-using-docker.rst:165 +#: ../../source/contributor-how-to-build-docker-images.rst:19 msgid "Verify the Docker daemon is running." msgstr "Verifique que o serviço Docker está rodando." -#: ../../source/contributor-how-to-build-docker-images.rst:20 -#: ../../source/how-to-run-flower-using-docker.rst:167 -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." 
-msgstr "" -"Por favor, siga a primeira seção em :doc:`Execute o Flower usando Docker " -"` que cobre este passo em mais detalhes." - -#: ../../source/contributor-how-to-build-docker-images.rst:25 +#: ../../source/contributor-how-to-build-docker-images.rst:21 msgid "" "The build instructions that assemble the images are located in the " "respective Dockerfiles. You can find them in the subdirectories of " @@ -119,7 +256,7 @@ msgstr "" "respectivos Dockerfiles. Você pode encontrá-los nos subdiretórios " "``src/docker```." -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:24 #, fuzzy msgid "" "Flower Docker images are configured via build arguments. Through build " @@ -139,202 +276,224 @@ msgstr "" "Todos os argumentos de compilação disponíveis para cada imagem estão " "listados em uma das tabelas abaixo." -#: ../../source/contributor-how-to-build-docker-images.rst:35 -msgid "Building the base image" +#: ../../source/contributor-how-to-build-docker-images.rst:32 +#, fuzzy +msgid "Building the Base Image" msgstr "Construindo a imagem base" -#: ../../source/contributor-how-to-build-docker-images.rst:41 -#: ../../source/contributor-how-to-build-docker-images.rst:98 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 msgid "Build argument" msgstr "Argumento de compilação" -#: ../../source/contributor-how-to-build-docker-images.rst:42 -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 msgid "Description" msgstr "Descrição" -#: ../../source/contributor-how-to-build-docker-images.rst:43 -#: ../../source/contributor-how-to-build-docker-images.rst:100 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 msgid "Required" 
msgstr "Necessário" -#: ../../source/contributor-how-to-build-docker-images.rst:44 -#: ../../source/contributor-how-to-build-docker-images.rst:101 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 +#: ../../source/docker/set-environment-variables.rst:8 msgid "Example" msgstr "Exemplo" -#: ../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:42 msgid "``DISTRO``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:43 #, fuzzy msgid "The Linux distribution to use as the base image." msgstr "O nome do repositório da imagem base." -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:51 -#: ../../source/contributor-how-to-build-docker-images.rst:55 -#: ../../source/contributor-how-to-build-docker-images.rst:71 -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 msgid "No" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:45 #, fuzzy msgid "``ubuntu``" msgstr "``UBUNTU_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:46 #, fuzzy msgid "``DISTRO_VERSION``" msgstr "``PIP_VERSION``" -#: 
../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:47 msgid "Version of the Linux distribution." msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 -#, fuzzy -msgid "``22.04``" -msgstr "``23.0.1``" +#: ../../source/contributor-how-to-build-docker-images.rst:49 +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:50 msgid "``PYTHON_VERSION``" msgstr "``PYTHON_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:51 msgid "Version of ``python`` to be installed." msgstr "Versão do ``python`` a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "``3.11`` or ``3.11.1``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:54 msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:55 msgid "Version of ``pip`` to be installed." msgstr "Versão do ``pip`` a ser instalada." 
-#: ../../source/contributor-how-to-build-docker-images.rst:59 -#: ../../source/contributor-how-to-build-docker-images.rst:63 -#: ../../source/contributor-how-to-build-docker-images.rst:67 -#: ../../source/contributor-how-to-build-docker-images.rst:108 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 msgid "Yes" msgstr "Sim" -#: ../../source/contributor-how-to-build-docker-images.rst:60 -msgid "``23.0.1``" -msgstr "``23.0.1``" +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid ":substitution-code:`|pip_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:58 msgid "``SETUPTOOLS_VERSION``" msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:62 +#: ../../source/contributor-how-to-build-docker-images.rst:59 msgid "Version of ``setuptools`` to be installed." msgstr "Versão do ``setuptools`` a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:64 -msgid "``69.0.2``" -msgstr "``69.0.2``" +#: ../../source/contributor-how-to-build-docker-images.rst:61 +#, fuzzy +msgid ":substitution-code:`|setuptools_version|`" +msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:62 msgid "``FLWR_VERSION``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:63 msgid "Version of Flower to be installed." msgstr "Versão do Flower a ser instalada." 
-#: ../../source/contributor-how-to-build-docker-images.rst:68 -#, fuzzy -msgid "``1.8.0``" -msgstr "``1.7.0``" +#: ../../source/contributor-how-to-build-docker-images.rst:65 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:66 #, fuzzy msgid "``FLWR_PACKAGE``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:70 +#: ../../source/contributor-how-to-build-docker-images.rst:67 #, fuzzy msgid "The Flower package to be installed." msgstr "Versão do Flower a ser instalada." -#: ../../source/contributor-how-to-build-docker-images.rst:72 +#: ../../source/contributor-how-to-build-docker-images.rst:69 msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:70 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." 
+msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:76 +msgid "`Direct Reference Examples`_" +msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:78 #, fuzzy msgid "" "The following example creates a base Ubuntu/Alpine image with Python " -"3.11.0, pip 23.0.1, setuptools 69.0.2 and Flower 1.8.0:" +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "" "O exemplo seguinte cria uma imagem base com Python 3.11.0, pip 23.0.1 e " "setuptools 69.0.2:" -#: ../../source/contributor-how-to-build-docker-images.rst:88 +#: ../../source/contributor-how-to-build-docker-images.rst:93 +#, fuzzy msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "" "O nome da imagem é ``flwr_base`` com a tag ``0.1.0``. Lembre-se que os " "argumentos de construção assim como o nome e a tag podem ser adaptados de" " acordo com suas necessidades. Estes valores servem apenas como exemplo." 
-#: ../../source/contributor-how-to-build-docker-images.rst:92 +#: ../../source/contributor-how-to-build-docker-images.rst:98 #, fuzzy -msgid "Building the SuperLink/SuperNode or ServerApp image" -msgstr "Construindo a imagem do servidor" +msgid "Building a Flower Binary Image" +msgstr "Construindo a imagem base" -#: ../../source/contributor-how-to-build-docker-images.rst:102 +#: ../../source/contributor-how-to-build-docker-images.rst:108 msgid "``BASE_REPOSITORY``" msgstr "``BASE_REPOSITORY``" -#: ../../source/contributor-how-to-build-docker-images.rst:103 +#: ../../source/contributor-how-to-build-docker-images.rst:109 msgid "The repository name of the base image." msgstr "O nome do repositório da imagem base." -#: ../../source/contributor-how-to-build-docker-images.rst:105 +#: ../../source/contributor-how-to-build-docker-images.rst:111 #, fuzzy msgid "``flwr/base``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:106 +#: ../../source/contributor-how-to-build-docker-images.rst:112 #, fuzzy msgid "``BASE_IMAGE``" msgstr "``BASE_REPOSITORY``" -#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/contributor-how-to-build-docker-images.rst:113 #, fuzzy msgid "The Tag of the Flower base image." msgstr "O nome do repositório da imagem base." 
-#: ../../source/contributor-how-to-build-docker-images.rst:109 -msgid "``1.8.0-py3.10-ubuntu22.04``" +#: ../../source/contributor-how-to-build-docker-images.rst:115 +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:111 -#, fuzzy +#: ../../source/contributor-how-to-build-docker-images.rst:117 msgid "" -"The following example creates a SuperLink/SuperNode or ServerApp image " -"with the official Flower base image:" +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" msgstr "" -"O exemplo a seguir cria uma imagem de servidor com a imagem base oficial " -"do Flower py3.11-ubuntu22.04 e Flower 1.7.0:" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:128 #, fuzzy msgid "" "If you want to use your own base image instead of the official Flower " "base image, all you need to do is set the ``BASE_REPOSITORY`` build " -"argument." +"argument to ``flwr_base`` (as we've specified above)." msgstr "" "Se você quiser usar sua própria imagem base ao invés da imagem oficial " "base do Flower, tudo que você precisa fazer é definir os argumentos " @@ -343,10 +502,14 @@ msgstr "" "sua imagem e o valor de ``BASE_IMAGE_TAG`` deve corresponder à tag da sua" " imagem." 
-#: ../../source/contributor-how-to-build-docker-images.rst:133 +#: ../../source/contributor-how-to-build-docker-images.rst:140 msgid "After creating the image, we can test whether the image is working:" msgstr "Depois de criar a imagem, podemos testar se a imagem está funcionando:" +#: ../../source/contributor-how-to-build-docker-images.rst:147 +msgid "Direct Reference Examples" +msgstr "" + #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" msgstr "Contribua com traduções" @@ -402,7 +565,7 @@ msgstr "" " as configurações de perfil podem ser encontradas `aqui " "`_." -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_. Aqui, você deve ver os diferentes idiomas existentes " "que podem ser encontrados no site." -#: ../../source/contributor-how-to-contribute-translations.rst:34 +#: ../../source/contributor-how-to-contribute-translations.rst:32 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" @@ -422,7 +585,7 @@ msgstr "" "Uma vez que você tenha selecionado o idioma que deseja contribuir, você " "deve ver uma interface semelhante a esta:" -#: ../../source/contributor-how-to-contribute-translations.rst:39 +#: ../../source/contributor-how-to-contribute-translations.rst:37 msgid "" "The most straight forward option here is to click on the ``Translate`` " "button on the top right (in the ``Translation status`` section). This " @@ -434,12 +597,12 @@ msgstr "" "automaticamente para a interface de tradução de strings ainda não " "traduzidas." 
-#: ../../source/contributor-how-to-contribute-translations.rst:43 +#: ../../source/contributor-how-to-contribute-translations.rst:41 #, fuzzy msgid "This is what the interface looks like:" msgstr "É assim que a interface se parece:" -#: ../../source/contributor-how-to-contribute-translations.rst:47 +#: ../../source/contributor-how-to-contribute-translations.rst:45 msgid "" "You input your translation in the text box at the top and then, once you " "are happy with it, you either press ``Save and continue`` (to save the " @@ -456,7 +619,7 @@ msgstr "" "ou ``Skip`` (para ir para a próxima string não traduzida sem salvar nada " "na atual)." -#: ../../source/contributor-how-to-contribute-translations.rst:54 +#: ../../source/contributor-how-to-contribute-translations.rst:51 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " @@ -471,7 +634,7 @@ msgstr "" "(outras línguas), e o ``History`` (histórico) de traduções para esta " "string." -#: ../../source/contributor-how-to-contribute-translations.rst:59 +#: ../../source/contributor-how-to-contribute-translations.rst:56 msgid "" "On the right, under the ``String information`` section, you can also " "click the link under ``Source string location`` in order to view the " @@ -481,7 +644,7 @@ msgstr "" " link sob ``Source string location`` para visualizar a fonte do arquivo " "doc que contém a string." -#: ../../source/contributor-how-to-contribute-translations.rst:63 +#: ../../source/contributor-how-to-contribute-translations.rst:60 msgid "" "For more information about translating using Weblate, you can check out " "this `in-depth guide " @@ -491,12 +654,12 @@ msgstr "" "pode conferir este `guia detalhado " "`_." 
-#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 #, fuzzy msgid "Add new languages" msgstr "Adicionar novos idiomas" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" @@ -519,20 +682,19 @@ msgid "" "extension. What is it? Read the following quote:" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. Extensions are installed and run inside the " @@ -541,28 +703,28 @@ msgid "" " environment just by connecting to a different container." 
msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 msgid "" "Source: `Official VSCode documentation " "`_" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -572,20 +734,20 @@ msgid "" "option *(Re)Open Folder in Container*." msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. 
For those cases consult " "the following sources:" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 msgid "" "`Developing inside a Container " "`_" msgstr "" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 msgid "" "`Remote development in Containers " "`_" @@ -610,154 +772,154 @@ msgid "" "``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``)." msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 
msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:33 +#: ../../source/contributor-how-to-install-development-versions.rst:38 msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." 
msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name'`` (with extras)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 msgid "" "Open the notebook 
``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " "matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-" @@ -774,25 +936,25 @@ msgid "" "change in the future." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. To " "release a new version of Flower, the following things need to happen (in " "that order):" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " "order to add every new change to the changelog (feel free to make manual " "changes to the changelog afterwards until it looks good)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 msgid "" "Once the changelog has been updated with all the changes, run ``./dev" "/prepare-release-changelog.sh v``, where ```` " @@ -802,7 +964,7 @@ msgid "" "the contributors. Open a pull request with those changes." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 msgid "" "Once the pull request is merged, tag the release commit with the version " "number as soon as the PR is merged: ``git tag v`` (notice " @@ -811,149 +973,100 @@ msgid "" "artifacts and the relevant part of the changelog." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:15 -#, fuzzy -msgid "Trigger the CI for building the Docker images." -msgstr "Versão da imagem Docker oficial do Ubuntu." 
- -#: ../../source/contributor-how-to-release-flower.rst:17 -msgid "" -"To trigger the workflow, a collaborator must create a " -"``workflow_dispatch`` event in the GitHub CI. This can be done either " -"through the UI or via the GitHub CLI. The event requires only one input, " -"the Flower version, to be released." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:21 -msgid "**Via the UI**" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:23 -msgid "" -"Go to the ``Build docker images`` workflow `page " -"`_." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:24 -msgid "" -"Click on the ``Run workflow`` button and type the new version of Flower " -"in the ``Version of Flower`` input field." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:25 -msgid "Click on the **green** ``Run workflow`` button." -msgstr "" - #: ../../source/contributor-how-to-release-flower.rst:29 -msgid "**Via the GitHub CI**" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:31 -msgid "" -"Make sure you are logged in via ``gh auth login`` and that the current " -"working directory is the root of the Flower repository." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:32 -msgid "" -"Trigger the workflow via ``gh workflow run docker-images.yml -f flwr-" -"version=``." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:35 msgid "After the release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." 
msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:40 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:53 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:62 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:64 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:70 +#: 
../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -976,11 +1089,11 @@ msgid "" "surface this will become the next stable release" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:78 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "" -#: ../../source/contributor-how-to-release-flower.rst:79 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "" @@ -996,19 +1109,19 @@ msgid "" "the instructions or choose your preferred setup." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 msgid "" "Due to a known incompatibility with `ray " "`_, we currently recommend utilizing at " @@ -1016,11 +1129,11 @@ msgid "" "simulations." msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 msgid "Virtualenv with Pyenv/Virtualenv" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_ for details." 
msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " "simply create a virtual environment with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. 
After setting it up you can " "create a virtual environment with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." @@ -1089,11 +1202,11 @@ msgstr "" msgid "Write documentation" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1101,7 +1214,7 @@ msgid "" msgstr "" #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196 msgid "" "Note that, in order to build the documentation locally (with ``poetry run" " make html``, like described below), `Pandoc " @@ -1109,20 +1222,20 @@ msgid "" "system." 
msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" msgstr "" @@ -1155,41 +1268,41 @@ msgid "" "the Flower codebase." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " "candidates to get started are:" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." 
msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 msgid "" "If you are not familiar with Flower Baselines, you should probably check-" "out our `contributing guide for baselines " "`_." msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 msgid "" "You should then check out the open `issues " "`_" @@ -1198,7 +1311,7 @@ msgid "" "working on it!" msgstr "" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" @@ -1239,30 +1352,30 @@ msgid "" "special case of the SecAgg+ protocol." msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +msgid "The ``SecAgg+`` abstraction" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" " keys of int type rather than ClientProxy type." 
msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +msgid "The ``LightSecAgg`` abstraction" msgstr "" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "" @@ -1276,22 +1389,22 @@ msgid "" "are not used to contributing to GitHub projects." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 msgid "" "If you're familiar with how contributing on GitHub works, you can " "directly checkout our :doc:`getting started guide for contributors " "`." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 msgid "" "Git is a distributed version control tool. This allows for an entire " "codebase's history to be stored and every developer's machine. It is a " @@ -1300,20 +1413,20 @@ msgid "" "started-with-git/set-up-git>`_ to set it up." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1321,19 +1434,19 @@ msgid "" "history back to GitHub." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1341,11 +1454,11 @@ msgid "" " the top left corner that you are looking at your own version of Flower." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1353,27 +1466,27 @@ msgid "" "ability to copy the HTTPS link of the repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1381,27 +1494,27 @@ msgid "" "account and copying the link." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 msgid "" "Now we will add an upstream address to our repository. Still in the same " "directory, we must run the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1410,17 +1523,17 @@ msgid "" "in our own account." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 msgid "" "This can be achieved by following this :doc:`getting started guide for " "contributors ` (note " @@ -1428,158 +1541,158 @@ msgid "" "code and test it, you can finally start making changes!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 msgid "Otherwise you can always find this option in the ``Branches`` page." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " "guidelines, otherwise it won't be possible to merge the PR. So in this " "case, a correct title might be ``docs(framework:skip) Fix typos``." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1587,167 +1700,167 @@ msgid "" "process." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 msgid "It is important to follow the instructions described in comments." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. 
This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 msgid "" "For our documentation, we've started to use the `Diàtaxis framework " "`_." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" " to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 msgid "" "This issue is about changing the title of a doc from present continuous " "to present simple." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 msgid "Before: \"How to saving progress\" ❌" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 msgid "After: \"How to save progress\" ✅" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. 
" "After cloning and setting up the Flower repo, here's what you should do:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 msgid "Find the source file in ``doc/source``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 msgid "" "Build the docs and `check the result `_" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: ../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1755,109 +1868,109 @@ msgid "" "engine ranking." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 msgid "Here's how to change the file name:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 msgid "Change the file name to ``save-progress.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 msgid "" "For the lateral navigation bar to work properly, it is very important to " "update the ``index.rst`` file as well. This is where we define the whole " "arborescence of the navbar." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 msgid "Find and modify the file name in ``index.rst``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 msgid "" "Commit the changes (commit messages are always imperative: \"Do " "something\", in this case \"Change …\")" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 msgid "PR title format" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 msgid "We enforce the following PR title format:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " "break}``, ```` should be in ``{framework, baselines, datasets, " @@ -1866,50 +1979,50 @@ msgid "" "verb in the imperative mood." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 #, fuzzy msgid "Valid examples:" msgstr "Exemplo" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 msgid "``feat(framework) Add flwr build CLI command``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 msgid "``ci(*:skip) Enforce PR title format``" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 msgid "Invalid examples:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: ../../source/contributor-tutorial-contribute-on-github.rst:389 msgid 
"``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "" @@ -1918,12 +2031,16 @@ msgid "Get started as a contributor" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:153 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 -msgid "`Python 3.8 `_ or above" +msgid "`Python 3.9 `_ or above" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 @@ -1940,17 +2057,17 @@ msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 -msgid "Preliminarities" +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 +msgid "Preliminaries" msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 @@ -1967,94 +2084,93 @@ msgid "" "installation actions to add `brew` to your PATH." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 msgid "For Ubuntu" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 msgid "Create Flower Dev Environment" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.8.17` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 msgid "" -"3. Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. 
Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "Convenience Scripts" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87 msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. The following scripts are amongst the most" +" important ones:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 msgid "Create/Delete Virtual Environment" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101 msgid "Compile ProtoBuf Definitions" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "Auto-Format Code" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "Run Linters and Tests" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 msgid "Add a pre-commit hook" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: 
../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " "the `pre-commit `_ library. The pre-" @@ -2062,19906 +2178,27072 @@ msgid "" "``./dev/format.sh`` and ``./dev/test.sh`` scripts." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128 msgid "There are multiple ways developers can use this:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 msgid "Install the pre-commit hook to your local git directory by simply running:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136 msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138 msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" +"commit`` command." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145 msgid "" "For developers who prefer not to install the hook permanently, it is " "possible to execute a one-time check prior to committing changes by using" " the following command:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152 msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156 msgid "Run Github Actions (CI) locally" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158 msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. " "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171 msgid "Build Release" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173 msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180 msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." 
msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184 msgid "Build Documentation" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186 msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194 msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" +#: ../../source/docker/enable-tls.rst:2 +msgid "Enable TLS for Secure Connections" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." 
msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" +#: ../../source/docker/enable-tls.rst:7 +msgid "" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/docker/enable-tls.rst:12 msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:17 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" +#: ../../source/docker/enable-tls.rst:20 +msgid "" +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:15 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. 
Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" +#: ../../source/docker/enable-tls.rst:27 +msgid "SuperLink" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:29 msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." +"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "Understanding the command" +msgstr "" + +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 +msgid "``docker run``: This tells Docker to run a container from an image." 
+msgstr "" + +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 +msgid "``--rm``: Remove the container once it is stopped or the command exits." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." +"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +"the current working directory of the host machine as a read-only volume " +"at the" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" 
+"This allows the container to access the TLS certificates that are stored " +"in the certificates" msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" +#: ../../source/docker/enable-tls.rst +msgid "directory." msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" +#: ../../source/docker/enable-tls.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. 
Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/docker/enable-tls.rst msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" +#: ../../source/docker/enable-tls.rst +msgid "SuperLink." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 -msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. 
You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 +#: ../../source/docker/enable-tls.rst msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 +#: ../../source/docker/enable-tls.rst msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." +"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" +msgstr "" + +#: ../../source/docker/enable-tls.rst +msgid "the network." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst:72 +msgid "SuperNode" +msgstr "" + +#: ../../source/docker/enable-tls.rst:74 msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst:79 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." 
+"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" +#: ../../source/docker/enable-tls.rst +msgid "directory inside the container." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." 
+"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst:107 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. 
:code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" -msgstr "" - -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/enable-tls.rst msgid "" -"set the model parameters on the local model that are received from the " -"server" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" +#: ../../source/docker/enable-tls.rst +msgid "" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/docker/enable-tls.rst msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: 
../../source/example-jax-from-centralized-to-federated.rst:170 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/enable-tls.rst msgid "" -"update the parameters of the local model with the parameters received " -"from the server" +"location of the CA certificate file inside the container that the " +"SuperExec executor" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the SuperLink's identity." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" +#: ../../source/docker/index.rst:2 +msgid "Run Flower using Docker" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" +#: ../../source/docker/index.rst:4 +msgid "" +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." 
msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" +#: ../../source/docker/index.rst:7 +msgid "" +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" +#: ../../source/docker/index.rst:11 +msgid "Getting Started" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 -msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +#: ../../source/docker/index.rst:19 +msgid "Running in Production" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 -msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +#: ../../source/docker/index.rst:28 +msgid "Advanced Options" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." 
+#: ../../source/docker/index.rst:40 +msgid "Run Flower using Docker Compose" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 +#: ../../source/docker/persist-superlink-state.rst:4 msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" 
+"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" +#: ../../source/docker/persist-superlink-state.rst:21 +msgid "" +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/persist-superlink-state.rst:36 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 -msgid "" -"We begin with a brief description of the centralized CNN training code. 
" -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +#: ../../source/docker/pin-version.rst:2 +msgid "Pin a Docker Image to a Specific Version" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/docker/pin-version.rst:4 msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/pin-version.rst:14 msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 -msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." 
+#: ../../source/docker/pin-version.rst:23 +msgid "This will output" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 -msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." +#: ../../source/docker/pin-version.rst:30 +msgid "Next, we can pin the digest when running a new SuperLink container:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 -msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." +#: ../../source/docker/run-as-root-user.rst:2 +msgid "Run with Root User Privileges" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +#: ../../source/docker/run-as-root-user.rst:4 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." 
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 -msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." +#: ../../source/docker/run-as-root-user.rst:12 +msgid "Run a Container with Root User Privileges" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 -msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." 
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +#: ../../source/docker/run-as-root-user.rst:24 +msgid "Run the Build Process with Root User Privileges" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" -msgstr "" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." 
msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" -msgstr "" +#: ../../source/docker/run-as-root-user.rst:30 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "Construindo a imagem do servidor" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" +#: ../../source/docker/run-as-subprocess.rst:2 +msgid "Run ClientApp as a Subprocess" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/docker/run-as-subprocess.rst:4 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. 
Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 -msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" +#: ../../source/docker/run-as-subprocess.rst:17 +msgid "Dockerfile.supernode" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/docker/run-as-subprocess.rst:31 msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" msgstr "" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. 
However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." +"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see Limitations_." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "DP Intro" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +msgid "Before you start, make sure that:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:22 -msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." msgstr "" -#: ../../source/explanation-differential-privacy.rst:25 -msgid "Formal Definition" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "Verifique que o serviço Docker está rodando." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 +msgid "Docker Compose is `installed `_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:26 -msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. 
A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 +msgid "Run the Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:32 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." +"Download the `compose.yml " +"`_" +" file into the example directory:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:45 -msgid "Differential Privacy in Machine Learning" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 +msgid "Build and start the services using the following command:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:46 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. 
Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:53 -msgid "Differential Privacy in Federated Learning" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 +msgid "pyproject.toml" msgstr "" -#: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." 
msgstr "" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -#: ../../source/explanation-differential-privacy.rst:60 -msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 +#, fuzzy +msgid "Run the example:" +msgstr "Exemplo" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 +msgid "Follow the logs of the SuperExec service:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:63 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." 
+msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +msgid "Run a Different Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 -msgid "Central Differential Privacy" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +msgid "After that, you can repeat the steps above." msgstr "" -#: ../../source/explanation-differential-privacy.rst:69 -msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +msgid "Limitations" msgstr "" -#: ../../source/explanation-differential-privacy.rst:76 -msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. 
The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +msgid "Quickstart Example" msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -msgid "clipping" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +msgid "quickstart-fastai" msgstr "" -#: ../../source/explanation-differential-privacy.rst:89 -msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." 
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" msgstr "" -#: ../../source/explanation-differential-privacy.rst:94 -msgid "Clipping" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +msgid "quickstart-huggingface" msgstr "" -#: ../../source/explanation-differential-privacy.rst:96 -msgid "" -"There are two forms of clipping commonly used in Central DP: Fixed " -"Clipping and Adaptive Clipping." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +msgid "quickstart-jax" msgstr "" -#: ../../source/explanation-differential-privacy.rst:98 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 msgid "" -"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " -"of clients' updates. Any update exceeding this threshold is clipped back " -"to the threshold value." +"The example has not yet been updated to work with the latest ``flwr`` " +"version." msgstr "" -#: ../../source/explanation-differential-privacy.rst:100 -msgid "" -"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " -"on the observed update distribution [4]. 
It means that the clipping value" -" is tuned during the rounds with respect to the quantile of the update " -"norm distribution." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +msgid "quickstart-mlcube" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +msgid "quickstart-mlx" msgstr "" -#: ../../source/explanation-differential-privacy.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" -"The choice between fixed and adaptive clipping depends on various factors" -" such as privacy requirements, data distribution, model complexity, and " -"others." +"`Requires to run on macOS with Apple Silicon `_." msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 -msgid "Local Differential Privacy" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +msgid "quickstart-monai" msgstr "" -#: ../../source/explanation-differential-privacy.rst:107 -msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +msgid "quickstart-pandas" msgstr "" -#: ../../source/explanation-differential-privacy.rst:116 -msgid "In this explainer, we focus on two forms of achieving Local DP:" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +msgid "quickstart-pytorch-lightning" msgstr "" -#: ../../source/explanation-differential-privacy.rst:118 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. 
To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +"Requires an older pip version that is not supported by the Flower Docker " +"images." msgstr "" -#: ../../source/explanation-differential-privacy.rst:120 -msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +msgid "quickstart-pytorch" msgstr "" -#: ../../source/explanation-differential-privacy.rst:125 -msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "quickstart-sklearn-tabular" msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 -msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124 +msgid "quickstart-tabnet" msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 -msgid "**References:**" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126 +msgid "quickstart-tensorflow" msgstr "" -#: ../../source/explanation-differential-privacy.rst:133 -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 +msgid "Only runs on AMD64." msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." 
+#: ../../source/docker/set-environment-variables.rst:2 +msgid "Set Environment Variables" msgstr "" -#: ../../source/explanation-differential-privacy.rst:137 +#: ../../source/docker/set-environment-variables.rst:4 msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 +msgid "Deploy Flower on Multiple Machines with Docker Compose" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 +msgid "" +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:4 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." 
msgstr "" -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 +msgid "" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. 
The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 +msgid "" +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 -msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 msgid "" -"Federated evaluation can be configured from the server side. Built-in " -"strategies support the following arguments:" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." 
msgstr "" -#: ../../source/explanation-federated-evaluation.rst:105 -msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:106 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." +"For production environments, you may have to use dedicated services to " +"obtain your certificates." msgstr "" -#: ../../source/explanation-federated-evaluation.rst:107 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. 
For " +"example, if the IP is ``192.168.2.33``, execute:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:108 -msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:137 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." 
msgstr "" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" msgstr "" -#: ../../source/explanation-federated-evaluation.rst:179 -msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +msgid "Step 3: Start the Flower Server Components" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:10 -msgid "FED Template" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 +msgid "" +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:12 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 -msgid "Table of Contents" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:14 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 -msgid "[Table of Contents](#table-of-contents)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:15 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 -msgid "[Summary](#summary)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +msgid "Step 4: Start the Flower Client Components" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:16 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 -msgid "[Motivation](#motivation)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 +msgid "" +"On your local machine, run the following command to start the client " +"components:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 +msgid "" +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +msgid "Step 5: Run Your Flower Project" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 +msgid "" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. 
Here, we have named our remote federation " +"``remote-superexec``:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 +msgid "" +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 +msgid "" +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152 +msgid "Step 6: Clean Up" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154 +msgid "Shut down the Flower client components:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161 +msgid "Shut down the Flower server components and delete the SuperLink state:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: ../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:2 +msgid "Quickstart with Docker" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" +#: ../../source/docker/tutorial-quickstart-docker.rst:4 +msgid "" +"This quickstart aims to guide you through the process of containerizing a" +" Flower project and running it end to end using Docker on your local " +"machine." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" +#: ../../source/docker/tutorial-quickstart-docker.rst:7 +msgid "" +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" +#: ../../source/docker/tutorial-quickstart-docker.rst:45 +msgid "" +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." 
msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +msgid "Step 2: Start the SuperLink" msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +msgid "Open your terminal and run:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 +msgid "" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 +msgid "" +"``--detach``: Run the container in the background, freeing up the " +"terminal." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" +#: ../../source/docker/tutorial-quickstart-docker.rst:80 +msgid "Step 3: Start the SuperNode" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 -msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 -msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 -msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. 
The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 -msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." 
msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:124 +msgid "Start the second container:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:144 msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +#: ../../source/docker/tutorial-quickstart-docker.rst:149 +msgid "" +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst:152 +msgid "Dockerfile.clientapp" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the Dockerfile" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"to be built from is the ``flwr/clientapp 
image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"Any subsequent commands that reference a directory will be relative to " +"this directory." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"from the current working directory into the container's ``/app`` " +"directory." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "from the ``pyproject.toml``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**status** (Required) The current status of the proposal. 
See " -"[workflow](#workflow) for the possible states." +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 -msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:186 +msgid "" +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" +#: ../../source/docker/tutorial-quickstart-docker.rst:192 +msgid "" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 -msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:205 +msgid "Start the first ClientApp container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Under certain conditions, other states are possible. An Enhancement has " -"the following states:" +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 -msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." +#: ../../source/docker/tutorial-quickstart-docker.rst:226 +msgid "Start the second ClientApp container:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 -msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." 
+#: ../../source/docker/tutorial-quickstart-docker.rst:237 +msgid "Step 5: Start the SuperExec" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." +#: ../../source/docker/tutorial-quickstart-docker.rst:239 +msgid "" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." +#: ../../source/docker/tutorial-quickstart-docker.rst:245 +msgid "" +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." +#: ../../source/docker/tutorial-quickstart-docker.rst:248 +msgid "Dockerfile.superexec" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." 
+":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 -msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" -"Google Docs allow for multiple threads of discussions. 
But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" msgstr "" -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" +#: ../../source/docker/tutorial-quickstart-docker.rst:290 +msgid "Start the SuperExec container:" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" +#: ../../source/docker/tutorial-quickstart-docker.rst:310 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. 
Clients can " -"return custom metrics to the server by returning a dictionary:" +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:2 -msgid "Authenticate SuperNodes" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:4 -msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. " -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +#: ../../source/docker/tutorial-quickstart-docker.rst:320 +msgid "Step 6: Run the Quickstart Project" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:7 -msgid "SuperLink (server) stores a list of known (client) node public keys" +#: ../../source/docker/tutorial-quickstart-docker.rst:322 +msgid "Add the following lines to the ``pyproject.toml``:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:8 -msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" +#: ../../source/docker/tutorial-quickstart-docker.rst:331 +msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:9 -msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" +#: ../../source/docker/tutorial-quickstart-docker.rst:337 +msgid "Follow the SuperExec logs to track the 
execution of the run:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:10 -msgid "SuperLink verifies the token" +#: ../../source/docker/tutorial-quickstart-docker.rst:344 +msgid "Step 7: Update the Application" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." +"Change the application code. For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:15 -msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." +#: ../../source/docker/tutorial-quickstart-docker.rst:349 +msgid "quickstart_docker/task.py" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 -msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." +#: ../../source/docker/tutorial-quickstart-docker.rst:356 +msgid "Stop the current ClientApp containers:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 -msgid "Enable node authentication in :code:`SuperLink`" +#: ../../source/docker/tutorial-quickstart-docker.rst:362 +#, fuzzy +msgid "Rebuild the FAB and ClientApp image:" +msgstr "Construindo a imagem base" + +#: ../../source/docker/tutorial-quickstart-docker.rst:368 +msgid "Launch two new ClientApp containers based on the newly built image:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:23 -msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. 
Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +#: ../../source/docker/tutorial-quickstart-docker.rst:383 +msgid "Run the updated project:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:38 -msgid "Let's break down the authentication flags:" +#: ../../source/docker/tutorial-quickstart-docker.rst:390 +msgid "Step 8: Clean Up" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:40 -msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +#: ../../source/docker/tutorial-quickstart-docker.rst:392 +msgid "Remove the containers and the bridge network:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:42 -msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 +msgid "Where to Go Next" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:406 +msgid ":doc:`enable-tls`" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:407 +msgid ":doc:`persist-superlink-state`" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst:408 +msgid ":doc:`tutorial-quickstart-docker-compose`" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 +msgid "Quickstart with Docker Compose" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:44 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:47 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." 
msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:53 -msgid "Enable node authentication in :code:`SuperNode`" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 +msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:55 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"Export the path of the newly created project. The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:66 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:70 -msgid "Security notice" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 +msgid "Step 2: Run Flower in Insecure Mode" msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. 
Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." +"To begin, start Flower with the most basic configuration. In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 +msgid "" +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:79 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." msgstr "" -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +msgid "``docker compose``: The Docker command to run the Docker Compose tool." 
msgstr "" -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 +msgid "" +"``--build``: Rebuild the images for each service if they don't already " +"exist." msgstr "" -#: ../../source/how-to-configure-clients.rst:9 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -#: ../../source/how-to-configure-clients.rst:20 -msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "Step 3: Run the Quickstart Project" msgstr "" -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." msgstr "" -#: ../../source/how-to-configure-clients.rst:26 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." msgstr "" -#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" msgstr "" -#: ../../source/how-to-configure-clients.rst:32 -msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. 
It then forwards the configuration dictionary to all " -"the clients selected during that round." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +msgid "quickstart-compose/pyproject.toml" msgstr "" -#: ../../source/how-to-configure-clients.rst:34 -msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 +msgid "Execute the command to run the quickstart example:" msgstr "" -#: ../../source/how-to-configure-clients.rst:47 -msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +msgid "Step 4: Update the Application" msgstr "" -#: ../../source/how-to-configure-clients.rst:67 -msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +msgid "In the next step, change the application code." 
msgstr "" -#: ../../source/how-to-configure-clients.rst:69 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" msgstr "" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +msgid "quickstart-compose/quickstart_compose/task.py" msgstr "" -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 +msgid "Rebuild and restart the services." msgstr "" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -#: ../../source/how-to-configure-clients.rst:89 -msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +msgid "If you haven't made any changes, you can skip this step." msgstr "" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +msgid "Run the following command to rebuild and restart the services:" msgstr "" -#: ../../source/how-to-configure-logging.rst:4 -msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. It presents information by default " -"following a standard message format:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +msgid "Run the updated quickstart example:" msgstr "" -#: ../../source/how-to-configure-logging.rst:13 -msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" msgstr "" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +msgid "Step 5: Persisting the SuperLink State" msgstr "" -#: ../../source/how-to-configure-logging.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. 
This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. For example:" +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." msgstr "" -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 +msgid "" +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." msgstr "" -#: ../../source/how-to-configure-logging.rst:76 -msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Run the command:" msgstr "" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" msgstr "" -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." msgstr "" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." +"Docker merges Compose files according to `merging rules " +"`_." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +msgid "Rerun the ``quickstart-compose`` project:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:4 -msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +msgid "Check the content of the ``state`` directory:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:7 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:10 -msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." -msgstr "" - -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +msgid "Step 6: Run Flower with TLS" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. 
For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:31 -msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +msgid "Restart the services with TLS enabled:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:39 -msgid "Server (SuperLink)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 +msgid "Step 7: Add another SuperNode" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:41 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:50 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:54 -msgid "Client (SuperNode)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 +msgid "In ``compose.yml``, add the following:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:56 -msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +msgid "compose.yml" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:64 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." 
msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:75 -msgid "Additional resources" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 +msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:77 -msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +msgid "with-tls.yml" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:79 -msgid "`Let's Encrypt `_" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:80 -msgid "`certbot `_" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 +msgid "" +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:2 -msgid "Implement strategies" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:4 -msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +msgid "with-state.yml" msgstr "" -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +msgid "Restart the services:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:13 -msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/how-to-implement-strategies.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" +"You can merge multiple Compose files into a single file. For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:75 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +msgid "Step 10: Clean Up" msgstr "" -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +msgid "Remove all services and volumes:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 +msgid ":doc:`run-quickstart-examples-docker-compose`" msgstr "" -#: ../../source/how-to-implement-strategies.rst:182 -msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +#: ../../source/docker/use-a-different-version.rst:2 +msgid "Use a Different Flower Version" msgstr "" -#: ../../source/how-to-implement-strategies.rst:184 +#: ../../source/docker/use-a-different-version.rst:4 msgid "" -"Built-in strategies return user-provided initial parameters. The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." msgstr "" -#: ../../source/how-to-implement-strategies.rst:209 +#: ../../source/docker/use-a-different-version.rst:10 msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. 
If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" msgstr "" -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." -" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "Centralized Training" msgstr "" -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 -msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 +msgid "You can now run your machine learning workload:" msgstr "" -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" -#: ../../source/how-to-implement-strategies.rst:236 -msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 +msgid "Federated Training" msgstr "" -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. 
This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." msgstr "" -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 +msgid "" +"Our example consists of one *server* and two *clients*. In FedBN, " +"``server.py`` keeps unchanged, we can start the server directly." msgstr "" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." msgstr "" -#: ../../source/how-to-implement-strategies.rst:258 -msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." 
+#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 +msgid "Now, you can now open two additional terminal windows and run" msgstr "" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" msgstr "" -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 +msgid "Next Steps" msgstr "" -#: ../../source/how-to-implement-strategies.rst:265 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:278 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" msgstr "" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." msgstr "" -#: ../../source/how-to-implement-strategies.rst:283 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"Let's create a new file called ``cifar.py`` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +msgid "" +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in ``class Net()``." msgstr "" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." 
msgstr "" -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." msgstr "" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." msgstr "" -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." msgstr "" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. 
Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." msgstr "" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." msgstr "" -#: ../../source/how-to-install-flower.rst:2 -msgid "Install Flower" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 +msgid "" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." msgstr "" -#: ../../source/how-to-install-flower.rst:6 -msgid "Python version" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 +msgid "" +"The concept is easy to understand. We have to start a *server* and then " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. 
The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." msgstr "" -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 +msgid "" +"Our example consists of one *server* and two *clients*. Let's set up " +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." msgstr "" -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -msgid "Using pip" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 +msgid "We can already start the *server*:" msgstr "" -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 msgid "" -"Stable releases are available on `PyPI " -"`_::" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" msgstr "" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. 
Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. ``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" msgstr "" -#: ../../source/how-to-install-flower.rst:27 -msgid "Using conda (or mamba)" -msgstr "" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#, fuzzy +msgid "``set_parameters``" +msgstr "``SETUPTOOLS_VERSION``" -#: ../../source/how-to-install-flower.rst:29 -msgid "Flower can also be installed from the ``conda-forge`` channel." +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 +msgid "" +"set the model parameters on the local model that are received from the " +"server" msgstr "" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" msgstr "" -#: ../../source/how-to-install-flower.rst:36 -msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 +msgid "``get_parameters``" msgstr "" -#: ../../source/how-to-install-flower.rst:40 -msgid "or with ``mamba``::" +#: 
../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +msgid "" +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "``fit``" msgstr "" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. If everything worked, it should print the version of Flower to" -" the command line::" +"update the parameters of the local model with the parameters received " +"from the server" msgstr "" -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +msgid "train the model on the local training set" msgstr "" -#: ../../source/how-to-install-flower.rst:58 -msgid "Install via Docker" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 +msgid "get the updated local model weights and return them to the server" msgstr "" -#: ../../source/how-to-install-flower.rst:60 -msgid ":doc:`How to run Flower using Docker `" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "``evaluate``" msgstr "" -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" +#: 
../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 +msgid "evaluate the updated model on the local test set" +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +msgid "return the local loss and accuracy to the server" msgstr "" -#: ../../source/how-to-install-flower.rst:65 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before the stable " -"release happens::" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" -#: ../../source/how-to-install-flower.rst:69 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" +"All that's left to do it to define a function that loads both model and " +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" msgstr "" -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 +msgid "And that's it. 
You can now open two additional terminal windows and run" msgstr "" -#: ../../source/how-to-install-flower.rst:76 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" msgstr "" -#: ../../source/how-to-install-flower.rst:80 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:14 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:4 -msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." 
-msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/explanation-differential-privacy.rst:4 msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/explanation-differential-privacy.rst:9 msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/explanation-differential-privacy.rst:16 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). 
This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "DP Intro" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/explanation-differential-privacy.rst:27 msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:44 -msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" +#: ../../source/explanation-differential-privacy.rst:32 +msgid "Formal Definition" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/explanation-differential-privacy.rst:34 msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. 
A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:69 +#: ../../source/explanation-differential-privacy.rst:42 msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/explanation-differential-privacy.rst:47 msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." -msgstr "" - -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:90 -msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." 
+#: ../../source/explanation-differential-privacy.rst:56 +msgid "Differential Privacy in Machine Learning" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-differential-privacy.rst:58 msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." +#: ../../source/explanation-differential-privacy.rst:69 +msgid "Differential Privacy in Federated Learning" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/explanation-differential-privacy.rst:71 msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." +#: ../../source/explanation-differential-privacy.rst:78 +msgid "" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:119 +#: ../../source/explanation-differential-privacy.rst:81 msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/explanation-differential-privacy.rst:86 msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:123 +#: ../../source/explanation-differential-privacy.rst:88 msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." 
+"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:93 +#: ../../source/how-to-use-differential-privacy.rst:15 +msgid "Central Differential Privacy" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/explanation-differential-privacy.rst:95 msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/explanation-differential-privacy.rst:104 msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. You can " -"check the system resources by running the following:" +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." 
+" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" +#: ../../source/explanation-differential-privacy.rst:-1 +msgid "clipping" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-differential-privacy.rst:120 msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." +#: ../../source/explanation-differential-privacy.rst:126 +msgid "Clipping" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/explanation-differential-privacy.rst:128 msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." 
msgstr "" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-differential-privacy.rst:131 msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" +#: ../../source/explanation-differential-privacy.rst:133 +msgid "" +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." +#: ../../source/explanation-differential-privacy.rst:137 +msgid "" +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:216 -msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." 
+#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:141 +#: ../../source/how-to-use-differential-privacy.rst:113 +msgid "Local Differential Privacy" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-differential-privacy.rst:143 msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:220 -msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" +#: ../../source/explanation-differential-privacy.rst:152 +msgid "In this explainer, we focus on two forms of achieving Local DP:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/explanation-differential-privacy.rst:154 msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/explanation-differential-privacy.rst:158 msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." 
+"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}" msgstr "" -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" +#: ../../source/explanation-differential-privacy.rst:163 +msgid "" +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/explanation-differential-privacy.rst:167 msgid "" -"Ray Dashboard: ``_" +"Please note that these two approaches are providing privacy at different " +"levels." msgstr "" -#: ../../source/how-to-monitor-simulation.rst:236 -msgid "Ray Metrics: ``_" +#: ../../source/explanation-differential-privacy.rst:169 +msgid "**References:**" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:2 -msgid "Run Flower using Docker" +#: ../../source/explanation-differential-privacy.rst:171 +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/explanation-differential-privacy.rst:173 msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__. Supported architectures include " -"``amd64`` and ``arm64v8``." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:8 -msgid "Before you start, make sure that the Docker daemon is running:" +#: ../../source/explanation-differential-privacy.rst:175 +msgid "" +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:15 -msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." +#: ../../source/explanation-differential-privacy.rst:177 +msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:21 -msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:27 +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:32 -msgid "Flower SuperLink" +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:35 -msgid "Quickstart" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst:37 -msgid "If you're looking to try out Flower, you can use the following command:" +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:43 +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:49 -msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." +#: ../../source/explanation-federated-evaluation.rst:61 +msgid "Custom Strategies" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:53 +#: ../../source/explanation-federated-evaluation.rst:63 msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. 
Here, we are passing the" -" flag ``--insecure``." +"The ``Strategy`` abstraction provides a method called ``evaluate`` that " +"can directly be used to evaluate the current global model parameters. The" +" current server implementation calls ``evaluate`` after parameter " +"aggregation and before federated evaluation (see next paragraph)." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:60 -#: ../../source/how-to-run-flower-using-docker.rst:259 -#: ../../source/how-to-run-flower-using-docker.rst:376 -msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." +#: ../../source/explanation-federated-evaluation.rst:69 +msgid "Federated Evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:65 -msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" +#: ../../source/explanation-federated-evaluation.rst:72 +msgid "Implementing Federated Evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:72 -msgid "Mounting a volume to store the state on the host system" +#: ../../source/explanation-federated-evaluation.rst:74 +msgid "" +"Client-side evaluation happens in the ``Client.evaluate`` method and can " +"be configured from the server side." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:74 -msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a directory where you want to save the file" -" on your host system and a name for the database file. By default, the " -"SuperLink container runs with a non-root user called ``app`` with the " -"user ID ``49999``. It is recommended to create new directory and change " -"the user ID of the directory to ``49999`` to ensure the mounted directory" -" has the proper permissions. 
If you later want to delete the directory, " -"you can change the user ID back to the current user ID by running ``sudo " -"chown -R $USER:$(id -gn) state``." +#: ../../source/explanation-federated-evaluation.rst:108 +msgid "Configuring Federated Evaluation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:82 +#: ../../source/explanation-federated-evaluation.rst:110 msgid "" -"In the example below, we create a new directory, change the user ID and " -"tell Docker via the flag ``--volume`` to mount the local ``state`` " -"directory into the ``/app/state`` directory of the container. " -"Furthermore, we use the flag ``--database`` to specify the name of the " -"database file." +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:95 +#: ../../source/explanation-federated-evaluation.rst:113 msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"``state`` directory on your host system. If the file already exists, the " -"SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." +"``fraction_evaluate``: a ``float`` defining the fraction of clients that " +"will be selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.1`` and ``100`` clients are connected to the server, then ``10`` will" +" be randomly selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.0``, federated evaluation will be disabled." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:100 -#: ../../source/how-to-run-flower-using-docker.rst:281 -#: ../../source/how-to-run-flower-using-docker.rst:397 -msgid "Enabling SSL for secure connections" +#: ../../source/explanation-federated-evaluation.rst:118 +msgid "" +"``min_evaluate_clients``: an ``int``: the minimum number of clients to be" +" selected for evaluation. 
If ``fraction_evaluate`` is set to ``0.1``, " +"``min_evaluate_clients`` is set to 20, and ``100`` clients are connected " +"to the server, then ``20`` clients will be selected for evaluation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:102 +#: ../../source/explanation-federated-evaluation.rst:122 msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." +"``min_available_clients``: an ``int`` that defines the minimum number of " +"clients which need to be connected to the server before a round of " +"federated evaluation can start. If fewer than ``min_available_clients`` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:106 +#: ../../source/explanation-federated-evaluation.rst:127 msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." +"``on_evaluate_config_fn``: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:110 -msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/certificates/`` directory of the container. This allows the " -"SuperLink to access the files within the container. The ``ro`` stands for" -" ``read-only``. Docker volumes default to ``read-write``; that option " -"tells Docker to make the volume ``read-only`` instead. 
Finally, we pass " -"the names of the certificates and key file to the SuperLink with the " -"``--ssl-ca-certfile``, ``--ssl-certfile`` and ``--ssl-keyfile`` flag." +#: ../../source/explanation-federated-evaluation.rst:157 +msgid "Evaluating Local Model Updates During Training" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:128 +#: ../../source/explanation-federated-evaluation.rst:159 msgid "" -"Because Flower containers, by default, run with a non-root user ``app``, " -"the mounted files and directories must have the proper permissions for " -"the user ID ``49999``. For example, to change the user ID of all files in" -" the ``certificates/`` directory, you can run ``sudo chown -R 49999:49999" -" certificates/*``." +"Model parameters can also be evaluated during training. ``Client.fit`` " +"can return arbitrary evaluation results as a dictionary:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:134 -msgid "Flower SuperNode" +#: ../../source/explanation-federated-evaluation.rst:201 +msgid "Full Code Example" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:136 +#: ../../source/explanation-federated-evaluation.rst:203 msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:141 +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). 
A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:147 +#: ../../source/explanation-flower-architecture.rst:2 +msgid "Flower Architecture" +msgstr "Arquitetura do Flower" + +#: ../../source/explanation-flower-architecture.rst:4 msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." +"This page explains the architecture of deployed Flower federated learning" +" system." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:155 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:6 msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." msgstr "" -"Antes de começarmos, precisamos encontrar alguns pré-requisitos em nosso " -"ambiente de desenvolvimento local." - -#: ../../source/how-to-run-flower-using-docker.rst:159 -#, fuzzy -msgid "Clone the Flower repository." -msgstr "Clone o repositório do flower." -#: ../../source/how-to-run-flower-using-docker.rst:173 -msgid "Creating a SuperNode Dockerfile" +#: ../../source/explanation-flower-architecture.rst:9 +msgid "" +"The role of the server is to coordinate the training process. 
The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:175 -#: ../../source/how-to-run-flower-using-docker.rst:311 -msgid "Let's assume the following project layout:" +#: ../../source/explanation-flower-architecture.rst:13 +msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:184 -msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." +#: ../../source/explanation-flower-architecture.rst:21 +msgid "Hub-and-spoke topology in federated learning" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:196 +#: ../../source/explanation-flower-architecture.rst:21 msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:200 +#: ../../source/explanation-flower-architecture.rst:23 msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:203 +#: ../../source/explanation-flower-architecture.rst:28 msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:217 -msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." +#: ../../source/explanation-flower-architecture.rst:32 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:226 -#, fuzzy -msgid "Building the SuperNode Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:228 +#: ../../source/explanation-flower-architecture.rst:34 msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:235 +#: ../../source/explanation-flower-architecture.rst:36 msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:240 -#, fuzzy -msgid "Running the SuperNode Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:242 -msgid "Now that we have built the SuperNode image, we can finally run it." +#: ../../source/explanation-flower-architecture.rst:41 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:250 -#: ../../source/how-to-run-flower-using-docker.rst:367 -msgid "Let's break down each part of this command:" +#: ../../source/explanation-flower-architecture.rst:43 +msgid "" +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:252 -#: ../../source/how-to-run-flower-using-docker.rst:369 -msgid "``docker run``: This is the command to run a new Docker container." +#: ../../source/explanation-flower-architecture.rst:46 +msgid "" +"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). 
This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:253 -#: ../../source/how-to-run-flower-using-docker.rst:370 +#: ../../source/explanation-flower-architecture.rst:51 msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:254 -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." -msgstr "" +#: ../../source/explanation-flower-architecture.rst:62 +#, fuzzy +msgid "Basic Flower architecture" +msgstr "Arquitetura do Flower" -#: ../../source/how-to-run-flower-using-docker.rst:255 -#: ../../source/how-to-run-flower-using-docker.rst:372 -msgid "``--insecure``: This option enables insecure communication." +#: ../../source/explanation-flower-architecture.rst:62 +msgid "The basic Flower architecture for federated learning." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst +#: ../../source/explanation-flower-architecture.rst:64 msgid "" -"``--superlink 192.168.1.100:9092``: This option specifies the address of " -"the SuperLinks Fleet" -msgstr "" - -#: ../../source/how-to-run-flower-using-docker.rst -msgid "API to connect to. Remember to update it with your SuperLink IP." +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:269 +#: ../../source/explanation-flower-architecture.rst:70 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:273 +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:283 +#: ../../source/explanation-flower-architecture.rst:78 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:285 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. Use the ``--root-certificates`` flag when starting " -"the container." 
+#: ../../source/explanation-flower-architecture.rst:87 +msgid "Multi-tenancy federated learning architecture" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:297 -msgid "Flower ServerApp" +#: ../../source/explanation-flower-architecture.rst:87 +msgid "Multi-tenancy federated learning architecture with Flower" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:299 +#: ../../source/explanation-flower-architecture.rst:89 msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:301 +#: ../../source/explanation-flower-architecture.rst:94 msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:304 -msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." +#: ../../source/explanation-flower-architecture.rst:103 +msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:309 -msgid "Creating a ServerApp Dockerfile" +#: ../../source/explanation-flower-architecture.rst:103 +msgid "" +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:320 +#: ../../source/explanation-flower-architecture.rst:106 msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:324 -msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." +#: ../../source/explanation-flower-architecture.rst:115 +msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:335 +#: ../../source/explanation-flower-architecture.rst:115 msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container." +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:343 -#, fuzzy -msgid "Building the ServerApp Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:345 +#: ../../source/explanation-flower-architecture.rst:118 msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:352 +#: ../../source/explanation-flower-architecture.rst:121 msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:357 -#, fuzzy -msgid "Running the ServerApp Docker image" -msgstr "Construindo a imagem do servidor" - -#: ../../source/how-to-run-flower-using-docker.rst:359 -msgid "Now that we have built the ServerApp image, we can finally run it." +#: ../../source/explanation-flower-architecture.rst:128 +msgid "" +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:371 -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#: ../../source/explanation-flower-architecture.rst:137 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -msgid "" -"``--superlink 192.168.1.100:9091``: This option specifies the address of " -"the SuperLinks Driver" +#: ../../source/explanation-flower-architecture.rst:137 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:385 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:389 +#: ../../source/explanation-flower-architecture.rst:146 msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. To see all available flags that the ServerApp supports, run:" +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:399 -msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:401 -msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. 
This allows the ServerApp to access the certificate " -"within the container. Use the ``--root-certificates`` flags when starting" -" the container." +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:412 -msgid "Advanced Docker options" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:415 -msgid "Run with root user privileges" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:417 -msgid "" -"Flower Docker images, by default, run with a non-root user " -"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is not" -" recommended unless it is necessary for specific tasks during the build " -"process. Always make sure to run the container as a non-root user in " -"production to maintain security best practices." 
+#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:422 -msgid "**Run a container with root user privileges**" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:424 -msgid "" -"Run the Docker image with the ``-u`` flag and specify ``root`` as the " -"username:" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:430 -msgid "This command will run the Docker container with root user privileges." +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:432 -msgid "**Run the build process with root user privileges**" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:434 -msgid "" -"If you want to switch to the root user during the build process of the " -"Docker image to install missing system dependencies, you can use the " -"``USER root`` directive within your Dockerfile." 
+#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:454 -msgid "Using a different Flower version" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:456 -msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:460 -msgid "Pinning a Docker image to a specific version" +#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:462 -msgid "" -"It may happen that we update the images behind the tags. Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." 
+#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:467 -msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:474 -msgid "Next, we can pin the hash when running a new SuperLink container:" +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:483 -msgid "Setting environment variables" +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:485 -msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." 
+#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" msgstr "" -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" msgstr "" -#: ../../source/how-to-run-simulations.rst:8 -msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" msgstr "" -#: ../../source/how-to-run-simulations.rst:10 -msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. 
In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" msgstr "" -#: ../../source/how-to-run-simulations.rst:12 -msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" msgstr "" -#: ../../source/how-to-run-simulations.rst:13 -msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." +#: ../../source/fed/0000-20200102-fed-template.md:56 +msgid "\\[Alternative 2\\]" msgstr "" -#: ../../source/how-to-run-simulations.rst:14 -msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" msgstr "" -#: ../../source/how-to-run-simulations.rst:16 -msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" msgstr "" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" msgstr "" -#: ../../source/how-to-run-simulations.rst:22 -msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" msgstr "" -#: ../../source/how-to-run-simulations.rst:44 -msgid "VirtualClientEngine resources" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" msgstr "" -#: ../../source/how-to-run-simulations.rst:45 -msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" msgstr "" -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" msgstr "" -#: ../../source/how-to-run-simulations.rst:63 -msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" msgstr "" -#: ../../source/how-to-run-simulations.rst:65 -msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" msgstr "" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" msgstr "" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" msgstr "" -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" msgstr "" -#: ../../source/how-to-run-simulations.rst:89 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" msgstr "" -#: ../../source/how-to-run-simulations.rst:91 -msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" msgstr "" -#: ../../source/how-to-run-simulations.rst:94 -msgid "Simulation examples" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" msgstr "" -#: ../../source/how-to-run-simulations.rst:96 -msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" msgstr "" -#: ../../source/how-to-run-simulations.rst:98 -msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" msgstr "" -#: ../../source/how-to-run-simulations.rst:99 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." -msgstr "" - -#: ../../source/how-to-run-simulations.rst:104 -msgid "Multi-node Flower simulations" +"into one file, which is created incrementally in collaboration with the " +"community." msgstr "" -#: ../../source/how-to-run-simulations.rst:106 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +msgid "" +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." msgstr "" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 +msgid "" +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." msgstr "" -#: ../../source/how-to-run-simulations.rst:110 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." msgstr "" -#: ../../source/how-to-run-simulations.rst:111 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:112 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." msgstr "" -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." msgstr "" -#: ../../source/how-to-run-simulations.rst:115 -msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" msgstr "" -#: ../../source/how-to-run-simulations.rst:117 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." 
+"Each enhancement doc is provided as a Markdown file having the following " +"structure" msgstr "" -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" msgstr "" -#: ../../source/how-to-run-simulations.rst:122 -msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" msgstr "" -#: ../../source/how-to-run-simulations.rst:124 -msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" msgstr "" -#: ../../source/how-to-run-simulations.rst:126 -msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" msgstr "" -#: ../../source/how-to-run-simulations.rst:132 -msgid "Considerations for simulations" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" msgstr "" -#: ../../source/how-to-run-simulations.rst:135 -msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" msgstr "" -#: ../../source/how-to-run-simulations.rst:138 -msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" msgstr "" -#: ../../source/how-to-run-simulations.rst:141 -msgid "GPU resources" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." msgstr "" -#: ../../source/how-to-run-simulations.rst:143 -msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " -"internally by the VCE) is by default:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" msgstr "" -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:147 -msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." msgstr "" -#: ../../source/how-to-run-simulations.rst:149 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." msgstr "" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." msgstr "" -#: ../../source/how-to-run-simulations.rst:153 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." msgstr "" -#: ../../source/how-to-run-simulations.rst:156 -msgid "TensorFlow with GPUs" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 +msgid "" +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." 
msgstr "" -#: ../../source/how-to-run-simulations.rst:158 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." msgstr "" -#: ../../source/how-to-run-simulations.rst:160 -msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." msgstr "" -#: ../../source/how-to-run-simulations.rst:179 -msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." msgstr "" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" msgstr "" -#: ../../source/how-to-run-simulations.rst:185 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. 
In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." msgstr "" -#: ../../source/how-to-run-simulations.rst:187 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." 
msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +msgid "" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." -msgstr "" - -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" +"`provisional`: The enhancement has been proposed and is actively being " +"defined. 
This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -msgid "Save and load PyTorch checkpoints" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 -msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." msgstr "" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 -msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 +msgid "" +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." 
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 -msgid "" -"``python -m pip install -U 'flwr[simulation]'`` (when using " -"``start_simulation``)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 -msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" +#: ../../source/how-to-aggregate-evaluation-results.rst:4 +msgid "" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +msgid "" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +#: ../../source/how-to-aggregate-evaluation-results.rst:39 msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). 
" -"Here's an example:" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 -msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" +#: ../../source/how-to-authenticate-supernodes.rst:2 +msgid "Authenticate SuperNodes" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/how-to-authenticate-supernodes.rst:4 msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. " +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" +#: ../../source/how-to-authenticate-supernodes.rst:8 +msgid "SuperLink (server) stores a list of known (client) node public keys" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/how-to-authenticate-supernodes.rst:9 msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/how-to-authenticate-supernodes.rst:10 msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" +#: 
../../source/how-to-authenticate-supernodes.rst:12 +msgid "SuperLink verifies the token" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +#: ../../source/how-to-authenticate-supernodes.rst:14 msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-authenticate-supernodes.rst:20 msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"This guide covers a preview feature that might change in future versions " +"of Flower." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-authenticate-supernodes.rst:24 msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 -msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" +#: ../../source/how-to-authenticate-supernodes.rst:28 +msgid "Enable node authentication in ``SuperLink``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-authenticate-supernodes.rst:30 msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." 
-msgstr "" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower ``SuperLink``." +" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +#: ../../source/how-to-authenticate-supernodes.rst:47 +msgid "Let's break down the authentication flags:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +#: ../../source/how-to-authenticate-supernodes.rst:49 +msgid "" +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-authenticate-supernodes.rst:53 msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. 
For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +#: ../../source/how-to-authenticate-supernodes.rst:57 +msgid "" +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" +#: ../../source/how-to-authenticate-supernodes.rst:64 +msgid "" +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +#: ../../source/how-to-authenticate-supernodes.rst:71 +msgid "Enable node authentication in ``SuperNode``" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" +#: ../../source/how-to-authenticate-supernodes.rst:73 +msgid "" +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-authenticate-supernodes.rst:85 msgid "" -"Rename ``rnd`` to ``server_round``. 
This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +#: ../../source/how-to-authenticate-supernodes.rst:91 +msgid "Security notice" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +#: ../../source/how-to-authenticate-supernodes.rst:93 msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "" + +#: ../../source/how-to-authenticate-supernodes.rst:102 msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"You should now have learned how to start a long-running Flower server " +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. You should also know the significance of the private key and " +"store it safely to minimize security risks." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 -msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" +#: ../../source/how-to-configure-clients.rst:9 +msgid "Configuration values" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 +#: ../../source/how-to-configure-clients.rst:11 msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." +" Here is an example of a configuration dictionary in Python:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-configure-clients.rst:25 msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" +#: ../../source/how-to-configure-clients.rst:31 +msgid "" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-configure-clients.rst:36 msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 -msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." +#: ../../source/how-to-configure-clients.rst:41 +msgid "Configuration through built-in strategies" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-configure-clients.rst:43 msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" +#: ../../source/how-to-configure-clients.rst:49 +msgid "" +"Let's start with a simple example. 
Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-configure-clients.rst:65 msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +"``on_fit_config_fn``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 -msgid "Upgrade to Flower Next" +#: ../../source/how-to-configure-clients.rst:75 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-configure-clients.rst:86 msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-configure-clients.rst:90 msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. 
In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 -msgid "Let's dive in!" +#: ../../source/how-to-configure-clients.rst:107 +msgid "The ``FedAvg`` strategy will call this function *every round*." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 -msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" +#: ../../source/how-to-configure-clients.rst:110 +msgid "Configuring individual clients" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -msgid "or if you need Flower Next with simulation:" +#: ../../source/how-to-configure-clients.rst:112 +msgid "" +"In some cases, it is necessary to send different configuration values to " +"different clients." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-configure-clients.rst:115 msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. 
Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -msgid "or ``pyproject.toml``:" +#: ../../source/how-to-configure-logging.rst:2 +msgid "Configure logging" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -msgid "Using Poetry" +#: ../../source/how-to-configure-logging.rst:4 +msgid "" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. It presents information by default " +"following a standard message format:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 +#: ../../source/how-to-configure-logging.rst:13 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." +"containing relevant information including: log message level (e.g. " +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" +msgstr "" + +#: ../../source/how-to-configure-logging.rst:35 +msgid "Saving log to file" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 +#: ../../source/how-to-configure-logging.rst:37 msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" +"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). 
In some situations you might want to" +" save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. For example:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 +#: ../../source/how-to-configure-logging.rst:59 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"With the above, Flower will record the log you see on your terminal to " +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -msgid "|clientapp_link|_" +#: ../../source/how-to-configure-logging.rst:81 +msgid "Log your own messages" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 +#: ../../source/how-to-configure-logging.rst:83 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 -msgid "|serverapp_link|_" +#: ../../source/how-to-configure-logging.rst:114 +msgid "" +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." +msgstr "" + +#: ../../source/how-to-configure-logging.rst:140 +msgid "Log to a remote service" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 +#: ../../source/how-to-configure-logging.rst:142 msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" +"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -msgid "Deployment" +#: ../../source/how-to-enable-ssl-connections.rst:2 +msgid "Enable SSL connections" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-enable-ssl-connections.rst:4 msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"This guide describes how to a SSL-enabled secure Flower server " +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." 
msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 +#: ../../source/how-to-enable-ssl-connections.rst:8 msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" +"A complete code example demonstrating a secure connection can be found " +"`here `_." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-enable-ssl-connections.rst:11 msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 -msgid "Simulation in CLI" +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 +#: ../../source/how-to-enable-ssl-connections.rst:18 msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. 
As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 +#: ../../source/how-to-enable-ssl-connections.rst:29 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-enable-ssl-connections.rst:32 msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -msgid "Simulation in a Notebook" +#: ../../source/how-to-enable-ssl-connections.rst:40 +msgid "Server (SuperLink)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 +#: ../../source/how-to-enable-ssl-connections.rst:42 msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. 
Here's " -"an example:" +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-enable-ssl-connections.rst:52 msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 -msgid "Important" +#: ../../source/how-to-enable-ssl-connections.rst:56 +msgid "Client (SuperNode)" msgstr "" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 +#: ../../source/how-to-enable-ssl-connections.rst:58 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" -msgstr "" - -#: ../../source/how-to-upgrade-to-flower-next.rst:334 -msgid "Happy migrating! 🚀" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:2 -msgid "Use Built-in Mods" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-enable-ssl-connections.rst:67 msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." 
msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-enable-ssl-connections.rst:73 msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:9 -msgid "What are Mods?" +#: ../../source/how-to-enable-ssl-connections.rst:78 +msgid "Additional resources" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:11 +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. The signature for a ``Mod`` is as follows:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:18 -msgid "A typical mod function might look something like this:" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:31 -msgid "Using Mods" +#: ../../source/how-to-enable-ssl-connections.rst:83 +msgid "`Let's Encrypt `_" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:33 -msgid "To use mods in your ``ClientApp``, you can follow these steps:" +#: ../../source/how-to-enable-ssl-connections.rst:84 +msgid "`certbot `_" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:36 -msgid "1. 
Import the required mods" +#: ../../source/how-to-implement-strategies.rst:2 +msgid "Implement strategies" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:38 -msgid "First, import the built-in mod you intend to use:" +#: ../../source/how-to-implement-strategies.rst:4 +msgid "" +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:46 -msgid "2. Define your client function" +#: ../../source/how-to-implement-strategies.rst:11 +msgid "The ``Strategy`` abstraction" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-implement-strategies.rst:13 msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:57 -msgid "3. Create the ``ClientApp`` with mods" +"All strategy implementation are derived from the abstract base class " +"``flwr.server.strategy.Strategy``, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-implement-strategies.rst:18 msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. 
The order in which you provide the mods matters:" -msgstr "" - -#: ../../source/how-to-use-built-in-mods.rst:72 -msgid "Order of execution" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-implement-strategies.rst:67 msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:76 -msgid "``example_mod_1`` (outermost mod)" +#: ../../source/how-to-implement-strategies.rst:97 +msgid "The Flower server calls these methods in the following order:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:77 -msgid "``example_mod_2`` (next mod)" +#: ../../source/how-to-implement-strategies.rst:174 +msgid "The following sections describe each of those methods in more detail." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:78 -msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" +#: ../../source/how-to-implement-strategies.rst:177 +msgid "The ``initialize_parameters`` method" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:79 -msgid "``example_mod_2`` (on the way back)" +#: ../../source/how-to-implement-strategies.rst:179 +msgid "" +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." 
msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:80 -msgid "``example_mod_1`` (outermost mod on the way back)" +#: ../../source/how-to-implement-strategies.rst:183 +msgid "" +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +"``FedAvg``:" msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-implement-strategies.rst:209 msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-implement-strategies.rst:218 msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." msgstr "" -#: ../../source/how-to-use-built-in-mods.rst:89 -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" 
+#: ../../source/how-to-implement-strategies.rst:224 +msgid "The ``configure_fit`` method" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:2 -msgid "Use Differential Privacy" +#: ../../source/how-to-implement-strategies.rst:226 +msgid "" +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. The signature of ``configure_fit`` makes this clear:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:3 +#: ../../source/how-to-implement-strategies.rst:239 msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_fit``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:7 +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-implement-strategies.rst:245 msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. 
For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-implement-strategies.rst:248 msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-implement-strategies.rst:254 msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." 
msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:21 -msgid "Server-side Clipping" +#: ../../source/how-to-implement-strategies.rst:261 +msgid "The ``aggregate_fit`` method" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-implement-strategies.rst:263 msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "server side clipping" +#: ../../source/how-to-implement-strategies.rst:277 +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-implement-strategies.rst:282 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. 
The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:52 -msgid "Client-side Clipping" +#: ../../source/how-to-implement-strategies.rst:288 +msgid "The ``configure_evaluate`` method" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-implement-strategies.rst:290 msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." -msgstr "" - -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "client side clipping" +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of ``configure_evaluate`` makes this clear:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-implement-strategies.rst:303 msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. 
Strategy implementations " +"usually perform the following steps in ``configure_evaluate``:" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-implement-strategies.rst:309 msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-implement-strategies.rst:312 msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." +"More sophisticated implementations can use ``configure_evaluate`` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:-1 -msgid "local DP mod" +#: ../../source/how-to-implement-strategies.rst:318 +msgid "" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." 
msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:104 -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" +#: ../../source/how-to-implement-strategies.rst:325 +msgid "The ``aggregate_evaluate`` method" msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-implement-strategies.rst:327 msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." +"``aggregate_evaluate`` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +"``configure_evaluate``." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:125 -msgid "Local Training using Privacy Engines" +#: ../../source/how-to-implement-strategies.rst:341 +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-implement-strategies.rst:346 msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" +#: ../../source/how-to-implement-strategies.rst:352 +msgid "The ``evaluate`` method" msgstr "" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-implement-strategies.rst:354 msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." msgstr "" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-implement-strategies.rst:364 msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" -msgstr "" - -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." 
msgstr "" -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" +#: ../../source/how-to-install-flower.rst:2 +msgid "Install Flower" msgstr "" -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" +#: ../../source/how-to-install-flower.rst:5 +msgid "Python version" msgstr "" -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" +#: ../../source/how-to-install-flower.rst:11 +msgid "Install stable release" msgstr "" -#: ../../source/how-to-use-strategies.rst:16 -msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. A built-in strategy can be instantiated as follows:" +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 +msgid "Using pip" msgstr "" -#: ../../source/how-to-use-strategies.rst:25 -msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" +#: ../../source/how-to-install-flower.rst:16 +msgid "Stable releases are available on `PyPI `_:" msgstr "" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-install-flower.rst:22 msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." 
+"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra:" msgstr "" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" +#: ../../source/how-to-install-flower.rst:30 +msgid "Using conda (or mamba)" msgstr "" -#: ../../source/how-to-use-strategies.rst:47 -msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +#: ../../source/how-to-install-flower.rst:32 +msgid "Flower can also be installed from the ``conda-forge`` channel." msgstr "" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-install-flower.rst:34 msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." 
+"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following:" msgstr "" -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-install-flower.rst:42 msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``:" msgstr "" -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" +#: ../../source/how-to-install-flower.rst:49 +msgid "or with ``mamba``:" msgstr "" -#: ../../source/how-to-use-strategies.rst:83 -msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." +#: ../../source/how-to-install-flower.rst:56 +msgid "Verify installation" msgstr "" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-install-flower.rst:58 msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +"The following command can be used to verify if Flower was successfully " +"installed. 
If everything worked, it should print the version of Flower to" +" the command line:" msgstr "" -#: ../../source/index.rst:34 -msgid "Tutorial" +#: ../../source/how-to-install-flower.rst:68 +msgid "Advanced installation options" msgstr "" -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" +#: ../../source/how-to-install-flower.rst:71 +msgid "Install via Docker" msgstr "" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" +#: ../../source/how-to-install-flower.rst:73 +msgid ":doc:`Run Flower using Docker `" msgstr "" -#: ../../source/index.rst:99 -msgid "Legacy example guides" +#: ../../source/how-to-install-flower.rst:76 +msgid "Install pre-release" msgstr "" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" +#: ../../source/how-to-install-flower.rst:78 +msgid "" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens:" msgstr "" -#: None:-1 -msgid "API reference" +#: ../../source/how-to-install-flower.rst:85 +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra:" msgstr "" -#: ../../source/index.rst:137 -msgid "Reference docs" +#: ../../source/how-to-install-flower.rst:93 +msgid "Install nightly release" msgstr "" -#: ../../source/index.rst:153 -msgid "Contributor tutorials" +#: ../../source/how-to-install-flower.rst:95 +msgid "" +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases:" msgstr "" -#: ../../source/index.rst:160 -msgid "Contributor how-to guides" +#: ../../source/how-to-install-flower.rst:101 +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra:" msgstr "" -#: ../../source/index.rst:172 -msgid "Contributor explanations" +#: ../../source/how-to-monitor-simulation.rst:2 
+msgid "Monitor simulation" msgstr "" -#: ../../source/index.rst:178 -msgid "Contributor references" +#: ../../source/how-to-monitor-simulation.rst:4 +msgid "" +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." msgstr "" -#: ../../source/index.rst:-1 +#: ../../source/how-to-monitor-simulation.rst:9 msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." msgstr "" -#: ../../source/index.rst:2 -msgid "Flower Framework Documentation" +#: ../../source/how-to-monitor-simulation.rst:13 +msgid "Downloads" msgstr "" -#: ../../source/index.rst:7 +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." msgstr "" -#: ../../source/index.rst:11 -msgid "Join the Flower Community" +#: ../../source/how-to-monitor-simulation.rst:23 +msgid "" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." msgstr "" -#: ../../source/index.rst:13 -msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." 
+#: ../../source/how-to-monitor-simulation.rst:26 +msgid "If you are on an M1 Mac, it should be:" msgstr "" -#: ../../source/index.rst:15 -msgid "Join us on Slack" +#: ../../source/how-to-monitor-simulation.rst:33 +msgid "On the previous generation Intel Mac devices, it should be:" msgstr "" -#: ../../source/index.rst:23 -msgid "Flower Framework" +#: ../../source/how-to-monitor-simulation.rst:40 +msgid "" +"Open the respective configuration files and change them. Depending on " +"your device, use one of the two following commands:" msgstr "" -#: ../../source/index.rst:25 +#: ../../source/how-to-monitor-simulation.rst:51 msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" msgstr "" -#: ../../source/index.rst:30 -msgid "Tutorials" +#: ../../source/how-to-monitor-simulation.rst:67 +msgid "" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" msgstr "" -#: ../../source/index.rst:32 +#: ../../source/how-to-monitor-simulation.rst:78 msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." +"Your terminal editor should open and allow you to apply the following " +"configuration as before." 
msgstr "" -#: ../../source/index.rst:61 +#: ../../source/how-to-monitor-simulation.rst:94 msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." msgstr "" -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" +#: ../../source/how-to-monitor-simulation.rst:98 +msgid "Tracking metrics" msgstr "" -#: ../../source/index.rst:68 -msgid "And TensorFlow:" +#: ../../source/how-to-monitor-simulation.rst:100 +msgid "" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." msgstr "" -#: ../../source/index.rst:76 +#: ../../source/how-to-monitor-simulation.rst:108 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." +"Please include the following argument in your Python code when starting a" +" simulation." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "Now, you are ready to start your workload." msgstr "" -#: ../../source/index.rst:110 +#: ../../source/how-to-monitor-simulation.rst:121 msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" msgstr "" -#: ../../source/index.rst:120 -msgid "References" +#: ../../source/how-to-monitor-simulation.rst:127 +msgid "You can look at everything at http://127.0.0.1:8265 ." msgstr "" -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." +#: ../../source/how-to-monitor-simulation.rst:129 +msgid "" +"It's a Ray Dashboard. 
You can navigate to Metrics (on the left panel, the" +" lowest option)." msgstr "" -#: ../../source/index.rst:131::1 -msgid ":py:obj:`flwr `\\" +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "" +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." msgstr "" -#: ../../source/index.rst:131::1 flwr:1 of -msgid "Flower main package." +#: ../../source/how-to-monitor-simulation.rst:137 +msgid "" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." msgstr "" -#: ../../source/index.rst:148 -msgid "Contributor docs" +#: ../../source/how-to-monitor-simulation.rst:147 +msgid "Resource allocation" msgstr "" -#: ../../source/index.rst:150 +#: ../../source/how-to-monitor-simulation.rst:149 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." msgstr "" -#: ../../source/ref-api-cli.rst:2 -msgid "Flower CLI reference" +#: ../../source/how-to-monitor-simulation.rst:152 +msgid "" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. 
You can " +"check the system resources by running the following:" msgstr "" -#: ../../source/ref-api-cli.rst:7 -msgid "flower-simulation" +#: ../../source/how-to-monitor-simulation.rst:164 +msgid "In Google Colab, the result you see might be similar to this:" msgstr "" -#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" msgstr "" -#: ../../source/ref-api-cli.rst:27 -msgid "flower-client-app" +#: ../../source/how-to-monitor-simulation.rst:195 +msgid "Let’s also specify the resource for a single client." msgstr "" -#: ../../source/ref-api-cli.rst:37 -msgid "flower-server-app" +#: ../../source/how-to-monitor-simulation.rst:225 +msgid "" +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." msgstr "" -#: ../../source/ref-api/flwr.rst:2 -msgid "flwr" +#: ../../source/how-to-monitor-simulation.rst:228 +msgid "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." msgstr "" -#: ../../source/ref-api/flwr.client.rst:45 ../../source/ref-api/flwr.rst:25 -#: ../../source/ref-api/flwr.server.rst:49 -msgid "Modules" +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 +msgid "FAQ" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.client `\\" +#: ../../source/how-to-monitor-simulation.rst:237 +msgid "Q: I don't see any metrics logged." 
msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -msgid "Flower client." +#: ../../source/how-to-monitor-simulation.rst:239 +msgid "" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.common `\\" +#: ../../source/how-to-monitor-simulation.rst:243 +msgid "" +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." +#: ../../source/how-to-monitor-simulation.rst:246 +msgid "" +"A: You probably don't have Grafana running. Please check the running " +"services" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.server `\\" +#: ../../source/how-to-monitor-simulation.rst:252 +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"http://127.0.0.1:8265." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:38::1 flwr.server:1 -#: flwr.server.server.Server:1 of -msgid "Flower server." +#: ../../source/how-to-monitor-simulation.rst:254 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-monitor-simulation.rst:257 +msgid "Resources" msgstr "" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -msgid "Flower simulation." 
+#: ../../source/how-to-monitor-simulation.rst:259 +msgid "" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" msgstr "" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" +#: ../../source/how-to-monitor-simulation.rst:261 +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:13 -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -msgid "Functions" +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:8 +msgid "" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:19 +msgid "" +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. 
the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. In addition to that, clients managed by the " +"``VirtualClientEngine`` are:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of -msgid "Run Flower client app." +#: ../../source/how-to-run-simulations.rst:26 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -msgid ":py:obj:`run_supernode `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:31 +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to ``VirtualClientEngine``'s " +"internals." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of -msgid "Run Flower SuperNode." +#: ../../source/how-to-run-simulations.rst:33 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." 
msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:38 msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." +#: ../../source/how-to-run-simulations.rst:45 +msgid "Launch your Flower simulation" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:47 msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." +#: ../../source/how-to-run-simulations.rst:73 +msgid "VirtualClientEngine resources" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:30 -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:26 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 -msgid "Classes" +#: ../../source/how-to-run-simulations.rst:75 +msgid "" +"By default the VCE has access to all system resources (i.e. 
all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " +"`_" +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:97 +msgid "Assigning client resources" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." +#: ../../source/how-to-run-simulations.rst:99 +msgid "" +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-run-simulations.rst:103 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of -msgid "Flower ClientApp." +#: ../../source/how-to-run-simulations.rst:110 +msgid "``num_cpus`` indicates the number of CPU cores a client would get." 
msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -msgid ":py:obj:`NumPyClient `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:111 +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." msgstr "" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." +#: ../../source/how-to-run-simulations.rst:113 +msgid "Let's see a few examples:" msgstr "" -#: ../../source/ref-api/flwr.client.rst:52::1 -msgid ":py:obj:`flwr.client.mod `\\" +#: ../../source/how-to-run-simulations.rst:132 +msgid "" +"While the ``client_resources`` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." msgstr "" -#: ../../source/ref-api/flwr.client.rst:52::1 flwr.client.mod:1 of -msgid "Flower Built-in Mods." +#: ../../source/how-to-run-simulations.rst:140 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." 
msgstr "" -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of -msgid "Bases: :py:class:`~abc.ABC`" +#: ../../source/how-to-run-simulations.rst:145 +msgid "Simulation examples" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: 
../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: ../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -msgid "Methods" +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:151 +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -msgid "Evaluate the provided parameters using the locally held dataset." +#: ../../source/how-to-run-simulations.rst:154 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:159 +msgid "Multi-node Flower simulations" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." +#: ../../source/how-to-run-simulations.rst:161 +msgid "" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. 
Before starting your multi-node simulation ensure" +" that you:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:164 +msgid "Have the same Python environment in all nodes." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of -msgid "Get the run context from this client." +#: ../../source/how-to-run-simulations.rst:165 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:166 +msgid "" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." +#: ../../source/how-to-run-simulations.rst:168 +msgid "" +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`get_properties `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:171 +msgid "" +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." 
+#: ../../source/how-to-run-simulations.rst:174 +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +"``ray start --address='192.168.1.132:6379'``" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`set_context `\\ \\(context\\)" +#: ../../source/how-to-run-simulations.rst:178 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of -msgid "Apply a run context to this client." +#: ../../source/how-to-run-simulations.rst:181 +msgid "" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:185 +msgid "Multi-node simulation good-to-know" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." 
+#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 -msgid "Attributes" +#: ../../source/how-to-run-simulations.rst:189 +msgid "" +"User ``ray status`` to check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." 
msgstr "" -#: flwr.client.client.Client.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-run-simulations.rst:192 +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.mod.localdp_mod.LocalDpMod -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: flwr.simulation.run_simulation.run_simulation of -msgid "Parameters" +#: ../../source/how-to-run-simulations.rst:202 +msgid "Considerations for simulations" msgstr "" -#: flwr.client.client.Client.evaluate:3 of +#: ../../source/how-to-run-simulations.rst:206 msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." 
-msgstr "" - -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Returns" +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." msgstr "" -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-run-simulations.rst:209 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." 
+"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." msgstr "" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" +#: ../../source/how-to-run-simulations.rst:217 +msgid "GPU resources" msgstr "" 
-#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-run-simulations.rst:219 msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." +"The VCE assigns a share of GPU memory to a client that specifies the key " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " +"internally by the VCE) is by default:" msgstr "" -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-run-simulations.rst:222 msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." +"not aware of the total VRAM available on the GPUs. This means that if you" +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." msgstr "" -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/how-to-run-simulations.rst:225 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" msgstr "" -#: flwr.client.client.Client.get_parameters:7 of -msgid "The current local model parameters." +#: ../../source/how-to-run-simulations.rst:228 +msgid "" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" msgstr "" -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-run-simulations.rst:231 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." 
+"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." msgstr "" -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." +#: ../../source/how-to-run-simulations.rst:235 +msgid "" +"In addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." msgstr "" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 -msgid "ClientApp" +#: ../../source/how-to-run-simulations.rst:240 +msgid "TensorFlow with GPUs" msgstr "" -#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 -#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 -#: flwr.common.context.Context:1 flwr.common.message.Error:1 -#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 -#: flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -msgid "Bases: :py:class:`object`" +#: 
../../source/how-to-run-simulations.rst:242 +msgid "" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." msgstr "" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 -#: flwr.client.mod.localdp_mod.LocalDpMod:22 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -msgid "Examples" +#: ../../source/how-to-run-simulations.rst:249 +msgid "" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. 
It would look as " +"follows:" msgstr "" -#: flwr.client.client_app.ClientApp:5 of +#: ../../source/how-to-run-simulations.rst:272 msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." msgstr "" -#: flwr.client.client_app.ClientApp:16 of +#: ../../source/how-to-run-simulations.rst:276 +msgid "Multi-node setups" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:278 msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." msgstr "" -#: flwr.client.client_app.ClientApp:21 of +#: ../../source/how-to-run-simulations.rst:286 msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. 
This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`evaluate `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid "Return a decorator that registers the evaluate fn with the client app." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +msgid "" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`query `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -msgid "Return a decorator that registers the query fn with the client app." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +msgid "" +"Model updates can be persisted on the server-side by customizing " +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). 
It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -msgid ":py:obj:`train `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 +msgid "Save and load PyTorch checkpoints" msgstr "" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -msgid "Return a decorator that registers the train fn with the client app." +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 +msgid "" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 +msgid "" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." 
msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +msgid "" +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 +msgid "Install update" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "pip: add ``-U`` when installing." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +msgid "" +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -msgid ":py:obj:`to_client `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" msgstr "" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +msgid "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -msgid ":py:obj:`context `\\" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:121 +msgid "Required changes" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -msgid "The current (global) model parameters." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +msgid "The following breaking changes require manual updates." 
msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of -msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "General" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." -msgstr "" - -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." 
+"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of -msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "Strategies / ``start_server`` / ``start_simulation``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +msgid "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" msgstr "" -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." 
msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:2 -msgid "mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +msgid "Rename parameter/ndarray conversion functions:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of -msgid "Client-side adaptive clipping modifier." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "" -":py:obj:`fixedclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of -msgid "Client-side fixed clipping modifier." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +msgid "``fraction_eval`` --> ``fraction_evaluate``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid "" -":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " -"call\\_next\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +msgid "``eval_fn`` --> ``evaluate_fn``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of -msgid "Handle incoming message and return results, following the SecAgg protocol." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 +msgid "" +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -msgid "" -":py:obj:`secaggplus_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 msgid "" -"Handle incoming message and return results, following the SecAgg+ " -"protocol." +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "" -":py:obj:`message_size_mod `\\ \\(msg\\," -" ctxt\\, call\\_next\\)" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.message_size_mod:1 of -msgid "Message size mod." 
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +msgid "Custom strategies" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.parameters_size_mod:1 of -msgid "Parameters size mod." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 msgid "" -":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," -" sensitivity\\, ...\\)" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 -#: flwr.client.mod.localdp_mod.LocalDpMod:1 of -msgid "Modifier for local differential privacy." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 +msgid "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 -msgid "LocalDpMod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +msgid "Optional improvements" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 msgid "" -"This mod clips the client model updates and adds noise to the params " -"before sending them to the server." 
+"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 -#: flwr.client.mod.localdp_mod.LocalDpMod:6 of -msgid "It operates on messages of type `MessageType.TRAIN`." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 +msgid "" +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:8 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of -msgid "The value of the clipping norm." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:10 of -msgid "The sensitivity of the client model." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:349 +msgid "Further help" msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 msgid "" -"The privacy budget. Smaller value of epsilon indicates a higher level of " -"privacy protection." +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." 
msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +#: ../../source/how-to-upgrade-to-flower-next.rst:2 +msgid "Upgrade to Flower Next" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-next.rst:4 msgid "" -"The failure probability. The probability that the privacy mechanism fails" -" to provide the desired level of privacy. A smaller value of delta " -"indicates a stricter privacy guarantee." +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." msgstr "" -#: flwr.client.mod.localdp_mod.LocalDpMod:23 of -msgid "Create an instance of the local DP mod and add it to the client-side mods:" +#: ../../source/how-to-upgrade-to-flower-next.rst:11 +msgid "" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." msgstr "" -#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 -msgid "adaptiveclipping\\_mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:15 +msgid "Let's dive in!" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:68 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " -"wrapper." +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of -msgid "The wrapper sends the clipping_norm value to the client." 
+#: ../../source/how-to-upgrade-to-flower-next.rst:74 +msgid "or if you need Flower Next with simulation:" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of -msgid "This mod clips the client model updates before sending them to the server." -msgstr "" - -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +#: ../../source/how-to-upgrade-to-flower-next.rst:80 msgid "" -"It also sends KEY_NORM_BIT to the server for computing the new clipping " -"value." -msgstr "" - -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of -msgid "Notes" -msgstr "" - -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of -msgid "Consider the order of mods when using multiple." +"Ensure you set the following version constraint in your " +"``requirements.txt``" msgstr "" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of -msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +#: ../../source/how-to-upgrade-to-flower-next.rst:90 +msgid "or ``pyproject.toml``:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 -msgid "fixedclipping\\_mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:101 +msgid "Using Poetry" msgstr "" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:103 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." 
+"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of -msgid "Typically, fixedclipping_mod should be the last to operate on params." +#: ../../source/how-to-upgrade-to-flower-next.rst:106 +msgid "" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 -msgid "make\\_ffn" +#: ../../source/how-to-upgrade-to-flower-next.rst:123 +msgid "" +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 -msgid "message\\_size\\_mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +msgid "|clientapp_link|_" msgstr "" -#: flwr.client.mod.comms_mods.message_size_mod:3 of -msgid "This mod logs the size in bytes of the message being transmited." +#: ../../source/how-to-upgrade-to-flower-next.rst:134 +msgid "" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. 
Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 -msgid "parameters\\_size\\_mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:157 +msgid "|serverapp_link|_" msgstr "" -#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:159 msgid "" -"This mod logs the number of parameters transmitted in the message as well" -" as their size in bytes." +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" msgstr "" -#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 -msgid "secagg\\_mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:180 +msgid "Deployment" msgstr "" -#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 -msgid "secaggplus\\_mod" +#: ../../source/how-to-upgrade-to-flower-next.rst:182 +msgid "" +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" +#: ../../source/how-to-upgrade-to-flower-next.rst:185 +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" msgstr "" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -msgid "run\\_supernode" +#: ../../source/how-to-upgrade-to-flower-next.rst:201 +msgid "" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." 
msgstr "" -#: ../../source/ref-api/flwr.client.start_client.rst:2 -msgid "start\\_client" +#: ../../source/how-to-upgrade-to-flower-next.rst:229 +msgid "Simulation in CLI" msgstr "" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: ../../source/how-to-upgrade-to-flower-next.rst:231 msgid "" -"The IPv4 or IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" +" anymore. Here's an example:" msgstr "" -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. (default: None)" +#: ../../source/how-to-upgrade-to-flower-next.rst:264 +msgid "" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" msgstr "" -#: flwr.client.app.start_client:9 of +#: ../../source/how-to-upgrade-to-flower-next.rst:281 msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of -msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. 
Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +#: ../../source/how-to-upgrade-to-flower-next.rst:305 +msgid "Simulation in a Notebook" msgstr "" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of +#: ../../source/how-to-upgrade-to-flower-next.rst:307 msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" msgstr "" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: ../../source/how-to-upgrade-to-flower-next.rst:351 msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." msgstr "" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of -msgid "" -"Configure the transport layer. 
Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" +#: ../../source/how-to-upgrade-to-flower-next.rst:358 +msgid "Important" msgstr "" -#: flwr.client.app.start_client:31 of +#: ../../source/how-to-upgrade-to-flower-next.rst:360 msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is " -"no limit to the number of tries." +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" msgstr "" -#: flwr.client.app.start_client:35 of -msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." +#: ../../source/how-to-upgrade-to-flower-next.rst:366 +msgid "Happy migrating! 🚀" msgstr "" -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" +#: ../../source/how-to-use-built-in-mods.rst:2 +msgid "Use Built-in Mods" msgstr "" -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -msgid "Starting an SSL-enabled gRPC client using system certificates:" +#: ../../source/how-to-use-built-in-mods.rst:4 +msgid "" +"**Note: This tutorial covers experimental features. The functionality and" +" interfaces may change in future versions.**" msgstr "" -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -msgid "Starting an SSL-enabled gRPC client using provided certificates:" +#: ../../source/how-to-use-built-in-mods.rst:7 +msgid "" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. 
Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 -msgid "start\\_numpy\\_client" +#: ../../source/how-to-use-built-in-mods.rst:12 +msgid "What are Mods?" msgstr "" -#: flwr.client.app.start_numpy_client:5 of +#: ../../source/how-to-use-built-in-mods.rst:14 msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. The signature for a ``Mod`` is as follows:" msgstr "" -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +#: ../../source/how-to-use-built-in-mods.rst:23 +msgid "A typical mod function might look something like this:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" +#: ../../source/how-to-use-built-in-mods.rst:36 +msgid "Using Mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +#: ../../source/how-to-use-built-in-mods.rst:38 +msgid "To use mods in your ``ClientApp``, you can follow these steps:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of -msgid "Create Array from NumPy ndarray." +#: ../../source/how-to-use-built-in-mods.rst:41 +msgid "1. 
Import the required mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +#: ../../source/how-to-use-built-in-mods.rst:43 +msgid "First, import the built-in mod you intend to use:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." +#: ../../source/how-to-use-built-in-mods.rst:51 +msgid "2. Define your client function" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-built-in-mods.rst:53 msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." +#: ../../source/how-to-use-built-in-mods.rst:62 +msgid "3. Create the ``ClientApp`` with mods" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-built-in-mods.rst:64 msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. The order in which you provide the mods matters:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." 
+#: ../../source/how-to-use-built-in-mods.rst:78 +msgid "Order of execution" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-built-in-mods.rst:80 msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." +#: ../../source/how-to-use-built-in-mods.rst:83 +msgid "``example_mod_1`` (outermost mod)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +#: ../../source/how-to-use-built-in-mods.rst:84 +msgid "``example_mod_2`` (next mod)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." +#: ../../source/how-to-use-built-in-mods.rst:85 +msgid "" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid ":py:obj:`now `\\ \\(\\)" +#: ../../source/how-to-use-built-in-mods.rst:87 +msgid "``example_mod_2`` (on the way back)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." +#: ../../source/how-to-use-built-in-mods.rst:88 +msgid "``example_mod_1`` (outermost mod on the way back)" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-use-built-in-mods.rst:90 msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." +#: ../../source/how-to-use-built-in-mods.rst:97 +msgid "" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +#: ../../source/how-to-use-built-in-mods.rst:101 +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" msgstr "" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." +#: ../../source/how-to-use-differential-privacy.rst:2 +msgid "Use Differential Privacy" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:4 msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of -msgid "Array type." +#: ../../source/how-to-use-differential-privacy.rst:10 +msgid "" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:17 msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." +#: ../../source/how-to-use-differential-privacy.rst:21 +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Code `\\ \\(value\\)" +#: ../../source/how-to-use-differential-privacy.rst:26 +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." 
+#: ../../source/how-to-use-differential-privacy.rst:31 +msgid "Server-side Clipping" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:33 msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "Configs record." +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "server side clipping" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Context `\\ \\(state\\)" +#: ../../source/how-to-use-differential-privacy.rst:43 +msgid "" +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " +"corresponding input parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of -msgid "State of your run." +#: ../../source/how-to-use-differential-privacy.rst:64 +msgid "Client-side Clipping" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +#: ../../source/how-to-use-differential-privacy.rst:66 +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +"``Mods`` to perform the clipping. 
Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "client side clipping" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:78 msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:97 msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." +#: ../../source/how-to-use-differential-privacy.rst:115 +msgid "" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/how-to-use-differential-privacy.rst:-1 +msgid "local DP mod" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." +#: ../../source/how-to-use-differential-privacy.rst:125 +msgid "Below is a code example that shows how to use ``LocalDpMod``:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +#: ../../source/how-to-use-differential-privacy.rst:140 +msgid "" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." +#: ../../source/how-to-use-differential-privacy.rst:145 +msgid "Local Training using Privacy Engines" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:147 msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." 
+#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +#: ../../source/how-to-use-strategies.rst:4 +msgid "" +"Flower allows full customization of the learning process through the " +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of -msgid "A dataclass that stores information about an error that occurred." +#: ../../source/how-to-use-strategies.rst:7 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +#: ../../source/how-to-use-strategies.rst:10 +msgid "Use an existing strategy, for example, ``FedAvg``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 +msgid "Customize an existing strategy with callback functions" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 +msgid "Implement a novel strategy" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." +#: ../../source/how-to-use-strategies.rst:15 +msgid "Use an existing strategy" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +#: ../../source/how-to-use-strategies.rst:17 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. 
A built-in strategy can be instantiated as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." +#: ../../source/how-to-use-strategies.rst:27 +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the ``start_server`` function. It is usually recommended" +" to adjust a few parameters during instantiation:" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:45 msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." +#: ../../source/how-to-use-strategies.rst:49 +msgid "Configuring client fit and client evaluate" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:51 msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -msgid "State of your application from the viewpoint of the entity using it." +"The server can pass new configuration values to the client each round by " +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." 
msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageType `\\ \\(\\)" +#: ../../source/how-to-use-strategies.rst:84 +msgid "" +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of -msgid "Message type." +#: ../../source/how-to-use-strategies.rst:89 +msgid "" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +#: ../../source/how-to-use-strategies.rst:93 +msgid "Configuring server-side evaluation" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of -msgid "Legacy message type." +#: ../../source/how-to-use-strategies.rst:95 +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to ``evaluate_fn``." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:101 msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of -msgid "A dataclass holding metadata associated with the current message." 
+#: ../../source/index.rst:34 +msgid "Tutorial" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of -msgid "Metrics record." +#: ../../source/index.rst:81 ../../source/index.rst:85 +msgid "How-to guides" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`NDArray `\\" +#: ../../source/index.rst:106 +msgid "Legacy example guides" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +#: ../../source/index.rst:114 ../../source/index.rst:119 +msgid "Explanations" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +#: None:-1 +msgid "API reference" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." +#: ../../source/index.rst:145 +msgid "Reference docs" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +#: ../../source/index.rst:160 +msgid "Contributor tutorials" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -msgid "Parameters record." +#: ../../source/index.rst:167 +msgid "Contributor how-to guides" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +#: ../../source/index.rst:179 +msgid "Contributor explanations" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." 
+#: ../../source/index.rst:185 +msgid "Contributor references" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:-1 msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of -msgid "RecordSet stores groups of parameters, metrics and configs." +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/index.rst:7 msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." +#: ../../source/index.rst:11 +msgid "Join the Flower Community" msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +#: ../../source/index.rst:13 +msgid "" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." msgstr "" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." +#: ../../source/index.rst:16 +msgid "Join us on Slack" msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:2 -msgid "Array" +#: ../../source/index.rst:23 +msgid "Flower Framework" msgstr "" -#: flwr.common.record.parametersrecord.Array:3 of +#: ../../source/index.rst:25 msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." 
+"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." msgstr "" -#: flwr.common.record.parametersrecord.Array:6 of -msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" +#: ../../source/index.rst:30 +msgid "Tutorials" msgstr "" -#: flwr.common.record.parametersrecord.Array:8 of +#: ../../source/index.rst:32 msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." +"A learning-oriented series of federated learning tutorials, the best " +"place to start." msgstr "" -#: flwr.common.record.parametersrecord.Array:12 of +#: ../../source/index.rst:62 msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." -msgstr "" - -#: flwr.common.record.parametersrecord.Array:15 of -msgid "A buffer of bytes containing the data." +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -msgid ":py:obj:`numpy `\\ \\(\\)" +#: ../../source/index.rst:70 +msgid "We also made video tutorials for PyTorch:" msgstr "" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of -msgid "Return the array as a NumPy array." 
+#: ../../source/index.rst:75 +msgid "And TensorFlow:" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`dtype `\\" +#: ../../source/index.rst:83 +msgid "" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`shape `\\" +#: ../../source/index.rst:116 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`stype `\\" +#: ../../source/index.rst:128 +msgid "References" msgstr "" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -msgid ":py:obj:`data `\\" +#: ../../source/index.rst:130 +msgid "Information-oriented API reference and other reference material." msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 -msgid "ClientMessage" +#: ../../source/index.rst:139::1 +msgid ":py:obj:`flwr `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`evaluate_res `\\" +#: ../../source/index.rst:139::1 flwr:1 of +msgid "Flower main package." msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid ":py:obj:`fit_res `\\" +#: ../../source/index.rst:155 +msgid "Contributor docs" msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +#: ../../source/index.rst:157 msgid "" -":py:obj:`get_parameters_res " -"`\\" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." 
msgstr "" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -msgid "" -":py:obj:`get_properties_res " -"`\\" +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:2 -msgid "Code" +#: ../../source/ref-api-cli.rst:7 +msgid "flwr CLI" msgstr "" -#: flwr.common.typing.Code:1 of -msgid "Bases: :py:class:`~enum.Enum`" +#: ../../flwr:1 +msgid "flwr is the Flower command line interface." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`OK `\\" +#: ../../source/ref-api-cli.rst +msgid "Options" msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +#: ../../flwr:1 +msgid "Install completion for the current shell." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../flwr:1 msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +"Show completion for the current shell, to copy it or customize the " +"installation." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." msgstr "" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 +#: ../../flwr build:1 msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" msgstr "" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 -msgid "ConfigsRecord" +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." 
msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: ../../flwr install:1 +msgid "Install a Flower App Bundle." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Remove all items from R." +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "Return number of Bytes stored in this object." +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of -msgid "d defaults to None." 
+#: ../../flwr install:1 +msgid "" +"This will install ``target_project`` to ``./docs/flwr/``. By default, " +"``flwr-dir`` is equal to:" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +#: ../../flwr install:1 +msgid "The desired install path." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -msgstr "" +#: ../../source/ref-api-cli.rst +#, fuzzy +msgid "Arguments" +msgstr "Argumento de compilação" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -msgid "Update R from dict/iterable E and F." -msgstr "" +#: ../../flwr install:1 log:1 new:1 run:1 +#, fuzzy +msgid "Optional argument" +msgstr "Argumento de compilação" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../flwr install:1 +msgid "The source FAB file to install." msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -msgid "This function counts booleans as occupying 1 Byte." +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." 
msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:2 -msgid "Context" +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" msgstr "" -#: flwr.common.context.Context:3 of -msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. across " -"multiple rounds)" +#: ../../flwr log run +msgid "default" msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:28::1 -msgid ":py:obj:`state `\\" +#: ../../flwr log:1 +msgid "``True``" msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -msgid "DisconnectRes" -msgstr "" +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "Argumento de compilação" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -msgid ":py:obj:`reason `\\" +#: ../../flwr log:1 +msgid "The Flower run ID to query" msgstr "" -#: ../../source/ref-api/flwr.common.Error.rst:2 -msgid "Error" +#: ../../flwr log:1 +msgid "Path of the Flower project to run" msgstr "" -#: flwr.common.message.Error:3 of -msgid "An identifier for the error." +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" msgstr "" -#: flwr.common.message.Error:5 of -msgid "A reason for why the error arose (e.g. an exception stack-trace)" +#: ../../flwr new:1 +msgid "Create new Flower App." msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`code `\\" +#: ../../flwr new:1 +msgid "The ML framework to use" msgstr "" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -msgid "Error code." 
+#: ../../flwr new +msgid "options" msgstr "" -#: flwr.common.Error.code:1::1 of -msgid ":py:obj:`reason `\\" +#: ../../flwr new:1 +msgid "" +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" msgstr "" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of -msgid "Reason reported about the error." +#: ../../flwr new:1 +msgid "The Flower username of the author" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 -msgid "EvaluateIns" +#: ../../flwr new:1 +#, fuzzy +msgid "The name of the Flower App" +msgstr "O nome do repositório da imagem base." + +#: ../../flwr run:1 +msgid "Run Flower App." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: ../../flwr run:1 +msgid "" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -msgid "EvaluateRes" +#: ../../flwr run:1 +msgid "" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../flwr run:1 +msgid "" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`loss `\\" +#: ../../flwr run:1 +#, fuzzy +msgid "``False``" +msgstr "``FLWR_VERSION``" + +#: ../../flwr run:1 +#, fuzzy +msgid "Path of the Flower App to run." +msgstr "O nome do repositório da imagem base." + +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." 
msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: ../../source/ref-api-cli.rst:16 +msgid "flower-simulation" msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:2 -msgid "EventType" +#: ../../source/ref-api-cli.rst:36 +msgid "flower-supernode" msgstr "" -#: flwr.common.telemetry.EventType:1 of -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +#: ../../source/ref-api-cli.rst:46 +msgid "flower-server-app" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:50 msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -msgid "Encode the string using the codec registered for encoding." +#: ../../source/ref-api-cli.rst:64 +msgid "flower-superexec" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" +#: ../../source/ref-api/flwr.rst:2 +msgid "flwr" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of -msgid "Return a copy with all occurrences of substring old replaced by new." 
+#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 +msgid "Modules" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.client `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of -msgid "" -"Return a list of the substrings in the string, using sep as the separator" -" string." +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.common `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of -msgid "Concatenate any number of strings." +#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.server `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`capitalize `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -msgid "Return a capitalized version of the string." 
+#: ../../source/ref-api/flwr.rst:35::1 +msgid ":py:obj:`flwr.simulation `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`casefold `\\ \\(\\)" +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of +msgid "Flower simulation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of -msgid "Return a version of the string suitable for caseless comparisons." +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`title `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 +msgid "Functions" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of -msgid "Return a version of the string where each word is titlecased." +#: ../../source/ref-api/flwr.client.rst:23::1 +msgid "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of -msgid "Return a centered string of length width." -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." 
-msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of -msgid "Return a copy where all tab characters are expanded using spaces." +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:23::1 msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 +msgid "Classes" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of -msgid "Partition the string into three parts using the given separator." 
+#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`Client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:32::1 msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of -msgid "Return a left-justified string of length width." +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lower `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of +msgid "Flower ClientApp." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -msgid "Return a copy of the string converted to lowercase." +#: ../../source/ref-api/flwr.client.rst:32::1 +msgid ":py:obj:`NumPyClient `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of -msgid "Return a copy of the string with leading whitespace removed." 
+#: ../../source/ref-api/flwr.client.rst:50::1 +msgid ":py:obj:`flwr.client.mod `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of +msgid "Flower Built-in Mods." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]." +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of +msgid "Bases: :py:class:`~abc.ABC`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, " -"start\\[\\, end\\]\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: 
../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 +msgid "Methods" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, " -"fillchar\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`evaluate `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of -msgid "Return a right-justified string of length width." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`fit `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of -msgid "Return a copy of the string with trailing whitespace removed." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of +msgid "Get the run context from this client." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of -msgid "Return a list of the lines in the string, breaking at line boundaries." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -msgid "Return a copy of the string with leading and trailing whitespace removed." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`get_properties `\\ \\(ins\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`swapcase `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of -msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`set_context `\\ \\(context\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of +msgid "Apply a run context to this client." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of -msgid "Replace each character in the string using the given translation table." +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`upper `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -msgid "Return a copy of the string converted to uppercase." 
+#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: ../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +msgid "Attributes" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" +#: flwr.client.Client.context:1::1 of +msgid ":py:obj:`context `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S 
starts with the specified prefix, False otherwise." +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" +msgstr "" + +#: flwr.client.client.Client.evaluate:3 of msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return True if S ends with the specified suffix, False otherwise." 
+#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Returns" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.evaluate:8 of msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -msgid "Return a str with the given prefix string removed if present." 
+#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client.fit:3 of msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -msgid "Return a str with the given suffix string removed if present." 
+#: flwr.client.client.Client.fit:8 of +msgid "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isascii `\\ \\(\\)" +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -msgid "Return True if all characters in the string are ASCII, False otherwise." +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`islower `\\ \\(\\)" +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -msgid "Return True if the string is a lowercase string, False otherwise." +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isupper `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +msgid "ClientApp" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -msgid "Return True if the string is an uppercase string, False otherwise." 
+#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "Bases: :py:class:`object`" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`istitle `\\ \\(\\)" +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of -msgid "Return True if the string is a title-cased string, False otherwise." +#: flwr.client.client_app.ClientApp:5 of +msgid "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isspace `\\ \\(\\)" +#: flwr.client.client_app.ClientApp:16 of +msgid "" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of -msgid "Return True if the string is a whitespace string, False otherwise." +#: flwr.client.client_app.ClientApp:21 of +msgid "" +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdecimal `\\ \\(\\)" +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`evaluate `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of -msgid "Return True if the string is a decimal string, False otherwise." 
-msgstr "" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isdigit `\\ \\(\\)" +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid "Return a decorator that registers the evaluate fn with the client app." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -msgid "Return True if the string is a digit string, False otherwise." +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`query `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isnumeric `\\ \\(\\)" +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of +msgid "Return a decorator that registers the query fn with the client app." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of -msgid "Return True if the string is a numeric string, False otherwise." +#: flwr.client.client_app.ClientApp.evaluate:1::1 of +msgid ":py:obj:`train `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalpha `\\ \\(\\)" +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of +msgid "Return a decorator that registers the train fn with the client app." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of -msgid "Return True if the string is an alphabetic string, False otherwise." 
+#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isalnum `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of -msgid "Return True if the string is an alpha-numeric string, False otherwise." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isidentifier `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of -msgid "Return True if the string is a valid Python identifier, False otherwise." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`get_context `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`isprintable `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -msgid "Return True if the string is printable, False otherwise." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid "" +":py:obj:`get_properties `\\ " +"\\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." 
msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." +":py:obj:`set_context `\\ " +"\\(context\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +msgid ":py:obj:`to_client `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from args and kwargs." +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`format_map `\\ \\(mapping\\)" +#: flwr.client.NumPyClient.context:1::1 of +msgid ":py:obj:`context `\\" msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid "Return a formatted version of S, using substitutions from mapping." +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -msgid ":py:obj:`maketrans `\\" +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +msgid "" +"Configuration parameters which allow the server to influence evaluation " +"on the client. 
It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -msgid "Return a translation table usable for str.translate()." +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`PING `\\" +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_ENTER `\\" +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_CLIENT_LEAVE `\\" +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of +msgid "" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_ENTER `\\" +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +msgid "" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_SERVER_LEAVE `\\" +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. * " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.mod.rst:2 +msgid "mod" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_CONNECT `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of +msgid "Client-side adaptive clipping modifier." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_ENTER `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid "" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid ":py:obj:`START_DRIVER_LEAVE `\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of +msgid "Client-side fixed clipping modifier." 
msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +msgid "Message size mod." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +msgid "Parameters size mod." msgstr "" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" msgstr "" -#: flwr.common.EventType.capitalize:1::1 of -msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." 
msgstr "" -#: flwr.common.EventType.capitalize:3 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." -msgstr "" - -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of -msgid "Padding is done using the specified fill character (default is a space)." +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" msgstr "" -#: flwr.common.EventType.count:1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." -msgstr "" - -#: flwr.common.EventType.encode:3 of -msgid "encoding" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." msgstr "" -#: flwr.common.EventType.encode:4 of -msgid "The encoding in which to encode the string." +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +msgid "" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" msgstr "" -#: flwr.common.EventType.encode:9 of -msgid "errors" +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of +msgid "Modifier for local differential privacy." msgstr "" -#: flwr.common.EventType.encode:6 of -msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." 
+#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 +msgid "LocalDpMod" msgstr "" -#: flwr.common.EventType.endswith:1 of +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." msgstr "" -#: flwr.common.EventType.expandtabs:3 of -msgid "If tabsize is not given, a tab size of 8 characters is assumed." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." msgstr "" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of +msgid "The value of the clipping norm." msgstr "" -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -msgid "Return -1 on failure." +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." msgstr "" -#: flwr.common.EventType.format:1 of +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. The substitutions are identified by braces ('{' and '}')." +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." 
msgstr "" -#: flwr.common.EventType.format_map:1 of +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." msgstr "" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of -msgid "Raises ValueError when the substring is not found." +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" msgstr "" -#: flwr.common.EventType.isalnum:3 of -msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" msgstr "" -#: flwr.common.EventType.isalpha:3 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." msgstr "" -#: flwr.common.EventType.isascii:3 of -msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of +msgid "The wrapper sends the clipping_norm value to the client." msgstr "" -#: flwr.common.EventType.isdecimal:3 of -msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." 
+#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." msgstr "" -#: flwr.common.EventType.isdigit:3 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." msgstr "" -#: flwr.common.EventType.isidentifier:3 of -msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of +msgid "Notes" msgstr "" -#: flwr.common.EventType.islower:3 of -msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." msgstr "" -#: flwr.common.EventType.isnumeric:3 of -msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." msgstr "" -#: flwr.common.EventType.isprintable:3 of -msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." 
+#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +msgid "fixedclipping\\_mod" msgstr "" -#: flwr.common.EventType.isspace:3 of +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." msgstr "" -#: flwr.common.EventType.istitle:3 of -msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." msgstr "" -#: flwr.common.EventType.isupper:3 of -msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" msgstr "" -#: flwr.common.EventType.join:3 of -msgid "" -"The string whose method is called is inserted in between each given " -"string. The result is returned as a new string." +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" msgstr "" -#: flwr.common.EventType.join:6 of -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." msgstr "" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of -msgid "If chars is given and not None, remove characters in chars instead." 
+#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +msgid "parameters\\_size\\_mod" msgstr "" -#: flwr.common.EventType.maketrans:3 of +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." msgstr "" -#: flwr.common.EventType.partition:3 of -msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" msgstr "" -#: flwr.common.EventType.partition:7 of -msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +msgid "secaggplus\\_mod" msgstr "" -#: flwr.common.EventType.removeprefix:3 of -msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." +#: ../../source/ref-api/flwr.client.start_client.rst:2 +msgid "start\\_client" msgstr "" -#: flwr.common.EventType.removesuffix:3 of +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." 
+"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." msgstr "" -#: flwr.common.EventType.replace:5 of -msgid "count" +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. (default: None)" msgstr "" -#: flwr.common.EventType.replace:4 of +#: flwr.client.app.start_client:9 of msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" msgstr "" -#: flwr.common.EventType.replace:7 of +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." 
msgstr "" -#: flwr.common.EventType.rpartition:3 of +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." msgstr "" -#: flwr.common.EventType.rpartition:7 of +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." -msgstr "" - -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -msgid "sep" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" msgstr "" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of -msgid "The separator used to split the string." +#: flwr.client.app.start_client:31 of +msgid "" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." msgstr "" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: flwr.client.app.start_client:35 of msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." 
msgstr "" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of -msgid "maxsplit" +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" msgstr "" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of -msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of +msgid "Starting an SSL-enabled gRPC client using system certificates:" msgstr "" -#: flwr.common.EventType.rsplit:13 of -msgid "Splitting starts at the end of the string and works to the front." +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of +msgid "Starting an SSL-enabled gRPC client using provided certificates:" msgstr "" -#: flwr.common.EventType.split:13 of -msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. With natural text that includes punctuation, consider using " -"the regular expression module." +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 +msgid "start\\_numpy\\_client" msgstr "" -#: flwr.common.EventType.splitlines:3 of +#: flwr.client.app.start_numpy_client:5 of msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." msgstr "" -#: flwr.common.EventType.startswith:1 of -msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." 
+#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." msgstr "" -#: flwr.common.EventType.title:3 of -msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" msgstr "" -#: flwr.common.EventType.translate:5 of -msgid "table" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" msgstr "" -#: flwr.common.EventType.translate:4 of -msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of +msgid "Create Array from NumPy ndarray." msgstr "" -#: flwr.common.EventType.translate:7 of -msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" msgstr "" -#: flwr.common.EventType.zfill:3 of -msgid "The string is never truncated." +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 -msgid "FitIns" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." 
msgstr "" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 -msgid "FitRes" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`num_examples `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" msgstr "" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -msgid ":py:obj:`metrics `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 -msgid "GetParametersIns" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." 
msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 -msgid "GetParametersRes" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid ":py:obj:`now `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." msgstr "" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -msgid ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.common.rst:30::1 +msgid "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 -msgid "GetPropertiesIns" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 -msgid "GetPropertiesRes" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of +msgid "Array type." msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -msgid ":py:obj:`properties `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." 
msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:2 -msgid "Message" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Code `\\ \\(value\\)" msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of -msgid "A dataclass including information about the message to be executed." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." msgstr "" -#: flwr.common.message.Message:5 of -msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Config `\\" msgstr "" -#: flwr.common.message.Message:8 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." +"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of -msgid "Construct a reply message indicating an error happened." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "Configs record." 
msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of -msgid "Create a reply to this message with specified content and TTL." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of +msgid "Context of your run." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_content `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of -msgid "Return True if message has content, else False." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -msgid ":py:obj:`has_error `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of -msgid "Return True if message has an error, else False." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Error:1 of +msgid "A dataclass that stores information about an error that occurred." msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`content `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" msgstr "" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of -msgid "The content of this message." 
-msgstr "" - -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`error `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." msgstr "" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of -msgid "Error captured by this message." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.common.Message.content:1::1 of -msgid ":py:obj:`metadata `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." msgstr "" -#: flwr.common.message.Message.create_error_reply:3 of -msgid "The error that was encountered." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`EventType `\\ \\(value\\)" msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of -msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." msgstr "" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of -msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. 
This " -"follows the equation:" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" msgstr "" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." msgstr "" -#: flwr.common.message.Message.create_reply:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" msgstr "" -#: flwr.common.message.Message.create_reply:7 of -msgid "The content for the reply message." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." msgstr "" -#: flwr.common.message.Message.create_reply:16 of -msgid "A new `Message` instance representing the reply." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 -msgid "MessageType" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`EVALUATE `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`QUERY `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." 
msgstr "" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -msgid ":py:obj:`TRAIN `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 -msgid "MessageTypeLegacy" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PARAMETERS `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -msgid ":py:obj:`GET_PROPERTIES `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -msgid "An identifier for the current run." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of -msgid "An identifier for the current message." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of +msgid "State of your application from the viewpoint of the entity using it." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of -msgid "An identifier for the node sending this message." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageType `\\ \\(\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of -msgid "An identifier for the node receiving this message." 
+#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of +msgid "Message type." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of -msgid "An identifier for the message this message replies to." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" msgstr "" -#: flwr.common.message.Metadata:13 of -msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of +msgid "Legacy message type." msgstr "" -#: flwr.common.message.Metadata:16 of -msgid "Time-to-live for this message in seconds." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of -msgid "A string that encodes the action to be executed on the receiving end." +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of +msgid "A dataclass holding metadata associated with the current message." msgstr "" -#: flwr.common.message.Metadata:21 of -msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Metrics `\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`created_at `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of -msgid "Unix timestamp when the message was created." 
+#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of +msgid "Metrics recod." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`dst_node_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArray `\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`group_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of -msgid "An identifier for grouping messages." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`NDArrays `\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`message_type `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`partition_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." msgstr "" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of -msgid "An identifier telling which data partition a ClientApp should use." 
+#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`reply_to_message `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "Parameters record." msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`run_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Properties `\\" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`src_node_id `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" msgstr "" -#: flwr.common.Metadata.created_at:1::1 of -msgid ":py:obj:`ttl `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." msgstr "" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 -#: of -msgid "Time-to-live for this message." +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 -msgid "MetricsRecord" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.recordset.RecordSet:1 of +msgid "RecordSet stores groups of parameters, metrics and configs." 
msgstr "" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Array.rst:2 +msgid "Array" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: flwr.common.record.parametersrecord.Array:3 of +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: flwr.common.record.parametersrecord.Array:6 of +msgid "" +"A string representing the data type of the serialised object (e.g. 
" +"`np.float32`)" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.common.record.parametersrecord.Array:8 of msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: flwr.common.record.parametersrecord.Array:12 of +msgid "" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -msgid "NDArray" +#: flwr.common.record.parametersrecord.Array:15 of +msgid "A buffer of bytes containing the data." msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensors `\\" +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +msgid ":py:obj:`numpy `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 -msgid ":py:obj:`tensor_type `\\" +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of +msgid "Return the array as a NumPy array." msgstr "" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 -msgid "ParametersRecord" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`dtype `\\" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`shape `\\" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of -msgid "" -"A dataclass storing named Arrays in order. 
This means that it holds " -"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`stype `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`clear `\\ \\(\\)" +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +msgid ":py:obj:`data `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`count_bytes `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 +msgid "ClientMessage" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`evaluate_res `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`items `\\ \\(\\)" -msgstr "" - -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`keys `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid ":py:obj:`fit_res `\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 +msgid "" +":py:obj:`get_parameters_res " +"`\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +":py:obj:`get_properties_res " +"`\\" msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -msgid ":py:obj:`values `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Code.rst:2 +msgid "Code" msgstr "" -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of -msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the 
serialized object (e.g. of NumPy " -"array) needed for deseralization." +#: flwr.common.typing.Code:1 of +msgid "Bases: :py:class:`~enum.Enum`" msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 -msgid "ReconnectIns" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`OK `\\" msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 -msgid ":py:obj:`seconds `\\" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 -msgid "RecordSet" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`configs_records `\\" +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" msgstr "" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of -msgid "Dictionary holding ConfigsRecord instances." +#: ../../source/ref-api/flwr.common.Code.rst:26::1 +msgid "" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`metrics_records `\\" +#: ../../source/ref-api/flwr.common.Config.rst:2 +msgid "Config" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -msgid "Dictionary holding MetricsRecord instances." 
+#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 +msgid "ConfigsRecord" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of -msgid ":py:obj:`parameters_records `\\" +#: flwr.common.record.configsrecord.ConfigsRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " +"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of -msgid "Dictionary holding ParametersRecord instances." +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 -msgid "ServerMessage" +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`evaluate_ins `\\" +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. If memory is a concern, set it " +"to False." 
msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 -msgid ":py:obj:`fit_ins `\\" +#: flwr.common.record.configsrecord.ConfigsRecord:21 of +msgid "" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: flwr.common.record.configsrecord.ConfigsRecord:29 of msgid "" -":py:obj:`get_parameters_ins " -"`\\" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" msgstr "" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: flwr.common.record.configsrecord.ConfigsRecord:42 of msgid "" -":py:obj:`get_properties_ins " -"`\\" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. 
If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:2 -msgid "Status" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`code `\\" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 -msgid ":py:obj:`message `\\" +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of +msgid "Return number of Bytes stored in this object." msgstr "" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 -msgid "array\\_from\\_numpy" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 -msgid "bytes\\_to\\_ndarray" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.configure.rst:2 -msgid "configure" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.common.event.rst:2 -msgid "event" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.log.rst:2 -msgid "log" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." 
msgstr "" -#: logging.Logger.log:3 of -msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`popitem `\\ \\(\\)" msgstr "" -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." msgstr "" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 -msgid "ndarray\\_to\\_bytes" +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 -msgid "ndarrays\\_to\\_parameters" +#: collections.abc.MutableMapping.clear:1::1 of +msgid "" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: ../../source/ref-api/flwr.common.now.rst:2 -msgid "now" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of +msgid "" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" msgstr "" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 -msgid "parameters\\_to\\_ndarrays" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of +msgid "This function counts booleans as occupying 1 Byte." 
msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_server_app `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Context.rst:2 +msgid "Context" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.run_serverapp.run_server_app:1 of -msgid "Run Flower server app." +#: flwr.common.context.Context:3 of +msgid "The ID that identifies the node." msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -msgid ":py:obj:`run_superlink `\\ \\(\\)" +#: flwr.common.context.Context:5 of +msgid "" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.run_superlink:1 of -msgid "Run Flower SuperLink (Driver API and Fleet API)." +#: flwr.common.context.Context:8 of +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: flwr.common.context.Context:15 of msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. across multiple rounds)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." 
+#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`node_id `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`ClientManager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`node_config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.ClientManager:1 of -msgid "Abstract base class for managing Flower clients." +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`state `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`Driver `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.Context.rst:31::1 +msgid ":py:obj:`run_config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.driver.driver.Driver:1 of -msgid "Abstract base Driver class for the Driver API." +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 +msgid "DisconnectRes" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`History `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 +msgid ":py:obj:`reason `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.history.History:1 of -msgid "History class for training and/or evaluation metrics collection." +#: ../../source/ref-api/flwr.common.Error.rst:2 +msgid "Error" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +#: flwr.common.message.Error:3 of +msgid "An identifier for the error." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Legacy Context." +#: flwr.common.message.Error:5 of +msgid "A reason for why the error arose (e.g. 
an exception stack-trace)" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`code `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of +msgid "Error code." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_app.ServerApp:1 of -msgid "Flower ServerApp." +#: flwr.common.Error.code:1::1 of +msgid ":py:obj:`reason `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +msgid "Reason reported about the error." msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_config.ServerConfig:1 of -msgid "Flower server config." +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 +msgid "EvaluateIns" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Provides a pool of available clients." +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -msgid ":py:obj:`flwr.server.strategy `\\" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 +msgid "EvaluateRes" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." 
+#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -msgid ":py:obj:`flwr.server.workflow `\\" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`loss `\\" msgstr "" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.workflow:1 of -msgid "Workflows." +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 -msgid "ClientManager" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:2 +msgid "EventType" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "Return all available clients." +#: flwr.common.telemetry.EventType:1 of +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`num_available `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of -msgid "Return the number of available clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of +msgid "Encode the string using the codec registered for encoding." 
msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`register `\\ \\(client\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of -msgid "Register Flower ClientProxy instance." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +msgid "Return a copy with all occurrences of substring old replaced by new." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -msgstr "" - -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of -msgid "Sample a number of Flower ClientProxy instances." -msgstr "" - -#: flwr.server.client_manager.ClientManager.all:1::1 of -msgid ":py:obj:`unregister `\\ \\(client\\)" -msgstr "" - -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of -msgid "Unregister Flower ClientProxy instance." 
+":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +"Return a list of the substrings in the string, using sep as the separator" +" string." msgstr "" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of -msgid "Wait until at least `num_clients` are available." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of -msgid "**num_available** -- The number of currently available clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" msgstr "" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of -msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of +msgid "Concatenate any number of strings." msgstr "" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of -msgid "This method is idempotent." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`capitalize `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.Driver.rst:2 -msgid "Driver" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of +msgid "Return a capitalized version of the string." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`casefold `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid "Create a new message with specified parameters." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of +msgid "Return a version of the string suitable for caseless comparisons." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of -msgid ":py:obj:`get_node_ids `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`title `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of -msgid "Get node IDs." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of +msgid "Return a version of the string where each word is titlecased." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of -msgid "Pull messages based on message IDs." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of +msgid "Return a centered string of length width." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" -msgstr "" - -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of -msgid "Push messages to specified node IDs." +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of -msgid "Push messages to specified node IDs and pull the reply messages." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:3 of -msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of +msgid "Return a copy where all tab characters are expanded using spaces." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:6 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." 
+":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:12 of -msgid "The ID of the destination node to which the message is being sent." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:14 of -msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +msgid "Partition the string into three parts using the given separator." msgstr "" -#: flwr.server.driver.driver.Driver.create_message:17 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.create_message:23 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." 
+":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:3 of -msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of +msgid "Return a left-justified string of length width." msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:6 of -msgid "An iterable of message IDs for which reply messages are to be retrieved." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lower `\\ \\(\\)" msgstr "" -#: flwr.server.driver.driver.Driver.pull_messages:9 of -msgid "**messages** -- An iterable of messages received." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of +msgid "Return a copy of the string converted to lowercase." msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:3 of -msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of -msgid "An iterable of messages to be sent." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of +msgid "Return a copy of the string with leading whitespace removed." msgstr "" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." 
+":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:14 of -msgid "**replies** -- An iterable of reply messages received from the SuperLink." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" msgstr "" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of -msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of +msgid "Return a right-justified string of length width." 
msgstr "" -#: ../../source/ref-api/flwr.server.History.rst:2 -msgid "History" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of +msgid "Return a copy of the string with trailing whitespace removed." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "Add one loss entry (from centralized evaluation)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of -msgid "Add one loss entry (from distributed evaluation)." -msgstr "" - -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of +msgid "Return a list of the lines in the string, breaking at line boundaries." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of -msgid "Add metrics entries (from centralized evaluation)." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of -msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of +msgid "Return a copy of the string with leading and trailing whitespace removed." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of -msgid "Add metrics entries (from distributed evaluation)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`swapcase `\\ \\(\\)" msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." msgstr "" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of -msgid "Add metrics entries (from distributed fit)." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 -msgid "LegacyContext" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of +msgid "Replace each character in the string using the given translation table." 
msgstr "" -#: flwr.server.compat.legacy_context.LegacyContext:1 of -msgid "Bases: :py:class:`~flwr.common.context.Context`" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`upper `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of +msgid "Return a copy of the string converted to uppercase." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`strategy `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`client_manager `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S starts with the specified prefix, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`history `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 -msgid ":py:obj:`state `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return True if S ends with the specified suffix, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`client_manager `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of +msgid "Return a str with the given prefix string removed if present." 
msgstr "" -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of -msgid "Return ClientManager." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of +msgid "Return a str with the given suffix string removed if present." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -msgid "Send shutdown signal to all clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isascii `\\ \\(\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of +msgid "Return True if all characters in the string are ASCII, False otherwise." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -msgid "Validate current global model on a number of clients." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`islower `\\ \\(\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of +msgid "Return True if the string is a lowercase string, False otherwise." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of -msgid "Run federated averaging for a number of rounds." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isupper `\\ \\(\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of +msgid "Return True if the string is an uppercase string, False otherwise." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of -msgid "Perform a single round of federated averaging." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`istitle `\\ \\(\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of +msgid "Return True if the string is a title-cased string, False otherwise." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of -msgid "Set the max_workers used by ThreadPoolExecutor." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isspace `\\ \\(\\)" msgstr "" -#: flwr.server.server.Server.client_manager:1::1 of -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of +msgid "Return True if the string is a whitespace string, False otherwise." msgstr "" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of -msgid "Replace server strategy." 
+#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdecimal `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 -msgid "ServerApp" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of +msgid "Return True if the string is a decimal string, False otherwise." msgstr "" -#: flwr.server.server_app.ServerApp:5 of -msgid "Use the `ServerApp` with an existing `Strategy`:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isdigit `\\ \\(\\)" msgstr "" -#: flwr.server.server_app.ServerApp:15 of -msgid "Use the `ServerApp` with a custom main function:" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of +msgid "Return True if the string is a digit string, False otherwise." msgstr "" -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid ":py:obj:`main `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isnumeric `\\ \\(\\)" msgstr "" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of -msgid "Return a decorator that registers the main fn with the server app." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of +msgid "Return True if the string is a numeric string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 -msgid "ServerConfig" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalpha `\\ \\(\\)" msgstr "" -#: flwr.server.server_config.ServerConfig:3 of -msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of +msgid "Return True if the string is an alphabetic string, False otherwise." 
msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`num_rounds `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isalnum `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 -msgid ":py:obj:`round_timeout `\\" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of +msgid "Return True if the string is an alpha-numeric string, False otherwise." msgstr "" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 -msgid "SimpleClientManager" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isidentifier `\\ \\(\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager:1 of -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of +msgid "Return True if the string is a valid Python identifier, False otherwise." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid ":py:obj:`all `\\ \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`isprintable `\\ \\(\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`num_available `\\" -" \\(\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of +msgid "Return True if the string is printable, False otherwise." 
msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`register `\\ " -"\\(client\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of -msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from args and kwargs." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of -msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`format_map `\\ \\(mapping\\)" msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of -msgid "The number of clients to wait for." +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid "Return a formatted version of S, using substitutions from mapping." msgstr "" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of -msgid "The time in seconds to wait for, defaults to 86400 (24h)." 
-msgstr "" - -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of -msgid "**success**" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +msgid ":py:obj:`maketrans `\\" msgstr "" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 -msgid "run\\_driver\\_api" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of +msgid "Return a translation table usable for str.translate()." msgstr "" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 -msgid "run\\_fleet\\_api" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`PING `\\" msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_ENTER `\\" msgstr "" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_CLIENT_LEAVE `\\" msgstr "" -#: ../../source/ref-api/flwr.server.start_server.rst:2 -msgid "start\\_server" +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_ENTER `\\" msgstr "" -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +#: flwr.common.EventType.capitalize:1::1 of +msgid ":py:obj:`START_SERVER_LEAVE `\\" msgstr "" -#: flwr.server.app.start_server:5 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." +":py:obj:`START_SIMULATION_ENTER " +"`\\" msgstr "" -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." 
+":py:obj:`START_SIMULATION_LEAVE " +"`\\" msgstr "" -#: flwr.server.app.start_server:12 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" msgstr "" -#: flwr.server.app.start_server:16 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" msgstr "" -#: flwr.server.app.start_server:21 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" msgstr "" -#: flwr.server.app.start_server:28 of +#: flwr.common.EventType.capitalize:1::1 of msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. 
The tuple is expected to have three " -"bytes elements in the following order:" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" msgstr "" -#: flwr.server.app.start_server:32 of -msgid "CA certificate." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" msgstr "" -#: flwr.server.app.start_server:33 of -msgid "server certificate." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" msgstr "" -#: flwr.server.app.start_server:34 of -msgid "server private key." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" msgstr "" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" msgstr "" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" msgstr "" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:2 -msgid "strategy" +#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:1::1 of msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." 
+#: flwr.common.EventType.capitalize:1::1 of +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.capitalize:3 of msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +"More specifically, make the first character have upper case and the rest " +"lower case." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of +msgid "Padding is done using the specified fill character (default is a space)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.count:1 of msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +#: flwr.common.EventType.encode:3 of +msgid "encoding" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +#: flwr.common.EventType.encode:4 of +msgid "The encoding in which to encode the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side adaptive clipping." 
+#: flwr.common.EventType.encode:9 of +msgid "errors" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.encode:6 of msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side adaptive clipping." +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.endswith:1 of msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with client-side fixed clipping." +#: flwr.common.EventType.expandtabs:3 of +msgid "If tabsize is not given, a tab size of 8 characters is assumed." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. 
Optional arguments start and end are " +"interpreted as in slice notation." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of -msgid "Strategy wrapper for central DP with server-side fixed clipping." +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +msgid "Return -1 on failure." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.format:1 of msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." +#: flwr.common.EventType.format_map:1 of +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +msgid "Raises ValueError when the substring is not found." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." +#: flwr.common.EventType.isalnum:3 of +msgid "" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isalpha:3 of msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Federated Averaging strategy." +#: flwr.common.EventType.isascii:3 of +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isdecimal:3 of msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isdigit:3 of msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -msgid "Federated Averaging with Momentum strategy." +#: flwr.common.EventType.isidentifier:3 of +msgid "" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.islower:3 of msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of -msgid "Configurable FedMedian strategy implementation." +#: flwr.common.EventType.isnumeric:3 of +msgid "" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isprintable:3 of msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of -msgid "Federated Optim strategy." +#: flwr.common.EventType.isspace:3 of +msgid "" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.istitle:3 of msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Federated Optimization strategy." +#: flwr.common.EventType.isupper:3 of +msgid "" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.join:3 of msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +#: flwr.common.EventType.join:6 of +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +msgid "If chars is given and not None, remove characters in chars instead." +msgstr "" + +#: flwr.common.EventType.maketrans:3 of msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of -msgid "Configurable FedXgbBagging strategy implementation." +#: flwr.common.EventType.partition:3 of +msgid "" +"This will search for the separator in the string. If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.partition:7 of msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of -msgid "Configurable FedXgbCyclic strategy implementation." +#: flwr.common.EventType.removeprefix:3 of +msgid "" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.removesuffix:3 of msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." +#: flwr.common.EventType.replace:5 of +msgid "count" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.replace:4 of msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." +#: flwr.common.EventType.replace:7 of +msgid "" +"If the optional argument count is given, only the first count occurrences" +" are replaced." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." +#: flwr.common.EventType.rpartition:3 of +msgid "" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rpartition:7 of msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of -msgid "Krum [Blanchard et al., 2017] strategy." +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of +msgid "sep" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +msgid "The separator used to split the string." +msgstr "" + +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." 
+#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +msgid "maxsplit" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -msgid ":py:obj:`Strategy `\\ \\(\\)" +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +msgid "" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." +#: flwr.common.EventType.rsplit:13 of +msgid "Splitting starts at the end of the string and works to the front." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 -msgid "Bulyan" +#: flwr.common.EventType.split:13 of +msgid "" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. With natural text that includes punctuation, consider using " +"the regular expression module." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +#: flwr.common.EventType.splitlines:3 of +msgid "" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." 
+#: flwr.common.EventType.startswith:1 of +msgid "" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." +#: flwr.common.EventType.title:3 of +msgid "" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." 
+#: flwr.common.EventType.translate:5 of +msgid "table" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." +#: flwr.common.EventType.translate:4 of +msgid "" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." +#: flwr.common.EventType.translate:7 of +msgid "" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." 
msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." +#: flwr.common.EventType.zfill:3 of +msgid "The string is never truncated." msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." +#: ../../source/ref-api/flwr.common.FitIns.rst:2 +msgid "FitIns" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." 
+#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." +#: ../../source/ref-api/flwr.common.FitRes.rst:2 +msgid "FitRes" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." 
+#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -msgid "Initial global model parameters." +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:27 of -msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`num_examples `\\" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +msgid ":py:obj:`metrics `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +msgid "GetParametersIns" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: 
flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate evaluation losses using weighted average." +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +msgid "GetParametersRes" msgstr "" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate fit results using Bulyan." +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +msgid ":py:obj:`parameters `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 
-#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +msgid "GetPropertiesIns" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." 
+#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +msgid "GetPropertiesRes" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`status `\\" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." 
+#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +msgid ":py:obj:`properties `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.Message.rst:2 +msgid "Message" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Initialize global model parameters." +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +msgid "A dataclass including information about the message to be executed." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.message.Message:5 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." 
msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." +#: flwr.common.message.Message:8 of +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." 
+#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +msgid "Construct a reply message indicating an error happened." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid "" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +msgid "Create a reply to this message with specified content and TTL." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of -msgid "This class is deprecated and will be removed in a future release." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_content `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +msgid "Return True if message has content, else False." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation losses using the given strategy." +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +msgid ":py:obj:`has_error `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +msgid "Return True if message has an error, else False." msgstr "" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." 
+#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`content `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 #: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "The content of this message." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`error `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +msgid "Error captured by this message." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.Message.content:1::1 of +msgid ":py:obj:`metadata `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." +#: flwr.common.message.Message.create_error_reply:3 of +msgid "The error that was encountered." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" - -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. 
This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of +msgid "" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation:" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +#: flwr.common.message.Message.create_reply:3 of msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. 
If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" +#: flwr.common.message.Message.create_reply:7 of +msgid "The content for the reply message." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +#: flwr.common.message.Message.create_reply:16 of +msgid "A new `Message` instance representing the reply." 
msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:2 +msgid "MessageType" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`EVALUATE `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`QUERY `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +msgid ":py:obj:`TRAIN `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +msgid "MessageTypeLegacy" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of -msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." 
+#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PARAMETERS `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +msgid ":py:obj:`GET_PROPERTIES `\\" msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +msgid "An identifier for the current run." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of -msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +msgid "An identifier for the current message." msgstr "" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of -msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +msgid "An identifier for the node sending this message." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyClientSideAdaptiveClipping" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +msgid "An identifier for the node receiving this message." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of -msgid "Use `adaptiveclipping_mod` modifier at the client side." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +msgid "An identifier for the message this message replies to." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of +#: flwr.common.message.Metadata:13 of msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of -msgid "The strategy to which DP functionalities will be added by this wrapper." +#: flwr.common.message.Metadata:16 of +msgid "Time-to-live for this message in seconds." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of -msgid "The noise multiplier for the Gaussian mechanism for model updates." +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +msgid "A string that encodes the action to be executed on the receiving end." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of -msgid "The number of clients that are sampled on each round." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`created_at `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of -msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of +msgid "Unix timestamp when the message was created." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." 
+#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`dst_node_id `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of -msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`group_id `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of -msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. recommends to set to `expected_num_records/20`" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of +msgid "An identifier for grouping messages." msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 -#: of -msgid "Create a strategy:" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_id `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of -msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`message_type `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`reply_to_message `\\" msgstr "" -#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`run_id `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`src_node_id `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of -msgid "Aggregate training results and update clip norms." +#: flwr.common.Metadata.created_at:1::1 of +msgid ":py:obj:`ttl `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 #: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "Time-to-live for this message." 
msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Metrics.rst:2 +msgid "Metrics" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +msgid "MetricsRecord" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:1 of msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 -msgid "DifferentialPrivacyClientSideFixedClipping" +#: flwr.common.record.metricsrecord.MetricsRecord:3 of +msgid "" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of -msgid "Use `fixedclipping_mod` modifier at the client side." +#: flwr.common.record.metricsrecord.MetricsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:12 of msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:28 of msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +#: flwr.common.record.metricsrecord.MetricsRecord:39 of +msgid "" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:50 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of -msgid "Add noise to the aggregated parameters." 
+#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 -msgid "DifferentialPrivacyServerSideAdaptiveClipping" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`popitem `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. 
recommends to set to " -"`expected_num_records/20`" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.NDArray.rst:2 +msgid "NDArray" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 +msgid "NDArrays" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensors `\\" msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +msgid ":py:obj:`tensor_type `\\" msgstr "" -#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +msgid "ParametersRecord" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 -msgid "DifferentialPrivacyServerSideFixedClipping" +#: flwr.common.record.parametersrecord.ParametersRecord:1 of +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:3 of msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " -"wrapper" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:12 of msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of -msgid "Compute the updates, clip, and pass them for aggregation." +#: flwr.common.record.parametersrecord.ParametersRecord:23 of +msgid "" +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." 
msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +#: flwr.common.record.parametersrecord.ParametersRecord:27 of +msgid "Let's see some examples:" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:50 of msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:83 of msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. 
Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." msgstr "" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of -msgid "Afterward, add noise to the aggregated parameters." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`clear `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 -msgid "FaultTolerantFedAvg" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`items `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`keys `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`popitem `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: collections.abc.MutableMapping.clear:1::1 of +msgid ":py:obj:`values `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:997 
-msgid "FedAdagrad" -msgstr "" - -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +#: ../../source/ref-api/flwr.common.Properties.rst:2 +msgid "Properties" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +msgid "ReconnectIns" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +msgid ":py:obj:`seconds `\\" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." 
+#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +msgid "RecordSet" msgstr "" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +#: flwr.common.record.recordset.RecordSet:3 of +msgid "" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:24 of msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.common.record.recordset.RecordSet:29 of +msgid "Let's see an example." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:47 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.record.recordset.RecordSet:66 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`configs_records `\\" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 -msgid "FedAdam" +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of +msgid "Dictionary holding ConfigsRecord instances." msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`metrics_records `\\" msgstr "" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. Defaults to 0.99." +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +msgid "Dictionary holding MetricsRecord instances." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: flwr.common.RecordSet.configs_records:1::1 of +msgid ":py:obj:`parameters_records `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +msgid "Dictionary holding ParametersRecord instances." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +msgid "ServerMessage" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`evaluate_ins `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +msgid ":py:obj:`fit_ins `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.Status.rst:2 +msgid "Status" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 -msgid "FedAvg" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`code `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" +#: ../../source/ref-api/flwr.common.Status.rst:29::1 +msgid ":py:obj:`message `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:5 
flwr.server.strategy.fedprox.FedProx:37 -#: of -msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +msgid "array\\_from\\_numpy" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of -msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +msgid "bytes\\_to\\_ndarray" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg:33 of -msgid "Enable (True) or disable (False) in-place aggregation of model updates." +#: ../../source/ref-api/flwr.common.configure.rst:2 +msgid "configure" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: ../../source/ref-api/flwr.common.event.rst:2 +msgid "event" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.common.log.rst:2 +msgid "log" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: logging.Logger.log:3 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +msgid "ndarray\\_to\\_bytes" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +msgid "ndarrays\\_to\\_parameters" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.now.rst:2 +msgid "now" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +msgid "parameters\\_to\\_ndarrays" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 -msgid "FedAvgAndroid" +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:22::1 msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.rst:22::1 +#: 
flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`ClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of -msgid "Deserialize NumPy array from bytes." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of +msgid "Abstract base class for managing Flower clients." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`Driver `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +msgid "Abstract base Driver class for the Driver API." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`History `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of +msgid "History class for training and/or evaluation metrics collection." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of -msgid "Serialize NumPy array to bytes." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Legacy Context." msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of +msgid "Flower ServerApp." 
msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of -msgid "Convert parameters object to NumPy weights." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 -msgid "FedAvgM" +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid "" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of -msgid "Implementation based on https://arxiv.org/abs/1909.06335" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of +msgid "Flower server config." msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:25 of -msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." +#: ../../source/ref-api/flwr.server.rst:37::1 +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Provides a pool of available clients." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +msgid ":py:obj:`flwr.server.strategy `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.rst:56::1 +msgid ":py:obj:`flwr.server.workflow `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of +msgid "Workflows." 
+msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +msgid "ClientManager" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "Return all available clients." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`num_available `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 -msgid "FedMedian" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +msgid "Return the number of available clients." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`register `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +msgid "Register Flower ClientProxy instance." +msgstr "" + +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of +msgid "Sample a number of Flower ClientProxy instances." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 of +msgid ":py:obj:`unregister `\\ \\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +msgid "Unregister Flower ClientProxy instance." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +msgid "Wait until at least `num_clients` are available." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +msgid "**num_available** -- The number of currently available clients." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 -msgid "FedOpt" +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +msgid "This method is idempotent." msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." +#: ../../source/ref-api/flwr.server.Driver.rst:2 +msgid "Driver" msgstr "" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid "" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of +msgid "Create a new message with specified parameters." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +msgid ":py:obj:`get_node_ids `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of +msgid "Get node IDs." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of +msgid "Pull messages based on message IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of +msgid "Push messages to specified node IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 -msgid "FedProx" +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of +msgid "Push messages to specified node IDs and pull the reply messages." 
msgstr "" -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid ":py:obj:`run `\\" msgstr "" -#: flwr.server.strategy.fedprox.FedProx:5 of -msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. A proximal term needs to be added to the loss " -"function during the training:" +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of +msgid "Run information." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:9 of +#: flwr.server.driver.driver.Driver.create_message:3 of msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:12 of +#: flwr.server.driver.driver.Driver.create_message:6 of msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." +"The content for the new message. This holds records that are to be sent " +"to the destination node." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" +#: flwr.server.driver.driver.Driver.create_message:9 of +msgid "" +"The type of the message, defining the action to be executed on the " +"receiving end." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" +#: flwr.server.driver.driver.Driver.create_message:12 of +msgid "The ID of the destination node to which the message is being sent." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:30 of +#: flwr.server.driver.driver.Driver.create_message:14 of msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." +"The ID of the group to which this message is associated. 
In some " +"settings, this is used as the FL round." msgstr "" -#: flwr.server.strategy.fedprox.FedProx:65 of +#: flwr.server.driver.driver.Driver.create_message:17 of msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:23 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"**message** -- A new `Message` instance with the specified content and " +"metadata." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.pull_messages:3 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.server.driver.driver.Driver.pull_messages:6 of +msgid "An iterable of message IDs for which reply messages are to be retrieved." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.driver.driver.Driver.pull_messages:9 of +msgid "**messages** -- An iterable of messages received." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:3 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of +msgid "An iterable of messages to be sent." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:9 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:3 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." msgstr "" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" +#: flwr.server.driver.driver.Driver.send_and_receive:9 of +msgid "" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 -msgid "FedTrimmedAvg" +#: flwr.server.driver.driver.Driver.send_and_receive:14 of +msgid "**replies** -- An iterable of reply messages received from the SuperLink." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +#: flwr.server.driver.driver.Driver.send_and_receive:19 of +msgid "" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." msgstr "" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." +#: ../../source/ref-api/flwr.server.History.rst:2 +msgid "History" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +msgid "Add one loss entry (from centralized evaluation)." +msgstr "" + +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." 
+#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of +msgid "Add one loss entry (from distributed evaluation)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +msgid "Add metrics entries (from centralized evaluation)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of +msgid "Add metrics entries (from distributed evaluation)." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of +msgid "Add metrics entries (from distributed fit)." msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 -msgid "FedXgbBagging" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +msgid "LegacyContext" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.compat.legacy_context.LegacyContext:1 of +msgid "Bases: :py:class:`~flwr.common.context.Context`" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation metrics using average." 
+#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`strategy `\\" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of -msgid "Aggregate fit results using bagging." +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`client_manager `\\" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`history `\\" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`node_id `\\" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`node_config `\\" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`state `\\" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of 
-msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +msgid ":py:obj:`run_config `\\" msgstr "" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 -msgid "FedXgbCyclic" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`client_manager `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of +msgid "Return ClientManager." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of +msgid "Send shutdown signal to all clients." 
msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of +msgid "Validate current global model on a number of clients." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of +msgid "Run federated averaging for a number of rounds." msgstr "" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 -msgid "FedXgbNnAvg" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of -msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
+#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of +msgid "Perform a single round of federated averaging." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of +msgid "Set the max_workers used by ThreadPoolExecutor." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 of +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of +msgid "Replace server strategy." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +msgid "ServerApp" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: flwr.server.server_app.ServerApp:5 of +msgid "Use the `ServerApp` with an existing `Strategy`:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server_app.ServerApp:17 of +msgid "Use the `ServerApp` with a custom main function:" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid ":py:obj:`main `\\ \\(\\)" msgstr "" -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -msgid "FedYogi" +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of +msgid "Return a decorator that registers the main fn with the server app." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:32 of -msgid "Server-side learning rate. Defaults to 1e-2." +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 +msgid "ServerAppComponents" msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:34 of -msgid "Client-side learning rate. Defaults to 0.0316." +#: flwr.server.serverapp_components.ServerAppComponents:3 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." msgstr "" -#: flwr.server.strategy.fedyogi.FedYogi:40 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
+#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of +msgid "" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:9 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.serverapp_components.ServerAppComponents:13 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." 
msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`client_manager " +"`\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +msgid ":py:obj:`config `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +msgid ":py:obj:`server `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 +msgid ":py:obj:`strategy `\\" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +msgid "ServerConfig" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_config.ServerConfig:3 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"All attributes have default values which allows users to configure just " +"the ones they care about." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 -msgid "Krum" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`num_rounds `\\" msgstr "" -#: flwr.server.strategy.krum.Krum:3 of -msgid "Implementation based on https://arxiv.org/abs/1703.02757" +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +msgid ":py:obj:`round_timeout `\\" msgstr "" -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +msgid "SimpleClientManager" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +#: flwr.server.client_manager.SimpleClientManager:1 of +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid ":py:obj:`all `\\ \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." 
+#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +msgid "" +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." msgstr "" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +msgid "The number of clients to wait for." 
msgstr "" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 -msgid "QFedAvg" +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +msgid "The time in seconds to wait for, defaults to 86400 (24h)." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +msgid "**success**" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: ../../source/ref-api/flwr.server.start_server.rst:2 +msgid "start\\_server" msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:5 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:12 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." 
+" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:21 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." msgstr "" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" - -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 -msgid "Strategy" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation results." +#: flwr.server.app.start_server:32 of +msgid "CA certificate." 
msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +#: flwr.server.app.start_server:33 of +msgid "server certificate." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." +#: flwr.server.app.start_server:34 of +msgid "server private key." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." 
+#: ../../source/ref-api/flwr.server.strategy.rst:2 +msgid "strategy" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." -msgstr "" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of -msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." 
msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of -msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of -msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side adaptive clipping." msgstr "" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:2 -msgid "workflow" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of +msgid "Strategy wrapper for central DP with client-side fixed clipping." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -msgid "Default workflow in Flower." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of +msgid "Strategy wrapper for central DP with server-side adaptive clipping." 
msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: of -msgid "The workflow for the SecAgg+ protocol." +msgid "Strategy wrapper for central DP with server-side fixed clipping." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "The workflow for the SecAgg protocol." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." msgstr "" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 -msgid "DefaultWorkflow" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 -msgid "SecAggPlusWorkflow" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -msgid "key shares." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 -#: of -msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of -msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." 
+#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of -msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedMedian strategy implementation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." 
+":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of -msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of +msgid "Federated Optim strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 -#: of -msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of -msgid "Too large `max_weight` may compromise the precision of the quantization." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 -#: of -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid "" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 -#: of -msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +msgid "Configurable FedXgbBagging strategy implementation." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 -#: of -msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +msgid "Configurable FedXgbCyclic strategy implementation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "Execute the 'collect masked vectors' stage." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 -#: of -msgid "Execute the 'setup' stage." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 -#: of -msgid "Execute the 'share keys' stage." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of +msgid "Krum [Blanchard et al., 2017] strategy." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 -#: of -msgid "Execute the 'unmask' stage." +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" msgstr "" -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 -msgid "SecAggWorkflow" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +msgid ":py:obj:`Strategy `\\ \\(\\)" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of -msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. 
Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of -msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +msgid "Bulyan" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of -msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of -msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." 
+#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of -msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." msgstr "" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of -msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" -msgstr "" - -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. Defaults to 2." msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." 
msgstr "" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:2 -msgid "simulation" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." 
+#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." msgstr "" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -msgid "Run a Flower App using the Simulation Engine." 
+#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." msgstr "" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 -msgid "run\\_simulation" +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:3 of +#: flwr.server.strategy.bulyan.Bulyan:27 of msgid "" -"The `ServerApp` to be executed. It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:6 of -msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." 
+#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:9 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:13 of -msgid "A simulation backend that runs `ClientApp`s." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:15 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: flwr.simulation.run_simulation.run_simulation:19 of -msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. 
" -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." msgstr "" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 -msgid "start\\_simulation" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." msgstr "" -#: flwr.simulation.app.start_simulation:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:13 of -msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." 
msgstr "" -#: flwr.simulation.app.start_simulation:16 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: flwr.simulation.app.start_simulation:20 of -msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." msgstr "" -#: flwr.simulation.app.start_simulation:25 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: flwr.simulation.app.start_simulation:31 of -msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. 
If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." msgstr "" -#: flwr.simulation.app.start_simulation:35 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: flwr.simulation.app.start_simulation:39 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." 
+#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." msgstr "" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." 
msgstr "" -#: flwr.simulation.app.start_simulation:45 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" msgstr "" -#: flwr.simulation.app.start_simulation:48 of -msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" msgstr "" -#: flwr.simulation.app.start_simulation:50 of -msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +msgid "This class is deprecated and will be removed in a future release." msgstr "" -#: flwr.simulation.app.start_simulation:54 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: flwr.simulation.app.start_simulation:57 of -msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." 
-" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." msgstr "" -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." 
msgstr "" -#: ../../source/ref-changelog.md:3 -msgid "v1.9.0 (2024-06-10)" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:105 -#: ../../source/ref-changelog.md:169 ../../source/ref-changelog.md:262 -#: ../../source/ref-changelog.md:362 ../../source/ref-changelog.md:446 -#: ../../source/ref-changelog.md:510 ../../source/ref-changelog.md:568 -#: ../../source/ref-changelog.md:637 ../../source/ref-changelog.md:706 -msgid "Thanks to our contributors" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." msgstr "" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:107 -#: ../../source/ref-changelog.md:171 ../../source/ref-changelog.md:264 -#: ../../source/ref-changelog.md:364 ../../source/ref-changelog.md:448 -#: ../../source/ref-changelog.md:512 ../../source/ref-changelog.md:570 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:9 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:175 ../../source/ref-changelog.md:268 -#: ../../source/ref-changelog.md:368 ../../source/ref-changelog.md:452 -#: ../../source/ref-changelog.md:516 ../../source/ref-changelog.md:574 -#: ../../source/ref-changelog.md:643 ../../source/ref-changelog.md:772 -#: ../../source/ref-changelog.md:814 ../../source/ref-changelog.md:881 -#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:992 -#: ../../source/ref-changelog.md:1031 ../../source/ref-changelog.md:1064 -#: ../../source/ref-changelog.md:1114 -msgid "What's new?" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." 
msgstr "" -#: ../../source/ref-changelog.md:13 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:15 -msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." msgstr "" -#: ../../source/ref-changelog.md:17 -msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." 
msgstr "" -#: ../../source/ref-changelog.md:19 -msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." msgstr "" -#: ../../source/ref-changelog.md:21 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. 
If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." msgstr "" -#: ../../source/ref-changelog.md:23 -msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" msgstr "" -#: ../../source/ref-changelog.md:25 -msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" msgstr "" -#: ../../source/ref-changelog.md:27 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:29 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:31 -msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." 
msgstr "" -#: ../../source/ref-changelog.md:33 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:35 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:37 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." +"Configure the next round of training incorporating Differential Privacy " +"(DP)." 
msgstr "" -#: ../../source/ref-changelog.md:39 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:41 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:43 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." msgstr "" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -"Logs received a substantial update. Not only are logs now much nicer to " -"look at, but they are also more configurable." +"**fit_configuration** -- A list of tuples. 
Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." msgstr "" -#: ../../source/ref-changelog.md:47 -msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyClientSideAdaptiveClipping" msgstr "" -#: ../../source/ref-changelog.md:49 -msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +msgid "Use `adaptiveclipping_mod` modifier at the client side." msgstr "" -#: ../../source/ref-changelog.md:51 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" +"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." 
msgstr "" -#: ../../source/ref-changelog.md:53 -msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +msgid "The strategy to which DP functionalities will be added by this wrapper." msgstr "" -#: ../../source/ref-changelog.md:55 -msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " 
-"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " -"[#3271](https://github.com/adap/flower/pull/3271))" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +msgid "The noise multiplier for the Gaussian mechanism for model updates." msgstr "" -#: ../../source/ref-changelog.md:57 -msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +msgid "The number of clients that are sampled on each round." 
msgstr "" -#: ../../source/ref-changelog.md:59 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " -"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " -"[#3519](https://github.com/adap/flower/pull/3519), " -"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." msgstr "" -#: ../../source/ref-changelog.md:61 -msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." 
msgstr "" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " -"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." msgstr "" -#: ../../source/ref-changelog.md:65 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. recommends to set to `expected_num_records/20`" msgstr "" -#: ../../source/ref-changelog.md:67 -msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +msgid "Create a strategy:" msgstr "" -#: ../../source/ref-changelog.md:69 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " 
-"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " -"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" msgstr "" -#: ../../source/ref-changelog.md:71 -msgid "As always, Flower code examples have received many updates." +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" msgstr "" -#: ../../source/ref-changelog.md:73 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " 
-"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " -"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " -"[#3327](https://github.com/adap/flower/pull/3327), " -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " -"[#3245](https://github.com/adap/flower/pull/3245), " -"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:75 ../../source/ref-changelog.md:1058 -msgid "Deprecations" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:77 -msgid "**Deprecate Python 3.8 support**" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +msgid "Aggregate training results and update clip norms." msgstr "" -#: ../../source/ref-changelog.md:79 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:81 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:83 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." 
+":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:85 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:87 -msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +msgid "DifferentialPrivacyClientSideFixedClipping" msgstr "" -#: ../../source/ref-changelog.md:89 ../../source/ref-changelog.md:163 -#: ../../source/ref-changelog.md:238 ../../source/ref-changelog.md:350 -#: ../../source/ref-changelog.md:440 ../../source/ref-changelog.md:504 -#: ../../source/ref-changelog.md:562 ../../source/ref-changelog.md:631 -#: ../../source/ref-changelog.md:693 ../../source/ref-changelog.md:712 -#: ../../source/ref-changelog.md:868 ../../source/ref-changelog.md:939 -#: ../../source/ref-changelog.md:976 ../../source/ref-changelog.md:1019 -msgid "Incompatible changes" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +msgid "Use `fixedclipping_mod` modifier at the client side." 
msgstr "" -#: ../../source/ref-changelog.md:91 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " -"[#3408](https://github.com/adap/flower/pull/3408))" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." msgstr "" -#: ../../source/ref-changelog.md:93 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." 
msgstr "" -#: ../../source/ref-changelog.md:95 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" msgstr "" -#: ../../source/ref-changelog.md:97 -msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" msgstr "" -#: ../../source/ref-changelog.md:99 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:101 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI options for gRPC and REST were merged into one unified set of " -"options. Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:103 -msgid "v1.8.0 (2024-04-03)" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of +msgid "Add noise to the aggregated parameters." 
msgstr "" -#: ../../source/ref-changelog.md:109 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:113 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " 
-"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:115 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." -" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:117 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:119 -msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. 
The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +msgid "DifferentialPrivacyServerSideAdaptiveClipping" msgstr "" -#: ../../source/ref-changelog.md:121 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. recommends to set to " +"`expected_num_records/20`" msgstr "" -#: ../../source/ref-changelog.md:123 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. 
Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" msgstr "" -#: ../../source/ref-changelog.md:125 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:127 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." 
+":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:129 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:131 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." 
+":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:133 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:135 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." 
+":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:137 -msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +msgid "DifferentialPrivacyServerSideFixedClipping" msgstr "" -#: ../../source/ref-changelog.md:139 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." 
+"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" msgstr "" -#: ../../source/ref-changelog.md:141 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:143 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:145 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of +msgid "Compute the updates, clip, and pass them for aggregation." 
+msgstr "" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:147 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:149 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:151 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." 
+":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:153 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +msgid "Afterward, add noise to the aggregated parameters." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +msgid "FaultTolerantFedAvg" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:155 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:157 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
+msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:159 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:161 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " 
-"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " 
-"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:165 ../../source/ref-changelog.md:442 -#: ../../source/ref-changelog.md:506 ../../source/ref-changelog.md:564 -#: ../../source/ref-changelog.md:633 ../../source/ref-changelog.md:695 -msgid "None" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:167 -msgid "v1.7.0 (2024-02-05)" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:173 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:177 -msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" msgstr "" -#: ../../source/ref-changelog.md:179 -msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." 
+#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" msgstr "" -#: ../../source/ref-changelog.md:181 -msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" msgstr "" -#: ../../source/ref-changelog.md:183 -msgid "" -"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." msgstr "" -#: ../../source/ref-changelog.md:185 -msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." 
msgstr "" -#: ../../source/ref-changelog.md:187 -msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." msgstr "" -#: ../../source/ref-changelog.md:189 -msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." msgstr "" -#: ../../source/ref-changelog.md:191 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:193 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:195 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:197 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:199 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:201 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:203 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:205 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:207 -msgid "" -"**Introduce** `quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +msgid "FedAdam" msgstr "" -#: ../../source/ref-changelog.md:209 -msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." msgstr "" -#: ../../source/ref-changelog.md:211 -msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." msgstr "" -#: ../../source/ref-changelog.md:213 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." 
+":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:215 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:217 -msgid "Many Flower code examples received substantial updates." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:312 -msgid "**Update Flower Baselines**" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:221 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:222 -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr 
"" -#: ../../source/ref-changelog.md:223 -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:224 -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:225 -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +msgid "FedAvg" msgstr "" -#: ../../source/ref-changelog.md:226 -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" msgstr "" -#: ../../source/ref-changelog.md:228 +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " -"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." 
msgstr "" -#: ../../source/ref-changelog.md:230 +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. 
" +"Defaults to 1.0." msgstr "" -#: ../../source/ref-changelog.md:232 -msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." +#: flwr.server.strategy.fedavg.FedAvg:33 of +msgid "Enable (True) or disable (False) in-place aggregation of model updates." msgstr "" -#: ../../source/ref-changelog.md:234 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " -"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:236 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " 
-"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:240 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Deprecate** `start_numpy_client` " 
-"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:242 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:244 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:246 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:248 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:250 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:252 -msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +msgid "FedAvgAndroid" msgstr "" -#: ../../source/ref-changelog.md:254 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:256 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:258 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -#: ../../source/ref-changelog.md:260 -msgid "v1.6.0 (2023-11-28)" +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +msgid "Deserialize NumPy array from bytes." msgstr "" -#: ../../source/ref-changelog.md:266 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:270 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:272 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:274 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:276 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" +msgstr "" + +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +msgid "Serialize NumPy array to bytes." msgstr "" -#: ../../source/ref-changelog.md:278 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" msgstr "" -#: ../../source/ref-changelog.md:280 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:282 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:284 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -#: 
../../source/ref-changelog.md:286 -msgid "Add gRPC request-response capability to the Android SDK." +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +msgid "Convert parameters object to NumPy weights." +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +msgid "FedAvgM" +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of +msgid "Implementation based on https://arxiv.org/abs/1909.06335" msgstr "" -#: ../../source/ref-changelog.md:288 +#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." msgstr "" -#: ../../source/ref-changelog.md:290 -msgid "Add gRPC request-response capability to the C++ SDK." +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." msgstr "" -#: ../../source/ref-changelog.md:292 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:294 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. 
The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:296 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:298 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:300 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Using the `client_fn`, Flower clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:302 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:304 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:306 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new** `XGB Bagging` **strategy** " -"([#2611](https://github.com/adap/flower/pull/2611))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" + +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +msgid "FedMedian" msgstr "" -#: ../../source/ref-changelog.md:308 ../../source/ref-changelog.md:310 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:314 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 
of +msgid "Aggregate fit results using median." msgstr "" -#: ../../source/ref-changelog.md:316 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:318 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:320 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:322 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:324 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:326 -msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:328 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +msgid "FedOpt" msgstr "" -#: 
../../source/ref-changelog.md:330 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." msgstr "" -#: ../../source/ref-changelog.md:332 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." msgstr "" -#: ../../source/ref-changelog.md:334 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:336 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:338 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:340 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " 
-"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:342 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:344 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:346 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " 
-"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:438 -#: ../../source/ref-changelog.md:502 ../../source/ref-changelog.md:556 -#: ../../source/ref-changelog.md:623 -msgid "Flower received many improvements under the hood, too many to list here." 
+#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:352 -msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +msgid "FedProx" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" msgstr "" -#: ../../source/ref-changelog.md:354 +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" msgstr "" -#: ../../source/ref-changelog.md:356 +#: flwr.server.strategy.fedprox.FedProx:9 of msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -#: ../../source/ref-changelog.md:358 +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." 
msgstr "" -#: ../../source/ref-changelog.md:360 -msgid "v1.5.0 (2023-08-31)" +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" msgstr "" -#: ../../source/ref-changelog.md:366 -msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" msgstr "" -#: ../../source/ref-changelog.md:370 +#: flwr.server.strategy.fedprox.FedProx:30 of msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +"With `global_params` being a copy of the parameters before the training " +"takes place." msgstr "" -#: ../../source/ref-changelog.md:372 +#: flwr.server.strategy.fedprox.FedProx:65 of msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." 
msgstr "" -#: ../../source/ref-changelog.md:374 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:376 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " 
-"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:378 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:380 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:382 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:384 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:386 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:388 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:390 -msgid 
"" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" msgstr "" -#: ../../source/ref-changelog.md:392 -msgid "**Deprecate Python 3.7**" +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +msgid "FedTrimmedAvg" msgstr "" -#: ../../source/ref-changelog.md:394 -msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" msgstr "" -#: ../../source/ref-changelog.md:396 -msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." msgstr "" -#: ../../source/ref-changelog.md:398 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:400 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:402 -msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. 
Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." msgstr "" -#: ../../source/ref-changelog.md:404 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:406 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:408 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:410 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:412 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:414 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:416 -msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +msgid "FedXgbBagging" msgstr "" -#: ../../source/ref-changelog.md:418 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:420 -msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation metrics using average." msgstr "" -#: ../../source/ref-changelog.md:422 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. 
`start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:424 -msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +msgid "Aggregate fit results using bagging." msgstr "" -#: ../../source/ref-changelog.md:426 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:428 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:430 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" 
+#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:432 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:434 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:436 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:444 -msgid "v1.4.0 (2023-04-21)" +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +msgid "FedXgbCyclic" msgstr "" -#: ../../source/ref-changelog.md:450 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:454 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:456 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"XGBoost is a tree-based ensemble machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." 
+":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:458 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:460 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:462 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:464 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" 
+":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:466 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:468 -msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +msgid "FedXgbNnAvg" msgstr "" -#: ../../source/ref-changelog.md:470 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." msgstr "" -#: ../../source/ref-changelog.md:472 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:474 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:476 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:478 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." 
+":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:480 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:482 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:484 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:486 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:488 -msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +msgid "FedYogi" msgstr "" -#: ../../source/ref-changelog.md:490 -msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +#: flwr.server.strategy.fedyogi.FedYogi:32 of +msgid "Server-side learning rate. Defaults to 1e-2." msgstr "" -#: ../../source/ref-changelog.md:492 -msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" +#: flwr.server.strategy.fedyogi.FedYogi:34 of +msgid "Client-side learning rate. Defaults to 0.0316." msgstr "" -#: ../../source/ref-changelog.md:494 -msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." +#: flwr.server.strategy.fedyogi.FedYogi:40 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
msgstr "" -#: ../../source/ref-changelog.md:496 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:498 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:500 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " 
-"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" -msgstr "" - -#: ../../source/ref-changelog.md:508 -msgid "v1.3.0 (2023-02-06)" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:514 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:518 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:520 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:522 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:524 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:526 -msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +msgid "Krum" msgstr "" -#: 
../../source/ref-changelog.md:528 -msgid "Both IPv4 and IPv6 addresses are supported." +#: flwr.server.strategy.krum.Krum:3 of +msgid "Implementation based on https://arxiv.org/abs/1703.02757" msgstr "" -#: ../../source/ref-changelog.md:530 +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." msgstr "" -#: ../../source/ref-changelog.md:532 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:534 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:536 -msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." 
msgstr "" -#: ../../source/ref-changelog.md:538 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:540 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:542 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:544 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:546 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:548 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:550 -msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +msgid "QFedAvg" msgstr "" -#: ../../source/ref-changelog.md:552 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:554 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " 
-"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:558 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " 
-"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:560 ../../source/ref-changelog.md:627 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "" - -#: ../../source/ref-changelog.md:566 -msgid "v1.2.0 (2023-01-13)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:572 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:576 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:578 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. 
[Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:580 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -#: ../../source/ref-changelog.md:582 -msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +msgid "Strategy" msgstr "" -#: ../../source/ref-changelog.md:584 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:586 -msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." 
msgstr "" -#: ../../source/ref-changelog.md:588 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -#: ../../source/ref-changelog.md:589 -msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." msgstr "" -#: ../../source/ref-changelog.md:590 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:591 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:593 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -#: ../../source/ref-changelog.md:595 -msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to 
make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." msgstr "" -#: ../../source/ref-changelog.md:597 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -#: ../../source/ref-changelog.md:599 -msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." msgstr "" -#: ../../source/ref-changelog.md:601 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." 
+"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:603 -msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." msgstr "" -#: ../../source/ref-changelog.md:605 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." msgstr "" -#: ../../source/ref-changelog.md:607 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. 
For each client that did not submit an " +"update, there should be an `Exception` in `failures`." msgstr "" -#: ../../source/ref-changelog.md:609 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." msgstr "" -#: ../../source/ref-changelog.md:611 +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." msgstr "" -#: ../../source/ref-changelog.md:613 +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." msgstr "" -#: ../../source/ref-changelog.md:615 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." 
+"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." msgstr "" -#: ../../source/ref-changelog.md:617 -msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" +#: ../../source/ref-api/flwr.server.workflow.rst:2 +msgid "workflow" msgstr "" -#: ../../source/ref-changelog.md:619 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:621 -msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +msgid "Default workflow in Flower." 
msgstr "" -#: ../../source/ref-changelog.md:625 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -#: ../../source/ref-changelog.md:629 -msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +msgid "The workflow for the SecAgg+ protocol." msgstr "" -#: ../../source/ref-changelog.md:635 -msgid "v1.1.0 (2022-10-31)" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +msgid "" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" msgstr "" -#: ../../source/ref-changelog.md:639 -msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +msgid "The workflow for the SecAgg protocol." msgstr "" -#: ../../source/ref-changelog.md:641 -msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +msgid "DefaultWorkflow" msgstr "" -#: ../../source/ref-changelog.md:645 -msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +msgid "SecAggPlusWorkflow" msgstr "" -#: ../../source/ref-changelog.md:647 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:649 -msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" msgstr "" -#: ../../source/ref-changelog.md:651 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." msgstr "" -#: ../../source/ref-changelog.md:653 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." msgstr "" -#: ../../source/ref-changelog.md:655 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." 
msgstr "" -#: ../../source/ref-changelog.md:657 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." msgstr "" -#: ../../source/ref-changelog.md:659 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." msgstr "" -#: ../../source/ref-changelog.md:661 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." 
msgstr "" -#: ../../source/ref-changelog.md:663 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." msgstr "" -#: ../../source/ref-changelog.md:665 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." msgstr "" -#: ../../source/ref-changelog.md:667 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." 
msgstr "" -#: ../../source/ref-changelog.md:669 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." msgstr "" -#: ../../source/ref-changelog.md:671 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -#: ../../source/ref-changelog.md:673 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." 
msgstr "" -#: ../../source/ref-changelog.md:675 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." msgstr "" -#: ../../source/ref-changelog.md:677 -msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +msgid "Too large `max_weight` may compromise the precision of the quantization." msgstr "" -#: ../../source/ref-changelog.md:679 -msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." 
+#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." msgstr "" -#: ../../source/ref-changelog.md:681 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." msgstr "" -#: ../../source/ref-changelog.md:683 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." msgstr "" -#: ../../source/ref-changelog.md:685 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." 
msgstr "" -#: ../../source/ref-changelog.md:687 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:689 -msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "Execute the 'collect masked vectors' stage." msgstr "" -#: ../../source/ref-changelog.md:691 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." 
+":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:697 -msgid "v1.0.0 (2022-07-28)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +msgid "Execute the 'setup' stage." msgstr "" -#: ../../source/ref-changelog.md:699 -msgid "Highlights" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:701 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +msgid "Execute the 'share keys' stage." msgstr "" -#: ../../source/ref-changelog.md:702 -msgid "All `Client`/`NumPyClient` methods are now optional" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:703 -msgid "Configurable `get_parameters`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +msgid "Execute the 'unmask' stage." 
msgstr "" -#: ../../source/ref-changelog.md:704 -msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +msgid "SecAggWorkflow" msgstr "" -#: ../../source/ref-changelog.md:708 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -#: ../../source/ref-changelog.md:710 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " 
-"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " -"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." msgstr "" -#: ../../source/ref-changelog.md:714 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." 
msgstr "" -#: ../../source/ref-changelog.md:716 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." msgstr "" -#: ../../source/ref-changelog.md:718 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." msgstr "" -#: ../../source/ref-changelog.md:720 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." 
msgstr "" -#: ../../source/ref-changelog.md:722 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." msgstr "" -#: ../../source/ref-changelog.md:724 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:726 -msgid "`fraction_eval` --> `fraction_evaluate`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:727 -msgid "`min_eval_clients` --> `min_evaluate_clients`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:728 -msgid "`eval_fn` --> `evaluate_fn`" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +msgid "" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -#: ../../source/ref-changelog.md:730 -msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" +#: ../../source/ref-api/flwr.simulation.rst:2 +msgid "simulation" msgstr "" -#: 
../../source/ref-changelog.md:732 +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -#: ../../source/ref-changelog.md:734 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +msgid "Run a Flower App using the Simulation Engine." msgstr "" -#: ../../source/ref-changelog.md:736 +#: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" -#: ../../source/ref-changelog.md:738 -msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." 
msgstr "" -#: ../../source/ref-changelog.md:740 -msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +msgid "run\\_simulation" msgstr "" -#: ../../source/ref-changelog.md:742 +#: flwr.simulation.run_simulation.run_simulation:3 of msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." msgstr "" -#: ../../source/ref-changelog.md:744 +#: flwr.simulation.run_simulation.run_simulation:6 of msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." msgstr "" -#: ../../source/ref-changelog.md:746 +#: flwr.simulation.run_simulation.run_simulation:9 of msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." -msgstr "" - -#: ../../source/ref-changelog.md:748 -msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." 
msgstr "" -#: ../../source/ref-changelog.md:750 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +#: flwr.simulation.run_simulation.run_simulation:12 of +msgid "A simulation backend that runs `ClientApp`s." msgstr "" -#: ../../source/ref-changelog.md:752 +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -#: ../../source/ref-changelog.md:754 +#: flwr.simulation.run_simulation.run_simulation:21 of msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." msgstr "" -#: ../../source/ref-changelog.md:756 +#: flwr.simulation.run_simulation.run_simulation:28 of msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." 
msgstr "" -#: ../../source/ref-changelog.md:758 -msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +msgid "start\\_simulation" msgstr "" -#: ../../source/ref-changelog.md:760 -msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +#: ../../source/ref-changelog.md:1 +msgid "Changelog" msgstr "" -#: ../../source/ref-changelog.md:762 -msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +#: ../../source/ref-changelog.md:3 +msgid "v1.11.1 (2024-09-11)" msgstr "" -#: ../../source/ref-changelog.md:764 -msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" msgstr "" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"The `get_parameters` method now accepts a 
configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:9 msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:770 -msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +#: ../../source/ref-changelog.md:11 +msgid "Improvements" msgstr "" -#: ../../source/ref-changelog.md:774 +#: ../../source/ref-changelog.md:13 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:15 msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." 
+"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" -#: ../../source/ref-changelog.md:778 +#: ../../source/ref-changelog.md:17 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" msgstr "" -#: ../../source/ref-changelog.md:780 +#: ../../source/ref-changelog.md:19 msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" +"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:21 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "" -#: ../../source/ref-changelog.md:784 +#: ../../source/ref-changelog.md:23 msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." 
+"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:25 msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" msgstr "" -#: ../../source/ref-changelog.md:788 +#: ../../source/ref-changelog.md:27 msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -#: ../../source/ref-changelog.md:790 -msgid "`scikit-learn`" +#: ../../source/ref-changelog.md:29 +msgid "" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" msgstr "" -#: ../../source/ref-changelog.md:791 -msgid "`simulation_pytorch`" +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" msgstr "" -#: ../../source/ref-changelog.md:792 -msgid "`quickstart_pytorch`" +#: ../../source/ref-changelog.md:35 +msgid "v1.11.0 (2024-08-30)" msgstr "" -#: ../../source/ref-changelog.md:793 -msgid "`quickstart_simulation`" +#: ../../source/ref-changelog.md:41 
+msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " +msgstr "" + +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" 
msgstr "" -#: ../../source/ref-changelog.md:794 -msgid "`quickstart_tensorflow`" +#: ../../source/ref-changelog.md:45 +msgid "" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -#: ../../source/ref-changelog.md:795 -msgid "`advanced_tensorflow`" +#: ../../source/ref-changelog.md:47 +msgid "" +"Dynamic code updates are here! `flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -#: ../../source/ref-changelog.md:797 +#: ../../source/ref-changelog.md:49 msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" +"How does it work? 
`flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -#: ../../source/ref-changelog.md:799 -msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." msgstr "" -#: ../../source/ref-changelog.md:801 +#: ../../source/ref-changelog.md:53 msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " 
+"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -#: ../../source/ref-changelog.md:803 +#: ../../source/ref-changelog.md:55 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"The SuperNode can now run your `ClientApp` in a fully isolated way. In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." msgstr "" -#: ../../source/ref-changelog.md:805 ../../source/ref-changelog.md:860 -#: ../../source/ref-changelog.md:929 ../../source/ref-changelog.md:968 -msgid "**Minor updates**" +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" msgstr "" -#: ../../source/ref-changelog.md:807 +#: ../../source/ref-changelog.md:59 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." msgstr "" -#: ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:60 msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." 
msgstr "" -#: ../../source/ref-changelog.md:809 +#: ../../source/ref-changelog.md:61 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." msgstr "" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:63 msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -#: ../../source/ref-changelog.md:812 -msgid "v0.19.0 (2022-05-18)" +#: ../../source/ref-changelog.md:65 +msgid "" +"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -#: ../../source/ref-changelog.md:816 -msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." 
msgstr "" -#: ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:68 msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." msgstr "" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:69 msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." msgstr "" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:70 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." 
+"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -#: ../../source/ref-changelog.md:824 +#: ../../source/ref-changelog.md:72 msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" msgstr "" -#: ../../source/ref-changelog.md:826 +#: ../../source/ref-changelog.md:74 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." msgstr "" -#: ../../source/ref-changelog.md:828 +#: ../../source/ref-changelog.md:76 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -#: ../../source/ref-changelog.md:830 +#: ../../source/ref-changelog.md:78 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. 
Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" -#: ../../source/ref-changelog.md:832 +#: ../../source/ref-changelog.md:80 msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" msgstr "" -#: ../../source/ref-changelog.md:834 +#: ../../source/ref-changelog.md:82 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." 
msgstr "" -#: ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:84 msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" msgstr "" -#: ../../source/ref-changelog.md:838 +#: ../../source/ref-changelog.md:86 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." 
msgstr "" -#: ../../source/ref-changelog.md:840 +#: ../../source/ref-changelog.md:88 msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" +msgstr "" + +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." 
+msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." 
+msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " 
+"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" msgstr "" -#: ../../source/ref-changelog.md:842 +#: ../../source/ref-changelog.md:102 msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" msgstr "" -#: ../../source/ref-changelog.md:844 +#: ../../source/ref-changelog.md:104 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. 
If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -#: ../../source/ref-changelog.md:846 +#: ../../source/ref-changelog.md:113 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -#: ../../source/ref-changelog.md:848 +#: ../../source/ref-changelog.md:115 msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." msgstr "" -#: ../../source/ref-changelog.md:850 +#: ../../source/ref-changelog.md:117 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -#: ../../source/ref-changelog.md:852 -msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." 
msgstr "" -#: ../../source/ref-changelog.md:854 +#: ../../source/ref-changelog.md:121 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" msgstr "" -#: ../../source/ref-changelog.md:856 +#: ../../source/ref-changelog.md:123 msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" msgstr "" -#: ../../source/ref-changelog.md:858 -msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" msgstr "" -#: ../../source/ref-changelog.md:862 +#: ../../source/ref-changelog.md:135 msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" msgstr "" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:137 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." 
msgstr "" -#: ../../source/ref-changelog.md:864 -msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +#: ../../source/ref-changelog.md:139 +msgid "v1.10.0 (2024-07-24)" msgstr "" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:145 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " msgstr "" -#: ../../source/ref-changelog.md:866 +#: ../../source/ref-changelog.md:149 msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -#: ../../source/ref-changelog.md:870 +#: ../../source/ref-changelog.md:151 msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" +"Flower 1.10 ships the first beta release of the new `flwr run` command. 
" +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." msgstr "" -#: ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:153 msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -#: ../../source/ref-changelog.md:872 +#: ../../source/ref-changelog.md:155 msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" +"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. 
" +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -#: ../../source/ref-changelog.md:873 +#: ../../source/ref-changelog.md:157 msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -#: ../../source/ref-changelog.md:874 +#: ../../source/ref-changelog.md:159 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:161 msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. 
Use " +"`client_fn(context: Context) -> Client` everywhere." msgstr "" -#: ../../source/ref-changelog.md:876 +#: ../../source/ref-changelog.md:163 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" msgstr "" -#: ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:165 msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" -msgstr "" - -#: ../../source/ref-changelog.md:879 -msgid "v0.18.0 (2022-02-28)" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." 
msgstr "" -#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:167 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:169 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." 
msgstr "" -#: ../../source/ref-changelog.md:887 +#: ../../source/ref-changelog.md:171 msgid "" -"**New Jupyter Notebook code example** " -"([#833](https://github.com/adap/flower/pull/833))" +"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:173 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -#: ../../source/ref-changelog.md:891 +#: ../../source/ref-changelog.md:175 msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" msgstr "" -#: ../../source/ref-changelog.md:893 +#: ../../source/ref-changelog.md:177 msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." 
msgstr "" -#: ../../source/ref-changelog.md:895 +#: ../../source/ref-changelog.md:179 msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" msgstr "" -#: ../../source/ref-changelog.md:897 +#: ../../source/ref-changelog.md:181 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." msgstr "" -#: ../../source/ref-changelog.md:899 +#: ../../source/ref-changelog.md:183 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. 
The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" msgstr "" -#: ../../source/ref-changelog.md:901 +#: ../../source/ref-changelog.md:185 msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." msgstr "" -#: ../../source/ref-changelog.md:903 +#: ../../source/ref-changelog.md:187 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" msgstr "" -#: ../../source/ref-changelog.md:905 +#: ../../source/ref-changelog.md:189 msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." msgstr "" -#: ../../source/ref-changelog.md:907 +#: ../../source/ref-changelog.md:191 msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." 
+"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -#: ../../source/ref-changelog.md:909 +#: ../../source/ref-changelog.md:193 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -#: ../../source/ref-changelog.md:911 +#: ../../source/ref-changelog.md:195 msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" msgstr "" -#: ../../source/ref-changelog.md:913 +#: ../../source/ref-changelog.md:197 msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." 
msgstr "" -#: ../../source/ref-changelog.md:915 +#: ../../source/ref-changelog.md:199 msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -#: ../../source/ref-changelog.md:917 +#: ../../source/ref-changelog.md:201 msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." msgstr "" -#: ../../source/ref-changelog.md:919 +#: ../../source/ref-changelog.md:203 msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." 
+"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -#: ../../source/ref-changelog.md:921 -msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +#: ../../source/ref-changelog.md:207 +msgid "Documentation improvements" msgstr "" -#: ../../source/ref-changelog.md:923 +#: ../../source/ref-changelog.md:209 msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." 
+"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" msgstr "" -#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:211 msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -#: ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:213 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." 
+"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" msgstr "" -#: ../../source/ref-changelog.md:931 +#: ../../source/ref-changelog.md:215 msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -#: ../../source/ref-changelog.md:932 -msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:221 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. 
When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -#: ../../source/ref-changelog.md:934 +#: ../../source/ref-changelog.md:223 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" msgstr "" -#: ../../source/ref-changelog.md:935 +#: ../../source/ref-changelog.md:225 msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." msgstr "" -#: ../../source/ref-changelog.md:936 +#: ../../source/ref-changelog.md:229 msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" msgstr "" -#: ../../source/ref-changelog.md:937 +#: ../../source/ref-changelog.md:231 msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" 
+"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." msgstr "" -#: ../../source/ref-changelog.md:941 +#: ../../source/ref-changelog.md:233 msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" msgstr "" -#: ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:235 msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." -msgstr "" - -#: ../../source/ref-changelog.md:945 -msgid "v0.17.0 (2021-09-24)" +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." msgstr "" -#: ../../source/ref-changelog.md:949 -msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +#: ../../source/ref-changelog.md:237 +msgid "v1.9.0 (2024-06-10)" msgstr "" -#: ../../source/ref-changelog.md:951 +#: ../../source/ref-changelog.md:243 msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. 
Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -#: ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:247 msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" msgstr "" -#: ../../source/ref-changelog.md:955 +#: ../../source/ref-changelog.md:249 msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" +"Flower 1.9 introduces the first built-in version of client node " +"authentication. 
In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -#: ../../source/ref-changelog.md:957 +#: ../../source/ref-changelog.md:251 msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." msgstr "" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:253 msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. 
" -"Implementation based on https://arxiv.org/abs/2003.00295" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -#: ../../source/ref-changelog.md:960 +#: ../../source/ref-changelog.md:255 msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"Full Flower Next Docker support is here! With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." 
msgstr "" -#: ../../source/ref-changelog.md:962 +#: ../../source/ref-changelog.md:257 msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" msgstr "" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:259 msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." 
msgstr "" -#: ../../source/ref-changelog.md:966 +#: ../../source/ref-changelog.md:261 msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -#: ../../source/ref-changelog.md:970 +#: ../../source/ref-changelog.md:263 msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." msgstr "" -#: ../../source/ref-changelog.md:971 +#: ../../source/ref-changelog.md:265 msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" msgstr "" -#: ../../source/ref-changelog.md:972 +#: ../../source/ref-changelog.md:267 msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -#: ../../source/ref-changelog.md:973 +#: ../../source/ref-changelog.md:269 msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:271 msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." 
msgstr "" -#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:273 msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -#: ../../source/ref-changelog.md:980 +#: ../../source/ref-changelog.md:275 msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." msgstr "" -#: ../../source/ref-changelog.md:982 +#: ../../source/ref-changelog.md:277 msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -#: ../../source/ref-changelog.md:984 +#: ../../source/ref-changelog.md:279 msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). 
Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." msgstr "" -#: ../../source/ref-changelog.md:986 +#: ../../source/ref-changelog.md:281 msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -#: ../../source/ref-changelog.md:988 +#: ../../source/ref-changelog.md:283 msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." -msgstr "" - -#: ../../source/ref-changelog.md:990 -msgid "v0.16.0 (2021-05-11)" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." 
msgstr "" -#: ../../source/ref-changelog.md:994 +#: ../../source/ref-changelog.md:285 msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "" - -#: ../../source/ref-changelog.md:996 -msgid "(abstract) FedOpt" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -#: ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:287 msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." msgstr "" -#: ../../source/ref-changelog.md:1001 +#: ../../source/ref-changelog.md:289 msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." 
+"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -#: ../../source/ref-changelog.md:1003 +#: ../../source/ref-changelog.md:291 msgid "" -"Custom metric dictionaries are now used in two user-facing APIs: they are" -" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " -"they enable evaluation functions passed to built-in strategies (via " -"`eval_fn`) to return 
more than two evaluation metrics. Strategies can " -"even return *aggregated* metrics dictionaries for the server to keep " -"track of." +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -#: ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:293 msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -#: ../../source/ref-changelog.md:1007 +#: ../../source/ref-changelog.md:295 msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." 
+"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -#: ../../source/ref-changelog.md:1009 +#: ../../source/ref-changelog.md:297 msgid "" -"**Migration warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" +"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -#: ../../source/ref-changelog.md:1011 +#: ../../source/ref-changelog.md:299 msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -#: ../../source/ref-changelog.md:1013 +#: ../../source/ref-changelog.md:301 msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -#: ../../source/ref-changelog.md:1015 -msgid "MXNet example and documentation" +#: ../../source/ref-changelog.md:303 +msgid "" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " 
+"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" -#: ../../source/ref-changelog.md:1017 -msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +#: ../../source/ref-changelog.md:305 +msgid "As always, Flower code examples have received many updates." msgstr "" -#: ../../source/ref-changelog.md:1021 +#: ../../source/ref-changelog.md:307 msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " 
+"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " +"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -#: ../../source/ref-changelog.md:1023 -msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +#: ../../source/ref-changelog.md:311 +msgid "**Deprecate Python 3.8 support**" msgstr "" -#: ../../source/ref-changelog.md:1025 +#: ../../source/ref-changelog.md:313 msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. 
Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/ref-changelog.md:1027 +#: ../../source/ref-changelog.md:315 msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -#: ../../source/ref-changelog.md:1029 -msgid "v0.15.0 (2021-03-12)" +#: ../../source/ref-changelog.md:317 +msgid "" +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead." msgstr "" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:319 msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -#: ../../source/ref-changelog.md:1035 +#: ../../source/ref-changelog.md:321 msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." 
msgstr "" -#: ../../source/ref-changelog.md:1037 +#: ../../source/ref-changelog.md:325 msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:327 msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." msgstr "" -#: ../../source/ref-changelog.md:1060 +#: ../../source/ref-changelog.md:329 msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" msgstr "" -#: ../../source/ref-changelog.md:1062 -msgid "v0.14.0 (2021-02-18)" +#: ../../source/ref-changelog.md:331 +msgid "" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." 
msgstr "" -#: ../../source/ref-changelog.md:1066 +#: ../../source/ref-changelog.md:333 msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -#: ../../source/ref-changelog.md:1068 +#: ../../source/ref-changelog.md:335 msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -#: ../../source/ref-changelog.md:1070 -msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." +#: ../../source/ref-changelog.md:337 +msgid "v1.8.0 (2024-04-03)" msgstr "" -#: ../../source/ref-changelog.md:1072 +#: ../../source/ref-changelog.md:343 msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." 
+"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -#: ../../source/ref-changelog.md:1074 +#: ../../source/ref-changelog.md:347 msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" msgstr "" -#: 
../../source/ref-changelog.md:1089 +#: ../../source/ref-changelog.md:349 msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. " +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." msgstr "" -#: ../../source/ref-changelog.md:1091 +#: ../../source/ref-changelog.md:351 msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" msgstr "" -#: ../../source/ref-changelog.md:1093 +#: ../../source/ref-changelog.md:353 msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" 
+"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." msgstr "" -#: ../../source/ref-changelog.md:1095 +#: ../../source/ref-changelog.md:355 msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" msgstr "" -#: ../../source/ref-changelog.md:1112 -msgid "v0.13.0 (2021-01-08)" +#: ../../source/ref-changelog.md:357 +msgid "" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. 
The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbitrary SMPC protocols." msgstr "" -#: ../../source/ref-changelog.md:1116 +#: ../../source/ref-changelog.md:359 msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "" - -#: ../../source/ref-changelog.md:1117 -msgid "Improved documentation" -msgstr "" - -#: ../../source/ref-changelog.md:1118 -msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" -msgstr "" - -#: ../../source/ref-changelog.md:1119 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "" - -#: ../../source/ref-changelog.md:1120 -msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "" - -#: ../../source/ref-changelog.md:1121 -msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" -msgstr "" - -#: ../../source/ref-changelog.md:1123 -msgid "Bugfix:" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" msgstr "" -#: ../../source/ref-changelog.md:1125 +#: ../../source/ref-changelog.md:361 msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))."
-msgstr "" - -#: ../../source/ref-changelog.md:1127 -msgid "v0.12.0 (2020-12-07)" -msgstr "" - -#: ../../source/ref-changelog.md:1129 ../../source/ref-changelog.md:1145 -msgid "Important changes:" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." msgstr "" -#: ../../source/ref-changelog.md:1131 +#: ../../source/ref-changelog.md:363 msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -#: ../../source/ref-changelog.md:1132 +#: ../../source/ref-changelog.md:365 msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"Built-in Differential Privacy is here! Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. 
A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." msgstr "" -#: ../../source/ref-changelog.md:1133 +#: ../../source/ref-changelog.md:367 msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" -msgstr "" - -#: ../../source/ref-changelog.md:1135 -msgid "v0.11.0 (2020-11-30)" -msgstr "" - -#: ../../source/ref-changelog.md:1137 -msgid "Incompatible changes:" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" msgstr "" -#: ../../source/ref-changelog.md:1139 +#: ../../source/ref-changelog.md:369 msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" -msgstr "" - -#: ../../source/ref-changelog.md:1140 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "" - -#: ../../source/ref-changelog.md:1141 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "" - -#: ../../source/ref-changelog.md:1142 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "" - -#: ../../source/ref-changelog.md:1143 -msgid "`on_aggregate_fit` => `aggregate_fit`" +"Built-in Secure Aggregation is here! 
Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we include support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." msgstr "" -#: ../../source/ref-changelog.md:1147 +#: ../../source/ref-changelog.md:371 msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." +"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -#: ../../source/ref-changelog.md:1148 +#: ../../source/ref-changelog.md:373 msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." msgstr "" -#: ../../source/ref-changelog.md:1149 +#: ../../source/ref-changelog.md:375 msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))."
+"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -#: ../../source/ref-changelog.md:1150 +#: ../../source/ref-changelog.md:377 msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +"The Flower Simulation Engine can now run Flower Next projects. For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -#: ../../source/ref-changelog.md:1151 +#: ../../source/ref-changelog.md:379 msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." -msgstr "" - -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" msgstr "" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:381 msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. 
`--max-" +"retries` will define the number of attempts the client should make " +"before it gives up trying to reconnect to the SuperLink, and, `--max-" +"wait-time` defines the time before the SuperNode gives up trying to " +"reconnect to the SuperLink." msgstr "" -#: ../../source/ref-example-projects.rst:10 +#: ../../source/ref-changelog.md:383 msgid "" -"The following examples are available as standalone projects. Quickstart " -"TensorFlow/Keras ---------------------------" +"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -#: ../../source/ref-example-projects.rst:14 +#: ../../source/ref-changelog.md:385 msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselines have been updated as well."
msgstr "" -#: ../../source/ref-example-projects.rst:17 +#: ../../source/ref-changelog.md:387 msgid "" -"`Quickstart TensorFlow (Code) " -"`_" -msgstr "" - -#: ../../source/ref-example-projects.rst:18 -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" msgstr "" -#: ../../source/ref-example-projects.rst:19 +#: ../../source/ref-changelog.md:389 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" -msgstr "" - -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" 
msgstr "" -#: ../../source/ref-example-projects.rst:25 +#: ../../source/ref-changelog.md:391 msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-changelog.md:393 msgid "" -"`Quickstart PyTorch (Code) " -"`_" -msgstr "" - -#: ../../source/ref-example-projects.rst:29 -msgid ":doc:`Quickstart PyTorch (Tutorial) `" -msgstr "" - -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." 
msgstr "" -#: ../../source/ref-example-projects.rst:35 +#: ../../source/ref-changelog.md:395 msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), 
" +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " 
+"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -#: ../../source/ref-example-projects.rst:37 -msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +#: ../../source/ref-changelog.md:401 +msgid "v1.7.0 (2024-02-05)" msgstr "" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-changelog.md:407 msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" -msgstr "" - -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-changelog.md:411 msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:413 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. 
Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." msgstr "" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-changelog.md:415 msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" msgstr "" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:417 msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +#: ../../source/ref-changelog.md:419 +msgid "" +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -#: ../../source/ref-faq.rst:8 +#: ../../source/ref-changelog.md:421 msgid "" -"Yes, it can! Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." 
msgstr "" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-changelog.md:423 msgid "" -"`Flower simulation PyTorch " -"`_" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:425 msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +#: ../../source/ref-changelog.md:427 +msgid "" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" msgstr "" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-changelog.md:429 msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +#: ../../source/ref-changelog.md:431 +msgid "" +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:433 msgid "" -"Yes, it does. Please take a look at our `blog post " -"`_ or check out the code examples:" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
msgstr "" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:435 msgid "" -"`Android Kotlin example `_" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" +#: ../../source/ref-changelog.md:437 +msgid "" +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." msgstr "" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +#: ../../source/ref-changelog.md:439 +msgid "" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:441 msgid "" -"Yes, of course. A list of available examples using Flower within a " -"blockchain environment is available here:" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" msgstr "" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:443 msgid "" -"`Flower meets Nevermined GitHub Repository `_." 
+"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:445 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-changelog.md:447 msgid "" -"`Flower meets KOSMoS `_." +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:449 msgid "" -"`Flower meets Talan blog post `_ ." +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -#: ../../source/ref-faq.rst:32 -msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +#: ../../source/ref-changelog.md:451 +msgid "Many Flower code examples received substantial updates." 
msgstr "" -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +msgid "**Update Flower Baselines**" msgstr "" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:455 msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -#: ../../source/ref-telemetry.md:5 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." +#: ../../source/ref-changelog.md:456 +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" msgstr "" -#: ../../source/ref-telemetry.md:7 -msgid "Principles" +#: ../../source/ref-changelog.md:457 +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" msgstr "" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" +#: ../../source/ref-changelog.md:458 +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" msgstr "" -#: ../../source/ref-telemetry.md:11 -msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." +#: ../../source/ref-changelog.md:459 +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" msgstr "" -#: ../../source/ref-telemetry.md:12 -msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." 
+#: ../../source/ref-changelog.md:460 +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" msgstr "" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:462 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:464 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." 
-msgstr "" - -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" +"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:466 msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. 
Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." msgstr "" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:468 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." -msgstr "" - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " +"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" +#: ../../source/ref-changelog.md:470 +msgid "" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " 
+"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:474 msgid "" -"**Flower version.** Understand 
which versions of Flower are currently " -"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." +"**Deprecate** `start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -#: ../../source/ref-telemetry.md:32 +#: ../../source/ref-changelog.md:476 msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -#: ../../source/ref-telemetry.md:34 +#: ../../source/ref-changelog.md:478 msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" msgstr "" -#: ../../source/ref-telemetry.md:36 +#: ../../source/ref-changelog.md:480 msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." +"Legacy DP wrapper classes are deprecated, but still functional. This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." 
msgstr "" -#: ../../source/ref-telemetry.md:38 +#: ../../source/ref-changelog.md:482 msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -#: ../../source/ref-telemetry.md:40 +#: ../../source/ref-changelog.md:484 msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -#: ../../source/ref-telemetry.md:42 +#: ../../source/ref-changelog.md:486 msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" msgstr "" -#: ../../source/ref-telemetry.md:44 +#: ../../source/ref-changelog.md:488 msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. 
All events "
-"related to that source ID will then be permanently deleted."
+"Experimental fields `sa`, `legacy_server_message` and "
+"`legacy_client_message` were removed from `Task` message. The removed "
+"fields are superseded by the new `RecordSet` abstraction."
 msgstr ""

-#: ../../source/ref-telemetry.md:46
+#: ../../source/ref-changelog.md:490
 msgid ""
-"We will not collect any personally identifiable information. If you think"
-" any of the metrics collected could be misused in any way, please [get in"
-" touch with us](#how-to-contact-us). We will update this page to reflect "
-"any changes to the metrics collected and publish changes in the "
-"changelog."
+"**Retire MXNet examples** "
+"([#2724](https://github.com/adap/flower/pull/2724))"
 msgstr ""

-#: ../../source/ref-telemetry.md:48
+#: ../../source/ref-changelog.md:492
 msgid ""
-"If you think other metrics would be helpful for us to better guide our "
-"decisions, please let us know! We will carefully review them; if we are "
-"confident that they do not compromise user privacy, we may add them."
+"The development of the MXNet framework has ended and the project is now "
+"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet "
+"examples won't receive updates."
 msgstr ""

-#: ../../source/ref-telemetry.md:50
-msgid "How to inspect what is being reported"
+#: ../../source/ref-changelog.md:494
+msgid "v1.6.0 (2023-11-28)"
 msgstr ""

-#: ../../source/ref-telemetry.md:52
+#: ../../source/ref-changelog.md:500
 msgid ""
-"We wanted to make it very easy for you to inspect what anonymous usage "
-"metrics are reported. You can view all the reported telemetry information"
-" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging "
-"is disabled by default. You may use logging independently from "
-"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature "
-"without sending any metrics."
+"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -#: ../../source/ref-telemetry.md:58 +#: ../../source/ref-changelog.md:504 msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" -msgstr "" - -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:506 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:-1 +#: ../../source/ref-changelog.md:508 msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." -msgstr "" - -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." 
msgstr "" -#: ../../source/tutorial-quickstart-android.rst:10 +#: ../../source/ref-changelog.md:510 msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" msgstr "" -#: ../../source/tutorial-quickstart-android.rst:12 +#: ../../source/ref-changelog.md:512 msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 +#: ../../source/ref-changelog.md:514 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" +#: ../../source/ref-changelog.md:516 +msgid "" +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/ref-changelog.md:518 +msgid "" +"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:12 -msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +#: ../../source/ref-changelog.md:520 +msgid "Add gRPC request-response capability to the Android SDK." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#: ../../source/ref-changelog.md:522 msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/ref-changelog.md:526 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:528 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. 
The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:530 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" +#: ../../source/ref-changelog.md:532 +msgid "" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" +#: ../../source/ref-changelog.md:534 +msgid "" +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:536 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. 
We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" +#: ../../source/ref-changelog.md:538 +msgid "" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:540 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 +msgid "" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:548 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" +#: ../../source/ref-changelog.md:550 +msgid "" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" msgstr "" 
-#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" +#: ../../source/ref-changelog.md:552 +msgid "" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/ref-changelog.md:554 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:169 -msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +#: ../../source/ref-changelog.md:556 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" +#: ../../source/ref-changelog.md:558 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:177 -msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. 
Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +#: ../../source/ref-changelog.md:560 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:205 -msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +#: ../../source/ref-changelog.md:562 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" +#: ../../source/ref-changelog.md:564 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" +#: ../../source/ref-changelog.md:566 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 -msgid "" -"And they will be able to connect to the server and start the federated " -"training." +#: ../../source/ref-changelog.md:568 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/ref-changelog.md:570 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." 
+"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/ref-changelog.md:572 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/ref-changelog.md:574 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-changelog.md:576 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." 
+"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" +#: ../../source/ref-changelog.md:578 +msgid "" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-changelog.md:580 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." 
+"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:12 -msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." 
+#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/ref-changelog.md:586 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/ref-changelog.md:588 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/ref-changelog.md:590 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. 
You" -" can do this by using pip:" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" +#: ../../source/ref-changelog.md:592 +msgid "" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" +#: ../../source/ref-changelog.md:594 +msgid "v1.5.0 (2023-08-31)" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:36 +#: ../../source/ref-changelog.md:600 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/ref-changelog.md:604 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. 
We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/ref-changelog.md:606 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:99 +#: ../../source/ref-changelog.md:608 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." 
+"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:102 +#: ../../source/ref-changelog.md:610 msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " 
+"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-changelog.md:612 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-changelog.md:614 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." 
-msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-changelog.md:616 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" -msgstr "" - -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 +#: ../../source/ref-changelog.md:618 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. 
We therefore have to start the server first:" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/ref-changelog.md:620 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." msgstr "" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-changelog.md:622 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." 
+"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-changelog.md:624 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" +#: ../../source/ref-changelog.md:626 +msgid "**Deprecate Python 3.7**" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-changelog.md:628 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." 
-msgstr "" - -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" +#: ../../source/ref-changelog.md:630 +msgid "" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/ref-changelog.md:632 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-changelog.md:634 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/ref-changelog.md:636 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 +#: ../../source/ref-changelog.md:638 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/ref-changelog.md:640 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/ref-changelog.md:642 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:23 +#: ../../source/ref-changelog.md:644 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/ref-changelog.md:646 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/ref-changelog.md:648 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/ref-changelog.md:650 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" +#: ../../source/ref-changelog.md:652 +msgid "" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/ref-changelog.md:654 msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." 
+"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:78 +#: ../../source/ref-changelog.md:656 msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:94 +#: ../../source/ref-changelog.md:658 msgid "" -"Define then the validation of the machine learning network. We loop over" -" the test set and measure the loss and accuracy of the test set." +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/ref-changelog.md:660 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." 
msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/ref-changelog.md:662 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:142 -msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." +#: ../../source/ref-changelog.md:664 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/ref-changelog.md:666 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/ref-changelog.md:668 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. 
Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" +#: ../../source/ref-changelog.md:670 +msgid "" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/ref-changelog.md:684 msgid "" -"update the local model weights with the parameters received from the " -"server" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:189 -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/ref-changelog.md:688 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:196 -#: ../../source/tutorial-quickstart-tensorflow.rst:90 +#: ../../source/ref-changelog.md:690 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. 
In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-changelog.md:692 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" +#: ../../source/ref-changelog.md:694 +msgid "" +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. 
To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-changelog.md:696 msgid "" -"Each client will have its own dataset. You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/ref-changelog.md:698 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/ref-changelog.md:700 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." 
+"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" +#: ../../source/ref-changelog.md:702 +msgid "" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogeneous settings." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/ref-changelog.md:704 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/ref-changelog.md:706 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/ref-changelog.md:708 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." 
+"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" +#: ../../source/ref-changelog.md:710 +msgid "" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/ref-changelog.md:712 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." +"Please note: The REST API is still experimental and will likely change " +"significantly over time." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#: ../../source/ref-changelog.md:714 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." 
+"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-changelog.md:716 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. A complete cycle of parameters updates is " -"called a *round*." +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-changelog.md:718 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. 
You can do this by running:" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -msgid "Since we want to use scikit-learn, let's go ahead and install it:" +#: ../../source/ref-changelog.md:720 +msgid "" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" +#: ../../source/ref-changelog.md:722 +msgid "" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/ref-changelog.md:724 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" +#: ../../source/ref-changelog.md:726 +msgid "" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/ref-changelog.md:728 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" +#: ../../source/ref-changelog.md:730 +msgid "" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#: ../../source/ref-changelog.md:732 +msgid "" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" 
msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" +#: ../../source/ref-changelog.md:734 +msgid "" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " 
+"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/ref-changelog.md:748 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 +#: ../../source/ref-changelog.md:752 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. 
The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 +#: ../../source/ref-changelog.md:754 msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/ref-changelog.md:756 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/ref-changelog.md:758 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. 
Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" +#: ../../source/ref-changelog.md:760 +msgid "" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/ref-changelog.md:764 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 +#: ../../source/ref-changelog.md:766 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." 
+"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/ref-changelog.md:768 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." -msgstr "" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#: ../../source/ref-changelog.md:770 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/ref-changelog.md:772 msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. 
The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 +#: ../../source/ref-changelog.md:774 msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/ref-changelog.md:776 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#: ../../source/ref-changelog.md:778 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"An updated event structure allows, for example, the clustering of events " +"within the same workload." 
msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" +#: ../../source/ref-changelog.md:780 +msgid "" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" +#: ../../source/ref-changelog.md:782 +msgid "" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" +#: ../../source/ref-changelog.md:784 +msgid "" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/ref-changelog.md:786 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#: ../../source/ref-changelog.md:788 +msgid "" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " 
+"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/ref-changelog.md:792 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." 
+"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 -msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." +#: ../../source/ref-changelog.md:806 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
+" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/ref-changelog.md:810 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/ref-changelog.md:812 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/ref-changelog.md:814 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" +#: ../../source/ref-changelog.md:816 +msgid "" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" +#: ../../source/ref-changelog.md:818 +msgid "" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/ref-changelog.md:820 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/ref-changelog.md:822 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" 
+#: ../../source/ref-changelog.md:823 +msgid "" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/ref-changelog.md:824 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/ref-changelog.md:825 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." +"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/ref-changelog.md:827 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." 
+"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" +#: ../../source/ref-changelog.md:829 +msgid "" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:41 +#: ../../source/ref-changelog.md:831 msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:47 +#: ../../source/ref-changelog.md:833 msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:57 +#: ../../source/ref-changelog.md:835 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:60 +#: ../../source/ref-changelog.md:837 msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" +#: ../../source/ref-changelog.md:839 +msgid "" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/ref-changelog.md:841 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/ref-changelog.md:843 msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " -"the partition for the given client based on :code:`node_id`:" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:121 +#: ../../source/ref-changelog.md:845 msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." +"A new code example (`quickstart-pandas`) demonstrates federated analytics" +" with Pandas and Flower. You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/ref-changelog.md:847 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:174 +#: ../../source/ref-changelog.md:849 msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/ref-changelog.md:851 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/ref-changelog.md:853 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data structures" -" for training." +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:196 +#: ../../source/ref-changelog.md:855 msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/ref-changelog.md:859 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." 
+"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:251 +#: ../../source/ref-changelog.md:863 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:269 -msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:291 +#: ../../source/ref-changelog.md:873 msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." 
+"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:294 +#: ../../source/ref-changelog.md:875 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/ref-changelog.md:879 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:311 +#: ../../source/ref-changelog.md:881 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). 
The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/ref-changelog.md:883 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/ref-changelog.md:885 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:356 +#: ../../source/ref-changelog.md:887 msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/ref-changelog.md:889 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." 
-" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/ref-changelog.md:891 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/ref-changelog.md:893 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:518 +#: ../../source/ref-changelog.md:895 msgid "" -"After traversal of all clients' models, a new global model is generated, " -"followed by the serialisation, and sending back to each client." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:523 -msgid "Launch Federated XGBoost!" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:585 +#: ../../source/ref-changelog.md:897 msgid "" -"Congratulations! You've successfully built and run your first federated " -"XGBoost system. The AUC values can be checked in " -":code:`metrics_distributed`. 
One can see that the average AUC increases " -"over FL rounds." +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-changelog.md:899 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:596 +#: ../../source/ref-changelog.md:901 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:603 -msgid "Cyclic training" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:605 +#: ../../source/ref-changelog.md:903 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. 
The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:609 +#: ../../source/ref-changelog.md:905 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:649 +#: ../../source/ref-changelog.md:907 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:690 +#: ../../source/ref-changelog.md:909 msgid "" -"Unlike the original :code:`FedAvg`, we don't perform aggregation here. " -"Instead, we just make a copy of the received client model as global model" -" by overriding :code:`aggregate_fit`." +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:693 +#: ../../source/ref-changelog.md:911 msgid "" -"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`" -" methods ensure the clients to be sequentially selected given FL round:" -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:757 -msgid "Customised data partitioning" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:759 +#: ../../source/ref-changelog.md:913 msgid "" -"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" -" to instantiate the data partitioner based on the given " -":code:`num_partitions` and :code:`partitioner_type`. Currently, we " -"provide four supported partitioner type to simulate the uniformity/non-" -"uniformity in data quantity (uniform, linear, square, exponential)." 
-msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:792 +#: ../../source/ref-changelog.md:915 msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/ref-changelog.md:917 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:827 +#: ../../source/ref-changelog.md:919 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:831 -msgid "Flower simulation" +#: ../../source/ref-changelog.md:921 +msgid "" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." 
msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:832 +#: ../../source/ref-changelog.md:923 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:866 +#: ../../source/ref-changelog.md:925 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:921 -msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. After the simulation begins, " -"the clients won't need to pre-process their partitions again." 
+#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:924 -msgid "Then, we define the strategies and other hyper-parameters:" +#: ../../source/ref-changelog.md:933 +msgid "Highlights" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:975 -msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:995 -msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 +#: ../../source/ref-changelog.md:938 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 +#: ../../source/ref-changelog.md:942 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." 
+"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" -msgstr "" +#: ../../source/ref-changelog.md:944 +msgid "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " 
+"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." +msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 +#: ../../source/ref-changelog.md:948 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -msgid "We also have an argument parser for simulation:" +#: ../../source/ref-changelog.md:950 +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -msgid "This integrates all arguments for both client and server sides." +#: ../../source/ref-changelog.md:952 +msgid "" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" +#: ../../source/ref-changelog.md:954 +msgid "" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. 
`ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 +#: ../../source/ref-changelog.md:956 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" +#: ../../source/ref-changelog.md:958 +msgid "" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -msgid "To run the same experiment with Flower simulation:" +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" msgstr "" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:964 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. 
In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:966 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 -msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" 
+#: ../../source/ref-changelog.md:970 +msgid "" +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" +#: ../../source/ref-changelog.md:972 +msgid "" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:974 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" +#: ../../source/ref-changelog.md:976 +msgid "" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" +#: ../../source/ref-changelog.md:978 +msgid "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:980 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:982 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. 
If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:986 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" +#: ../../source/ref-changelog.md:988 +msgid "" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:990 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" +#: ../../source/ref-changelog.md:992 +msgid "" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:994 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" +#: ../../source/ref-changelog.md:996 +msgid "" +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" +#: ../../source/ref-changelog.md:998 +msgid "" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:1000 msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:1002 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" +#: ../../source/ref-changelog.md:1004 +msgid "" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." 
msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +#: ../../source/ref-changelog.md:1008 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +#: ../../source/ref-changelog.md:1010 msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/ref-changelog.md:1012 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" 
+"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" msgstr "" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 +#: ../../source/ref-changelog.md:1014 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" +#: ../../source/ref-changelog.md:1016 +msgid "" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:1018 msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +#: ../../source/ref-changelog.md:1020 msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +#: ../../source/ref-changelog.md:1022 msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" +"Many code examples received small or even large maintenance updates, " +"among them are" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 -msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." 
+#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 -msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 -msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" +msgstr "" + +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/ref-changelog.md:1031 msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:1033 msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." 
+"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +#: ../../source/ref-changelog.md:1035 msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +#: ../../source/ref-changelog.md:1037 msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/ref-changelog.md:1041 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +#: ../../source/ref-changelog.md:1042 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/ref-changelog.md:1043 msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:1044 msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. 
" -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 -msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/ref-changelog.md:1050 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. 
These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 -msgid "Step 3: Custom serialization" +#: ../../source/ref-changelog.md:1052 +msgid "" +"The first preview release of Flower Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +#: ../../source/ref-changelog.md:1054 msgid "" -"Here we will explore how to implement custom serialization with a simple " -"example." +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +#: ../../source/ref-changelog.md:1056 msgid "" -"But first what is serialization? Serialization is just the process of " -"converting an object into raw bytes, and equally as important, " -"deserialization is the process of converting raw bytes back into an " -"object. This is very useful for network communication. Indeed, without " -"serialization, you could not just a Python object through the internet." +"Preview support for Flower clients written in C++. 
The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/ref-changelog.md:1058 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +#: ../../source/ref-changelog.md:1060 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 -msgid "Our custom serialization/deserialization functions" +#: ../../source/ref-changelog.md:1062 +msgid "" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +#: ../../source/ref-changelog.md:1064 msgid "" -"This is where the real serialization/deserialization will happen, " -"especially in ``ndarray_to_sparse_bytes`` for serialization and " -"``sparse_bytes_to_ndarray`` for deserialization." +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 +#: ../../source/ref-changelog.md:1066 msgid "" -"Note that we imported the ``scipy.sparse`` library in order to convert " -"our arrays." +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" +#: ../../source/ref-changelog.md:1068 +msgid "" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1070 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." 
+"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/ref-changelog.md:1072 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/ref-changelog.md:1074 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/ref-changelog.md:1076 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." 
+"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" +#: ../../source/ref-changelog.md:1078 +msgid "" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/ref-changelog.md:1080 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" +#: ../../source/ref-changelog.md:1082 +msgid "" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +#: ../../source/ref-changelog.md:1084 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" +#: ../../source/ref-changelog.md:1086 +msgid "" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" +#: ../../source/ref-changelog.md:1088 +msgid "" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/ref-changelog.md:1090 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 +#: ../../source/ref-changelog.md:1092 msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." 
msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" +#: ../../source/ref-changelog.md:1096 +msgid "" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/ref-changelog.md:1097 msgid "" -"`Check out Flower Code Examples " -"`__" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +#: ../../source/ref-changelog.md:1098 msgid "" -"`Use Flower Baselines for your research " -"`__" +"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 +#: ../../source/ref-changelog.md:1099 msgid "" -"`Watch Flower Summit 2023 videos `__" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" +#: ../../source/ref-changelog.md:1100 +msgid "" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" 
+#: ../../source/ref-changelog.md:1104 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1105 msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" +#: ../../source/ref-changelog.md:1106 +msgid "" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:1107 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." 
+"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 +#: ../../source/ref-changelog.md:1108 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 +#: ../../source/ref-changelog.md:1109 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" +#: ../../source/ref-changelog.md:1110 +msgid "" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 +#: ../../source/ref-changelog.md:1111 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. 
In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +#: ../../source/ref-changelog.md:1117 msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 +#: ../../source/ref-changelog.md:1119 msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. 
Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 +#: ../../source/ref-changelog.md:1121 msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 +#: ../../source/ref-changelog.md:1123 msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +#: ../../source/ref-changelog.md:1125 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 +#: ../../source/ref-changelog.md:1127 msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" +#: ../../source/ref-changelog.md:1129 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/ref-changelog.md:1131 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" +#: ../../source/ref-changelog.md:1133 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/ref-changelog.md:1135 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" +#: ../../source/ref-changelog.md:1137 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" +#: ../../source/ref-changelog.md:1139 +msgid "" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 +#: ../../source/ref-changelog.md:1141 msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). 
This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#: ../../source/ref-changelog.md:1143 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" +#: ../../source/ref-changelog.md:1145 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/ref-changelog.md:1147 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." 
+"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" +#: ../../source/ref-changelog.md:1149 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/ref-changelog.md:1151 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/ref-changelog.md:1153 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." +"SSL enables secure encrypted connections between clients and servers. 
" +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/ref-changelog.md:1155 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." -" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" +#: ../../source/ref-changelog.md:1157 +msgid "" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#: ../../source/ref-changelog.md:1159 msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." 
+"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/ref-changelog.md:1161 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" +#: ../../source/ref-changelog.md:1165 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#: ../../source/ref-changelog.md:1166 msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#: ../../source/ref-changelog.md:1167 msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: 
../../source/ref-changelog.md:1168 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#: ../../source/ref-changelog.md:1169 msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" +#: ../../source/ref-changelog.md:1170 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +#: ../../source/ref-changelog.md:1171 msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. 
This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +#: ../../source/ref-changelog.md:1175 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" +msgstr "" + +#: ../../source/ref-changelog.md:1177 +msgid "" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. 
The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/ref-changelog.md:1183 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +#: ../../source/ref-changelog.md:1185 msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. 
" +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +#: ../../source/ref-changelog.md:1187 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" +#: ../../source/ref-changelog.md:1189 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" +#: ../../source/ref-changelog.md:1191 +msgid "" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/ref-changelog.md:1192 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. 
``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +#: ../../source/ref-changelog.md:1194 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" +#: ../../source/ref-changelog.md:1196 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/ref-changelog.md:1198 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/ref-changelog.md:1200 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). 
Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/ref-changelog.md:1204 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/ref-changelog.md:1205 msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +#: ../../source/ref-changelog.md:1206 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +#: ../../source/ref-changelog.md:1207 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. 
It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:1208 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" +#: ../../source/ref-changelog.md:1212 +msgid "" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/ref-changelog.md:1214 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." 
msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +#: ../../source/ref-changelog.md:1216 msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" msgstr "" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +#: ../../source/ref-changelog.md:1218 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" +#: ../../source/ref-changelog.md:1220 +msgid "" +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:1222 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"This example has been replaced by a new example. 
The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" +msgstr "" + +#: ../../source/ref-changelog.md:1228 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" +#: ../../source/ref-changelog.md:1233 +msgid "" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/ref-changelog.md:1235 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" +#: ../../source/ref-changelog.md:1237 +msgid "" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/ref-changelog.md:1239 msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/ref-changelog.md:1241 msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." 
msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" +#: ../../source/ref-changelog.md:1243 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/ref-changelog.md:1245 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/ref-changelog.md:1247 msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different " -"strategy this time:" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/ref-changelog.md:1251 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +#: ../../source/ref-changelog.md:1255 msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." 
+"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/ref-changelog.md:1257 msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/ref-changelog.md:1259 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). 
Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" +#: ../../source/ref-changelog.md:1261 +msgid "" +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" +msgstr "" + +#: ../../source/ref-changelog.md:1267 msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. 
In this example," -" it reads ``server_round`` and ``local_epochs`` and uses those values to " -"improve the logging and configure the number of local training epochs:" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +#: ../../source/ref-changelog.md:1269 msgid "" -"So how can we send this config dictionary from server to clients? The " -"built-in Flower Strategies provide way to do this, and it works similarly" -" to the way server-side evaluation works. We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +#: ../../source/ref-changelog.md:1271 msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/ref-changelog.md:1290 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." 
+"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1294 msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/ref-changelog.md:1300 msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format +#: ../../source/ref-changelog.md:1302 msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. 
Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:1304 msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +#: ../../source/ref-changelog.md:1306 msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. 
" -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." msgstr "" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +#: ../../source/ref-changelog.md:1308 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" +#: ../../source/ref-changelog.md:1323 +msgid "" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +#: ../../source/ref-changelog.md:1325 msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/ref-changelog.md:1327 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#: ../../source/ref-changelog.md:1329 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! And if anything is " -"unclear, head over to the ``#questions`` channel." +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/ref-changelog.md:1350 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 -msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." 
+#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/ref-changelog.md:1354 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|01471150fd5144c080a176b43e92a3ff|" +#: ../../source/ref-changelog.md:1355 +msgid "" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#: ../../source/ref-changelog.md:1359 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. It gets created somewhere else." +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 -msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|9bc21c7dbd17444a8f070c60786e3484|" +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" +#: ../../source/ref-changelog.md:1365 +msgid "" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +#: ../../source/ref-changelog.md:1366 msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." 
+"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|3047bbce54b34099ae559963d0420d79|" +#: ../../source/ref-changelog.md:1367 +msgid "" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" +msgstr "" + +#: ../../source/ref-changelog.md:1373 msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. 
To " +"migrate rename the following `Strategy` methods accordingly:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 -msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|c24c1478b30e4f74839208628a842d1e|" +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" +#: ../../source/ref-changelog.md:1381 +msgid "" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" +#: ../../source/ref-changelog.md:1382 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#: ../../source/ref-changelog.md:1383 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. 
Cases, where all the data is naturally available on a " -"centralized server." +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|1b3613d7a58847b59e1d3180802dbc09|" +#: ../../source/ref-changelog.md:1384 +msgid "" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" +#: ../../source/ref-changelog.md:1385 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" +msgstr "" + +#: ../../source/ref-example-projects.rst:4 msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|9980b5213db547d0b8024a50992b9e3f|" +#: ../../source/ref-example-projects.rst:9 +msgid "The following examples are available as standalone projects." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" +#: ../../source/ref-example-projects.rst:12 +msgid "Quickstart TensorFlow/Keras" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#: ../../source/ref-example-projects.rst:14 msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#: ../../source/ref-example-projects.rst:17 msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"`Quickstart TensorFlow (Code) " +"`_" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +#: ../../source/ref-example-projects.rst:19 +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:20 msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. 
If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." +"`Quickstart TensorFlow (Blog Post) `_" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" +msgstr "" + +#: ../../source/ref-example-projects.rst:26 msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." 
+"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" +#: ../../source/ref-example-projects.rst:29 +msgid "" +"`Quickstart PyTorch (Code) " +"`_" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#: ../../source/ref-example-projects.rst:31 +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" + +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" +msgstr "" + +#: ../../source/ref-example-projects.rst:36 msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +#: ../../source/ref-example-projects.rst:38 msgid "" -"Financial information from different organizations to detect financial " -"fraud" +"`PyTorch: From Centralized To Federated (Code) " +"`_" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" +#: ../../source/ref-example-projects.rst:40 +msgid "" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +#: ../../source/ref-example-projects.rst:46 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. 
In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" +#: ../../source/ref-example-projects.rst:49 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#: ../../source/ref-example-projects.rst:51 msgid "" -"Federated learning simply reverses this approach. It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" +#: ../../source/ref-faq.rst:4 +msgid "" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#: ../../source/ref-faq.rst:9 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. 
We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +#: ../../source/ref-faq.rst:11 msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." +"`Flower simulation PyTorch " +"`_" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" +#: ../../source/ref-faq.rst:12 +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +#: ../../source/ref-faq.rst:16 msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" +#: ../../source/ref-faq.rst:20 +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +#: ../../source/ref-faq.rst:22 msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" +"`Android Kotlin example `_" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr "" + +#: ../../source/ref-faq.rst:27 msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|032eb6fed6924ac387b9f13854919196|" +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." 
msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +#: ../../source/ref-faq.rst:31 +msgid "Local blockchain with federated learning simulation." +msgstr "" + +#: ../../source/ref-faq.rst:32 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" +"`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +#: ../../source/ref-faq.rst:33 msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" + +#: ../../source/ref-faq.rst:34 +msgid "" +"`Flower meets KOSMoS `_." +msgstr "" + +#: ../../source/ref-faq.rst:35 +msgid "" +"`Flower meets Talan blog post `_ ." +msgstr "" + +#: ../../source/ref-faq.rst:36 +msgid "" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" + +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "" + +#: ../../source/ref-telemetry.md:3 +msgid "" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." 
+msgstr "" + +#: ../../source/ref-telemetry.md:5 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "" + +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "" + +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "" + +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|fbf225add7fd4df5a9bf25a95597d954|" -msgstr "" +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." +msgstr "" + +#: ../../source/ref-telemetry.md:13 +msgid "" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" +msgstr "" + +#: ../../source/ref-telemetry.md:14 +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." +msgstr "" + +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "" + +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. 
Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" +msgstr "" + +#: ../../source/ref-telemetry.md:24 +msgid "" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." +msgstr "" + +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "" + +#: ../../source/ref-telemetry.md:30 +msgid "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." +msgstr "" + +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "" + +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "" + +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." +msgstr "" + +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." 
+msgstr "" + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." +msgstr "" + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." +msgstr "" + +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." +msgstr "" + +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." +msgstr "" + +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." 
+msgstr "" + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" + +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "" + +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "" + +#: ../../source/ref-telemetry.md:66 +msgid "" +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:4 +msgid "Quickstart Android" +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:9 +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:11 +msgid "" +"Please refer to the `full code example " +"`_ to learn " +"more." +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. 
It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:18 +msgid "" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +msgid "Next, activate your environment, then run:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:41 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:108 +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub 
repository." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:12 +msgid "" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 +msgid "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:25 +msgid "" +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 +msgid "" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:109 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +msgid "" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +msgid "The Data" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:126 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +msgid "The Model" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:173 +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:185 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." 
+msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:188 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +msgid "The ClientApp" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. 
The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:283 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +msgid "The ServerApp" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. 
You can find the default " +"value defined in the ``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:356 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:361 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:4 +msgid "Quickstart iOS" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:9 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:12 +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:20 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. 
A complete cycle of weight updates is called a *round*." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:26 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:33 +msgid "Or Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 +msgid "Flower Client" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:80 +msgid "" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:86 +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:94 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. 
For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:112 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:118 +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:133 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:141 +msgid "" +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "Flower Server" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:150 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. 
In a file named " +"``server.py``, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 +msgid "Train the model, federated!" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:171 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:177 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:9 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. 
Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:20 +msgid "" +"Before we start building our JAX example, we need install the packages " +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:28 +msgid "Linear Regression with JAX" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:30 +msgid "" +"We begin with a brief description of the centralized training code based " +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:34 +msgid "" +"Let's create a new file called ``jax_training.py`` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:51 +msgid "The ``load_data()`` function loads the mentioned training and test sets." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:63 +msgid "" +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:73 +msgid "" +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. 
The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:95 +msgid "" +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:107 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:126 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:132 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:137 +msgid "JAX meets Flower" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:139 +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." 
+msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid "" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:182 +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. ``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:194 +msgid "``set_parameters (optional)``" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:193 +msgid "transform parameters to NumPy ``ndarray``'s" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:203 +msgid "get the updated local model parameters and return them to the server" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:208 +msgid "return the local loss to the server" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:210 +msgid "" +"The challenging part is to transform the JAX model parameters from " +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:213 +msgid "" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. 
So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:286 +msgid "Having defined the federation process, we can run it." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:315 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:321 +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:325 +msgid "" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:4 +msgid "Quickstart MLX" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:10 +msgid "" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:25 +msgid "" +"Then, run the command below. 
You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:102 +msgid "" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:116 +msgid "" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:157 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:180 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:201 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:219 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:228 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. 
We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:243 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:259 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:272 +msgid "" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:277 +msgid "Putting everything together we have:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:331 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." 
+msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:363 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:390 +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:11 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:117 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:152 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:177 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. 
In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:226 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:282 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:309 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:348 +msgid "" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +msgid "Video tutorial" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." 
+msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +msgid "" +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 +msgid "Or simply install all dependencies using Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within ``utils.py``. The " +"``utils.py`` contains different functions defining all the machine " +"learning basics:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +msgid "``get_model_parameters()``" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid "``set_model_params()``" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "``set_initial_params()``" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +msgid "" +"Please check out ``utils.py`` `here " +"`_ for more details. The pre-defined functions are used in" +" the ``client.py`` and imported. 
The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " +"argument." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +msgid "" +"Next, the logistic regression model is defined and initialized with " +"``utils.set_initial_params()``." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +msgid "" +"The Flower server interacts with clients through an interface called " +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +msgid "" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. 
Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +msgid "``set_parameters`` (optional)" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +msgid "is directly imported with ``utils.set_model_params()``" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 +msgid "set the local model weights" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "train the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 +msgid "return the updated local model weights" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "test the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 +msgid "The methods can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:163 +msgid "" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +msgid "" +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. 
If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +msgid "``server.py``, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +msgid "" +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +msgid "" +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. 
We, therefore, have to start the server " +"first:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "Open another terminal and start the second client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:6 +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. 
The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:141 +msgid "" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:170 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:203 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. 
For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:234 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:270 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:282 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:13 +msgid "Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:15 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. 
It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:21 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "Why federated XGBoost?" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:36 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:46 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:48 +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:51 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:67 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:71 +msgid "" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:99 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:101 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:115 +msgid "" +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " +"partition for the given client based on ``partition_id``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:135 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for ``xgboost`` package." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:149 +msgid "" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "Finally, we define the hyper-parameters used for XGBoost training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:190 +msgid "" +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:195 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:197 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:219 +msgid "" +"All required parameters defined above are passed to ``XgbClient``'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:221 +msgid "" +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:236 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:278 +msgid "" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. 
From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:298 +msgid "" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:330 +msgid "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:333 +msgid "" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:350 +msgid "" +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:360 +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:364 +msgid "" +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:367 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:401 +msgid "" +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and weighted-average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:406 +msgid "Then, we start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:418 +msgid "Tree-based bagging aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:420 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:422 +msgid "" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:519 +msgid "" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:579 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:584 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:664 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:668 +msgid "" +"The full `source code `_ for this example can be found in ``examples" +"/xgboost-quickstart``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:673 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:675 +msgid "" +"Now that you know how federated XGBoost works with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:685 +msgid "Cyclic training" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:687 +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. 
The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:693 +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:733 +msgid "" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:775 +msgid "" +"Unlike the original ``FedAvg``, we don't perform aggregation here. " +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:778 +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:840 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:842 +msgid "" +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:873 +msgid "Customised centralised/distributed evaluation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:875 +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_utils.py``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:907 +msgid "" +"This function returns an evaluation function which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:911 +msgid "" +"As for distributed evaluation on the clients, it's the same as the quick-" +"start example by overriding the ``evaluate()`` method inside the " +"``XgbClient`` class in ``client_utils.py``." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:916 +msgid "Flower simulation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:918 +msgid "" +"We also provide an example code (``sim.py``) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:954 +msgid "" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1010 +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1014 +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1065 +msgid "" +"After that, we start the simulation by calling " +"``fl.simulation.start_simulation``:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1085 +msgid "" +"One of the key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1126 +msgid "Arguments parser" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1128 +msgid "" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. " +"Let's first see the server side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1175 +msgid "" +"This allows users to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with ``--centralised-eval``, the server " +"will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1180 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1234 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1239 +msgid "We also have an argument parser for simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1317 +msgid "This integrates all arguments for both client and server sides." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1320 +msgid "Example commands" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1322 +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1329 +msgid "Then, on each client terminal, we start the clients:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1335 +msgid "To run the same experiment with Flower simulation:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:1341 +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. 
We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." +" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things that we didn't have to do before." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +msgid "" +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 +msgid "" +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. 
Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 +msgid "" +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 +msgid "" +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 +msgid "" +"This works as expected, ten clients are training for three rounds of " +"federated learning." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." 
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550
+msgid ""
+"The only *real* difference between Client and NumPyClient is that "
+"NumPyClient takes care of serialization and deserialization for you. It "
+"can do so because it expects you to return parameters as NumPy ndarray's,"
+" and it knows how to handle these. This makes working with machine "
+"learning libraries that have good NumPy support (most of them) a breeze."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552
+msgid ""
+"In terms of API, there's one major difference: all methods in Client take"
+" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return "
+"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in "
+"``NumPyClient`` on the other hand have multiple arguments (e.g., "
+"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return"
+" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in "
+"``NumPyClient.fit``) if there are multiple things to handle. These "
+"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual "
+"values you're used to from ``NumPyClient``."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565
+msgid "Step 3: Custom serialization"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567
+msgid ""
+"Here we will explore how to implement custom serialization with a simple "
+"example."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569
+msgid ""
+"But first what is serialization? Serialization is just the process of "
+"converting an object into raw bytes, and equally as important, "
+"deserialization is the process of converting raw bytes back into an "
+"object. This is very useful for network communication. Indeed, without "
+"serialization, you could not just send a Python object through the "
+"internet."
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. This means that serialization is an essential part of Federated " +"Learning." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 +msgid "" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 +msgid "" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 +msgid "" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." 
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672
+msgid ""
+"Indeed, in ``get_parameters`` we need to serialize the parameters we got "
+"from our network using our custom ``ndarrays_to_sparse_parameters`` "
+"defined above."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674
+msgid ""
+"In ``fit``, we first need to deserialize the parameters coming from the "
+"server using our custom ``sparse_parameters_to_ndarrays`` and then we "
+"need to serialize our local results with "
+"``ndarrays_to_sparse_parameters``."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676
+msgid ""
+"In ``evaluate``, we will only need to deserialize the global parameters "
+"with our custom function."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781
+msgid "Server-side"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783
+msgid ""
+"For this example, we will just use ``FedAvg`` as a strategy. To change "
+"the serialization and deserialization here, we only need to reimplement "
+"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other"
+" functions of the strategy will be inherited from the super class "
+"``FedAvg``."
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785
+msgid "As you can see, only one line has changed in ``evaluate``:"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791
+msgid ""
+"And for ``aggregate_fit``, we will first deserialize every result we "
+"received:"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800
+msgid "And then serialize the aggregated result:"
+msgstr ""
+
+#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959
+msgid "We can now run our custom serialization example!"
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +msgid "`Check out Flower Code Examples `__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 +msgid "" +"`Use Flower Baselines for your research " +"`__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 +msgid "" +"`Watch Flower AI Summit 2024 videos `__" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +msgid "Let's get started! 🌼" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 +msgid "" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +msgid "Install dependencies" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 +msgid "" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +msgid "Load the data" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 +msgid "" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 +msgid "" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. 
We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +msgid "" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +msgid "" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. 
If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +msgid "Define the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +msgid "Train the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. 
All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +msgid "Update model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +msgid "" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
+" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +msgid "Define the Flower ClientApp" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. 
To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). 
Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. 
The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Clone o repositório do flower." + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +msgid "" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +msgid "" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. 
" +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +msgid "Run the training" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +msgid "" +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. ``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, python-format +msgid "" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. 
``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 +msgid "" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. 
The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 image classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +msgid "Let's move beyond FedAvg with Flower strategies! 
🌼" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the parameters as follows:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. 
In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +msgid "" +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different " +"strategy this time:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. 
This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +msgid "Finally, we run the simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. 
In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide a way to do this, and it works " +"similarly to the way server-side evaluation works. We provide a function " +"to the strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +msgid "" +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|ac0a9766e26044d6aea222a829859b20|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|36cd6e248b1443ce8a82b5a025bba368|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|7605632e1b0f49599ffacf841491fcfb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|5405ed430e4746e28b083b146fb71731|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|a389e87dab394eb48a8949aa2397687b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. 
Those reasons include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. 
Here's the single-sentence " +"explanation:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|89c412136a5146ec8dc32c0973729f12|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a7579ad7734347508e959d9e14f2f53d|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. 
This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. 
This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. 
It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|55472eef61274ba1b739408607e109df|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" + +#~ msgid "" +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you want have to do it. " +#~ "Usually it should be enough to " +#~ "install Docker on your system and " +#~ "ensure its available on your command " +#~ "line. Additionally, install the `VSCode " +#~ "Containers Extension `_." 
+#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " +#~ "(without extras)" +#~ msgstr "" + +#~ msgid "" +#~ "``flwr = { path = " +#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" +#~ " [\"simulation\"] }`` (with extras)" +#~ msgstr "" + +#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgstr "" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.7.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" + +#~ msgid "Before the release" +#~ msgstr "" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" + +#~ msgid "" +#~ "Tag the release commit with the " +#~ "version number as soon as the PR" +#~ " is merged: ``git tag v0.12.3``, then" +#~ " ``git push --tags``. This will " +#~ "create a draft release on GitHub " +#~ "containing the correct artifacts and the" +#~ " relevant part of the changelog." 
+#~ msgstr "" + +#~ msgid "" +#~ "Note that, in order to build the" +#~ " documentation locally (with ``poetry run" +#~ " make html``, like described below), " +#~ "`Pandoc _` needs " +#~ "to be installed on the system." +#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_ and examples " +#~ "of `good first contributions " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This will create a `flower/` (or " +#~ "the name of your fork if you " +#~ "renamed it) folder in the current " +#~ "working directory." +#~ msgstr "" + +#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgstr "" + +#~ msgid "" +#~ "Once you click the `Compare & pull" +#~ " request` button, you should see " +#~ "something similar to this:" +#~ msgstr "" + +#~ msgid "Find the source file in `doc/source`" +#~ msgstr "" + +#~ msgid "" +#~ "Make the change in the `.rst` file" +#~ " (beware, the dashes under the title" +#~ " should be the same length as " +#~ "the title itself)" +#~ msgstr "" + +#~ msgid "Change the file name to `save-progress.rst`" +#~ msgstr "" + +#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgstr "" + +#~ msgid "" +#~ "This will cause a redirect from " +#~ "`saving-progress.html` to `save-progress.html`," +#~ " old links will continue to work." +#~ msgstr "" + +#~ msgid "" +#~ "For the lateral navigation bar to " +#~ "work properly, it is very important " +#~ "to update the `index.rst` file as " +#~ "well. This is where we define the" +#~ " whole arborescence of the navbar." 
+#~ msgstr "" + +#~ msgid "Find and modify the file name in `index.rst`" +#~ msgstr "" + +#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgstr "" + +#~ msgid "`Python 3.7 `_ or above" +#~ msgstr "" + +#~ msgid "" +#~ "First, clone the `Flower repository " +#~ "`_ from GitHub::" +#~ msgstr "" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" + +#~ msgid "" +#~ "If you don't have :code:`pyenv` " +#~ "installed, you can use the following " +#~ "script that will install pyenv, set " +#~ "it up and create the virtual " +#~ "environment (with :code:`Python 3.8.17` by " +#~ "default)::" +#~ msgstr "" + +#~ msgid "" +#~ "Third, install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" +#~ msgstr "" + +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "_`. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental, the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. 
Therefore, we use " +#~ "an adaptive approach [andrew]_ that " +#~ "continuously adjusts the clipping threshold" +#~ " to track a prespecified quantile of" +#~ " the update norm distribution." +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realises the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan, H. Brendan, et al. \"Learning" +#~ " differentially private recurrent language " +#~ "models.\" arXiv preprint arXiv:1710.06963 " +#~ "(2017)." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "private learning with adaptive clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems 34 (2021): 17455-17466." +#~ msgstr "" + +#~ msgid "" +#~ "The following command can be used " +#~ "to verfiy if Flower was successfully " +#~ "installed. 
If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "" + +#~ msgid "start_client" +#~ msgstr "" + +#~ msgid "start_numpy_client" +#~ msgstr "" + +#~ msgid "start_simulation" +#~ msgstr "" + +#~ msgid "server.start_server" +#~ msgstr "" + +#~ msgid "server.strategy" +#~ msgstr "" + +#~ msgid "server.strategy.Strategy" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "" + +#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgstr "" + +#~ msgid "server.strategy.Krum" +#~ msgstr "" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "" + +#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgstr "" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" + +#~ msgid "" +#~ "Using the `client_fn`, Flower clients " +#~ "can interchangeably run as standalone " +#~ "processes (i.e. via `start_client`) or " +#~ "in simulation (i.e. via `start_simulation`)" +#~ " without requiring changes to how the" +#~ " client class is defined and " +#~ "instantiated. Calling `start_numpy_client` is " +#~ "now deprecated." 
+#~ msgstr "" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384)), " +#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to baselines** " +#~ "([#2301](https://github.com/adap/flower/pull/2301), " +#~ "[#2305](https://github.com/adap/flower/pull/2305), " +#~ "[#2307](https://github.com/adap/flower/pull/2307), " +#~ "[#2327](https://github.com/adap/flower/pull/2327), " +#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ msgstr "" + +#~ msgid "" +#~ "**General updates to the simulation " +#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " +#~ "[#2447](https://github.com/adap/flower/pull/2447), " +#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ msgstr "" + +#~ msgid "" +#~ "**General improvements** " +#~ "([#2309](https://github.com/adap/flower/pull/2309), " +#~ "[#2310](https://github.com/adap/flower/pull/2310), " +#~ "[2313](https://github.com/adap/flower/pull/2313), " +#~ "[#2316](https://github.com/adap/flower/pull/2316), " +#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +#~ " [#2360](https://github.com/adap/flower/pull/2360), " +#~ "[#2402](https://github.com/adap/flower/pull/2402), " +#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ msgstr "" + +#~ msgid "" +#~ "`flower-superlink --driver-api-address " +#~ "\"0.0.0.0:8081\" --fleet-api-address " +#~ "\"0.0.0.0:8086\"`" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. 
If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using XGBoost and " +#~ "Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" + +#~ msgid "" +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we install the necessary packages" +#~ " for PyTorch (``torch`` and " +#~ "``torchvision``) and Flower (``flwr``):" +#~ msgstr "" + +#~ msgid "" +#~ "Federated learning can be applied to " +#~ "many different types of tasks across " +#~ "different domains. In this tutorial, we" +#~ " introduce federated learning by training" +#~ " a simple convolutional neural network " +#~ "(CNN) on the popular CIFAR-10 dataset." 
+#~ " CIFAR-10 can be used to train " +#~ "image classifiers that distinguish between " +#~ "images from ten different classes:" +#~ msgstr "" + +#~ msgid "" +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" + +#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" +#~ msgstr "" + +#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" +#~ msgstr "" + +#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" +#~ msgstr "" + +#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ msgstr "" + +#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" +#~ msgstr "" + +#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" +#~ msgstr "" + +#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgstr "" + +#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgstr "" + +#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgstr "" + +#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgstr "" + +#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgstr "" + +#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgstr "" + +#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgstr "" + +#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgstr "" + +#~ msgid "" +#~ "Please follow the first section on " +#~ "`Run Flower using Docker " +#~ "`_ which covers this" +#~ " step in more detail." +#~ msgstr "" + +#~ msgid "" +#~ "Since `Flower 1.5 `_ we have " +#~ "introduced translations to our doc " +#~ "pages, but, as you might have " +#~ "noticed, the translations are often " +#~ "imperfect. 
If you speak languages other" +#~ " than English, you might be able " +#~ "to help us in our effort to " +#~ "make Federated Learning accessible to as" +#~ " many people as possible by " +#~ "contributing to those translations! This " +#~ "might also be a great opportunity " +#~ "for those wanting to become open " +#~ "source contributors with little prerequistes." +#~ msgstr "" + +#~ msgid "" +#~ "You input your translation in the " +#~ "textbox at the top and then, once" +#~ " you are happy with it, you " +#~ "either press ``Save and continue`` (to" +#~ " save the translation and go to " +#~ "the next untranslated string), ``Save " +#~ "and stay`` (to save the translation " +#~ "and stay on the same page), " +#~ "``Suggest`` (to add your translation to" +#~ " suggestions for other users to " +#~ "view), or ``Skip`` (to go to the" +#~ " next untranslated string without saving" +#~ " anything)." +#~ msgstr "" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." +#~ msgstr "" + +#~ msgid "" +#~ "Source: `Official VSCode documentation " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Developing inside a Container " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Remote development in Containers " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "If you are not familiar with " +#~ "Flower Baselines, you should probably " +#~ "check-out our `contributing guide for " +#~ "baselines `_." +#~ msgstr "" + +#~ msgid "" +#~ "You should then check out the open" +#~ " `issues " +#~ "`_" +#~ " for baseline requests. If you find" +#~ " a baseline that you'd like to " +#~ "work on and that has no assignes," +#~ " feel free to assign it to " +#~ "yourself and start working on it!" 
+#~ msgstr "" + +#~ msgid "" +#~ "If you're familiar with how contributing" +#~ " on GitHub works, you can directly" +#~ " checkout our `getting started guide " +#~ "for contributors `_." +#~ msgstr "" + +#~ msgid "" +#~ "Git is a distributed version control " +#~ "tool. This allows for an entire " +#~ "codebase's history to be stored and " +#~ "every developer's machine. It is a " +#~ "software that will need to be " +#~ "installed on your local machine, you " +#~ "can follow this `guide " +#~ "`_ to set it up." +#~ msgstr "" + +#~ msgid "" +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "https://github.com/adap/flower (while connected to" +#~ " your GitHub account) and click the" +#~ " ``Fork`` button situated on the top" +#~ " right of the page." +#~ msgstr "" + +#~ msgid "" +#~ "Now we will add an upstream " +#~ "address to our repository. Still in " +#~ "the same directroy, we must run " +#~ "the following command:" +#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by following " +#~ "this `getting started guide for " +#~ "contributors`_ (note that you won't need" +#~ " to clone the repository). Once you" +#~ " are able to write code and " +#~ "test it, you can finally start " +#~ "making changes!" +#~ msgstr "" + +#~ msgid "" +#~ "For our documentation, we’ve started to" +#~ " use the `Diàtaxis framework " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Our “How to” guides should have " +#~ "titles that continue the sencence “How" +#~ " to …”, for example, “How to " +#~ "upgrade to Flower 1.0”." +#~ msgstr "" + +#~ msgid "" +#~ "This issue is about changing the " +#~ "title of a doc from present " +#~ "continious to present simple." +#~ msgstr "" + +#~ msgid "" +#~ "Let's take the example of “Saving " +#~ "Progress” which we changed to “Save " +#~ "Progress”. Does this pass our check?" 
+#~ msgstr "" + +#~ msgid "Before: ”How to saving progress” ❌" +#~ msgstr "" + +#~ msgid "After: ”How to save progress” ✅" +#~ msgstr "" + +#~ msgid "" +#~ "This is a tiny change, but it’ll" +#~ " allow us to test your end-" +#~ "to-end setup. After cloning and " +#~ "setting up the Flower repo, here’s " +#~ "what you should do:" +#~ msgstr "" + +#~ msgid "" +#~ "Build the docs and check the " +#~ "result: ``_" +#~ msgstr "" + +#~ msgid "Here’s how to change the file name:" +#~ msgstr "" + +#~ msgid "" +#~ "Commit the changes (commit messages are" +#~ " always imperative: “Do something”, in " +#~ "this case “Change …”)" +#~ msgstr "" + +#~ msgid "" +#~ "`Good first contributions " +#~ "`_, where you should" +#~ " particularly look into the " +#~ ":code:`baselines` contributions." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existant, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "" + +#~ msgid "" +#~ "Flower uses :code:`pyproject.toml` to manage" +#~ " dependencies and configure development " +#~ "tools (the ones which support it). " +#~ "Poetry is a build tool which " +#~ "supports `PEP 517 " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing machine learning" +#~ " workload with `FedBN `_, a federated training strategy" +#~ " designed for non-iid data. We " +#~ "are using PyTorch to train a " +#~ "Convolutional Neural Network(with Batch " +#~ "Normalization layers) on the CIFAR-10 " +#~ "dataset. When applying FedBN, only few" +#~ " changes needed compared to `Example: " +#~ "PyTorch - From Centralized To Federated" +#~ " `_." +#~ msgstr "" + +#~ msgid "" +#~ "All files are revised based on " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. 
The " +#~ "only thing to do is modifying the" +#~ " file called :code:`cifar.py`, revised part" +#~ " is shown below:" +#~ msgstr "" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used PyTorch " +#~ "before. Let's take the next step " +#~ "and use what we've built to create" +#~ " a federated learning system within " +#~ "FedBN, the sytstem consists of one " +#~ "server and two clients." +#~ msgstr "" + +#~ msgid "" +#~ "If you have read `Example: PyTorch " +#~ "- From Centralized To Federated " +#~ "`_, the following" +#~ " parts are easy to follow, onyl " +#~ ":code:`get_parameters` and :code:`set_parameters` " +#~ "function in :code:`client.py` needed to " +#~ "revise. If not, please read the " +#~ "`Example: PyTorch - From Centralized To" +#~ " Federated `_. first." +#~ msgstr "" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." 
+#~ msgstr "" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "" + +#~ msgid "" +#~ "**cid**: is the client ID. It is" +#~ " an integer that uniquely identifies " +#~ "client identifier." +#~ msgstr "" + +#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgstr "" + +#~ msgid "" +#~ "**nb_clients**: This defines the number " +#~ "of clients being created. This piece " +#~ "of information is not required by " +#~ "the client, but it helps us " +#~ "partition the original MNIST dataset to" +#~ " make sure that every client is " +#~ "working on unique subsets of both " +#~ "*training* and *test* sets." 
+#~ msgstr "" + +#~ msgid "" +#~ "Again, we can go deeper and look" +#~ " inside :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`. After going through the" +#~ " argument parsing code at the " +#~ "beginning of our :code:`main` function, " +#~ "you will find a call to " +#~ ":code:`mnist.load_data`. This function is " +#~ "responsible for partitioning the original " +#~ "MNIST datasets (*training* and *test*) " +#~ "and returning a :code:`torch.utils.data.DataLoader`" +#~ " s for each of them. We then" +#~ " instantiate a :code:`PytorchMNISTClient` object" +#~ " with our client ID, our DataLoaders," +#~ " the number of epochs in each " +#~ "round, and which device we want to" +#~ " use for training (CPU or GPU)." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" + +#~ msgid "A Closer Look" +#~ msgstr "" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" + +#~ msgid "Give It a Try" +#~ msgstr "" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" 
+#~ msgstr "" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "" + +#~ msgid "Differential privacy" +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." +#~ msgstr "" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "" + +#~ msgid "DP-FedAvg" +#~ msgstr "" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. 
This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." +#~ msgstr "" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." 
+#~ msgstr "" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" + +#~ msgid "Wrapper-based approach" +#~ msgstr "" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." +#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. 
Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" + +#~ msgid "Server-side logic" +#~ msgstr "" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." +#~ msgstr "" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." 
+#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." 
+#~ msgstr "" + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." 
+#~ msgstr "" + +#~ msgid "Client-side logic" +#~ msgstr "" + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "" + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." +#~ msgstr "" + +#~ msgid "" +#~ "McMahan et al. \"Learning Differentially " +#~ "Private Recurrent Language Models.\" " +#~ "International Conference on Learning " +#~ "Representations (ICLR), 2017." +#~ msgstr "" + +#~ msgid "" +#~ "Andrew, Galen, et al. \"Differentially " +#~ "Private Learning with Adaptive Clipping.\" " +#~ "Advances in Neural Information Processing " +#~ "Systems (NeurIPS), 2021." 
+#~ msgstr "" + +#~ msgid "" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by `implementing" +#~ " a custom strategy from scratch " +#~ "`_. Here's a nonsensical " +#~ "example that customizes :code:`FedAvg` by " +#~ "adding a custom ``\"hello\": \"world\"`` " +#~ "configuration key/value pair to the " +#~ "config dict of a *single client* " +#~ "(only the first client in the " +#~ "list, the other clients in this " +#~ "round to not receive this \"special\"" +#~ " config value):" +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_fit`." +#~ msgstr "" + +#~ msgid "" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the the list returned " +#~ "from :code:`configure_evaluate`." +#~ msgstr "" + +#~ msgid "" +#~ "`How to run Flower using Docker " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "Ray Dashboard: ``_" +#~ msgstr "" + +#~ msgid "" +#~ "Ray Metrics: ``_" +#~ msgstr "" + +#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`ClientApp `\\ " +#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgstr "" + +#~ msgid "Flower driver SDK." 
+#~ msgstr "" + +#~ msgid "driver" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`GrpcDriver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgstr "" + +#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(task\\_ids\\)" +#~ msgstr "" + +#~ msgid "Get task results." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ " +#~ "\\(task\\_ins\\_list\\)" +#~ msgstr "" + +#~ msgid "Schedule tasks." +#~ msgstr "" + +#~ msgid "GrpcDriver" +#~ msgstr "" + +#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Connect to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_run " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "Request for run ID." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`disconnect " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get_nodes `\\" +#~ " \\(req\\)" +#~ msgstr "" + +#~ msgid "Get client IDs." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`pull_task_res " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`push_task_ins " +#~ "`\\ \\(req\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "running the clients' jobs (i.e. their" +#~ " `fit()` method)." +#~ msgstr "" + +#~ msgid "" +#~ "Much effort went into a completely " +#~ "restructured Flower docs experience. 
The " +#~ "documentation on [flower.ai/docs](flower.ai/docs) is" +#~ " now divided into Flower Framework, " +#~ "Flower Baselines, Flower Android SDK, " +#~ "Flower iOS SDK, and code example " +#~ "projects." +#~ msgstr "" + +#~ msgid "" +#~ "The first preview release of Flower " +#~ "Baselines has arrived! We're kickstarting " +#~ "Flower Baselines with implementations of " +#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," +#~ " and FedAvgM. Check the documentation " +#~ "on how to use [Flower " +#~ "Baselines](https://flower.ai/docs/using-baselines.html). " +#~ "With this first preview release we're" +#~ " also inviting the community to " +#~ "[contribute their own " +#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" + +#~ msgid "The following examples are available as standalone projects." +#~ msgstr "" + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "" + +#~ msgid "" +#~ "`Quickstart TensorFlow (Tutorial) " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`Quickstart PyTorch (Tutorial) " +#~ "`_" +#~ msgstr "" + +#~ msgid "" +#~ "`PyTorch: From Centralized To Federated " +#~ "(Tutorial) `_" +#~ msgstr "" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" + +#~ msgid "Extra Dependencies" +#~ msgstr "" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. 
The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." +#~ msgstr "" + +#~ msgid "For PyTorch examples::" +#~ msgstr "" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" + +#~ msgid "PyTorch Examples" +#~ msgstr "" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "" + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" + +#~ msgid "First, start a Flower server:" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgstr "" + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." 
+#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "" + +#~ msgid "TensorFlow Examples" +#~ msgstr "" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." +#~ msgstr "" + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "" + +#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#~ msgstr "" + +#~ msgid "" +#~ "`Flower meets KOSMoS `_." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the full code example: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." +#~ msgstr "" + +#~ msgid "" +#~ "First of all, for running the " +#~ "Flower Python server, it is recommended" +#~ " to create a virtual environment and" +#~ " run everything within a `virtualenv " +#~ "`_. " +#~ "For the Flower client implementation in" +#~ " iOS, it is recommended to use " +#~ "Xcode as our IDE." 
+#~ msgstr "" + +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this informations beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." +#~ msgstr "" + +#~ msgid "" +#~ "After we have all of the necessary" +#~ " informations, let's create our Flower " +#~ "client." +#~ msgstr "" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongise Flower," +#~ " for example, PyTorch. This tutorial " +#~ "might be removed in future versions " +#~ "of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "It is recommended to create a " +#~ "virtual environment and run everything " +#~ "within this `virtualenv `_." +#~ msgstr "" + +#~ msgid "" +#~ "First of all, it is recommended to" +#~ " create a virtual environment and run" +#~ " everything within a `virtualenv " +#~ "`_." +#~ msgstr "" + +#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#~ msgstr "" + +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML `_, a popular" +#~ " image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The utility :code:`utils.load_mnist()` downloads " +#~ "the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." +#~ msgstr "" + +#~ msgid "" +#~ "Now that you have known how " +#~ "federated XGBoost work with Flower, it's" +#~ " time to run some more comprehensive" +#~ " experiments by customising the " +#~ "experimental settings. 
In the xgboost-" +#~ "comprehensive example (`full code " +#~ "`_), we provide more options " +#~ "to define various experimental setups, " +#~ "including aggregation strategies, data " +#~ "partitioning and centralised/distributed evaluation." +#~ " We also support `Flower simulation " +#~ "`_ making it easy to " +#~ "simulate large client cohorts in a " +#~ "resource-aware manner. Let's take a " +#~ "look!" +#~ msgstr "" + +#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgstr "" + +#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgstr "" + +#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgstr "" + +#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgstr "" + +#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgstr "" + +#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgstr "" + +#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgstr "" + +#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgstr "" + +#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgstr "" + +#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgstr "" + +#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgstr "" + +#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgstr "" + +#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgstr "" + +#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgstr "" + +#~ msgid "``BASE_IMAGE_TAG``" +#~ msgstr "``BASE_IMAGE_TAG``" + +#~ msgid "The image tag of the base image." +#~ msgstr "A tag da imagem da imagem base." 
+ +#~ msgid "" +#~ "Open the notebook ``doc/source/tutorial-" +#~ "get-started-with-flower-pytorch.ipynb``:" +#~ msgstr "" + +#~ msgid "" +#~ "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +#~ "/tutorial-get-started-with-flower-" +#~ "pytorch.ipynb" +#~ msgstr "" + +#~ msgid "" +#~ "https://colab.research.google.com/github/adap/flower/blob/branch-" +#~ "name/doc/source/tutorial-get-started-with-" +#~ "flower-pytorch.ipynb" +#~ msgstr "" + +#~ msgid "Virutualenv with Pyenv/Virtualenv" +#~ msgstr "" + +#~ msgid "" +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." +#~ msgstr "" + +#~ msgid "Open a PR (as shown above)" +#~ msgstr "" + +#~ msgid "How to write a good PR title" +#~ msgstr "" + +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "" + +#~ msgid "" +#~ "1. Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." 
+#~ msgstr "" + +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "" + +#~ msgid "Implement Algorithm" +#~ msgstr "" + +#~ msgid "Database" +#~ msgstr "" + +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "" + +#~ msgid "Improve code in module" +#~ msgstr "" + +#~ msgid "Change SomeModule" +#~ msgstr "" + +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "" + +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "" + +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "" + +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "" + +#~ msgid "" +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "" + +#~ msgid "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" +#~ msgstr "" + +#~ msgid "Changelog entry" +#~ msgstr "" + +#~ msgid "" +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." +#~ msgstr "" + +#~ msgid "" +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" +#~ msgstr "" + +#~ msgid "" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." +#~ msgstr "" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." 
+#~ msgstr "" + +#~ msgid "" +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." +#~ msgstr "" + +#~ msgid "" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" +#~ msgstr "" + +#~ msgid " is for classifying a PR as a general improvement." +#~ msgstr "" + +#~ msgid " is to not add the PR to the changelog" +#~ msgstr "" + +#~ msgid " is to add a general baselines change to the PR" +#~ msgstr "" + +#~ msgid " is to add a general examples change to the PR" +#~ msgstr "" + +#~ msgid " is to add a general sdk change to the PR" +#~ msgstr "" + +#~ msgid " is to add a general simulations change to the PR" +#~ msgstr "" + +#~ msgid "Note that only one token should be used." +#~ msgstr "" + +#~ msgid "" +#~ "Its content must have a specific " +#~ "format. We will break down what " +#~ "each possibility does:" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." 
+#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "" + +#~ msgid "" +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." +#~ msgstr "" + +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." 
+#~ msgstr "" + +#~ msgid "" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgstr "" + +#~ msgid "MNIST Training with MXNet" +#~ msgstr "" + +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." +#~ msgstr "" + +#~ msgid "" +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." +#~ msgstr "" + +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." +#~ msgstr "" + +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." +#~ msgstr "" + +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. 
Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." +#~ msgstr "" + +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgstr "" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." +#~ msgstr "" + +#~ msgid "MXNet meets Flower" +#~ msgstr "" + +#~ msgid "" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." +#~ msgstr "" + +#~ msgid "" +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." +#~ msgstr "" + +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. 
Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" +#~ msgstr "" + +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. :code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" +#~ msgstr "" + +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "" + +#~ msgid "" +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." +#~ msgstr "" + +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." +#~ msgstr "" + +#~ msgid "" +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." 
+#~ msgstr "" + +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" +#~ msgstr "" + +#~ msgid "" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" +#~ msgstr "" + +#~ msgid "" +#~ "This guide describes how to a " +#~ "SSL-enabled secure Flower server can " +#~ "be started and how a Flower client" +#~ " can establish a secure connections " +#~ "to it." +#~ msgstr "" + +#~ msgid "" +#~ "The code example comes with a " +#~ "README.md file which will explain how" +#~ " to start it. Although it is " +#~ "already SSL-enabled, it might be " +#~ "less descriptive on how. Stick to " +#~ "this guide for a deeper introduction " +#~ "to the topic." +#~ msgstr "" + +#~ msgid "" +#~ "Using SSL-enabled connections requires " +#~ "certificates to be passed to the " +#~ "server and client. For the purpose " +#~ "of this guide we are going to " +#~ "generate self-signed certificates. As " +#~ "this can become quite complex we " +#~ "are going to ask you to run " +#~ "the script in :code:`examples/advanced-" +#~ "tensorflow/certificates/generate.sh`" +#~ msgstr "" + +#~ msgid "with the following command sequence:" +#~ msgstr "" + +#~ msgid "" +#~ "The approach how the SSL certificates" +#~ " are generated in this example can" +#~ " serve as an inspiration and starting" +#~ " point but should not be taken " +#~ "as complete for production environments. 
" +#~ "Please refer to other sources regarding" +#~ " the issue of correctly generating " +#~ "certificates for production environments." +#~ msgstr "" + +#~ msgid "" +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." +#~ msgstr "" + +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." +#~ msgstr "" + +#~ msgid "" +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." +#~ msgstr "" + +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a client which uses the" +#~ " previously generated scripts:" +#~ msgstr "" + +#~ msgid "" +#~ "When setting :code:`root_certificates`, the " +#~ "client expects the PEM-encoded root " +#~ "certificates as a byte string. We " +#~ "are again using :code:`Path` to simplify" +#~ " reading those as byte strings." +#~ msgstr "" + +#~ msgid "" +#~ "You should now have learned how to" +#~ " generate self-signed certificates using" +#~ " the given script, start a SSL-" +#~ "enabled server, and have a client " +#~ "establish a secure connection to it." +#~ msgstr "" + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`_." +#~ msgstr "" + +#~ msgid "Flower server" +#~ msgstr "" + +#~ msgid "" +#~ "The command will pull the Docker " +#~ "image with the tag " +#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " +#~ "The tag contains the information which" +#~ " Flower, Python and Ubuntu is used." +#~ " In this case, it uses Flower " +#~ "1.7.0, Python 3.11 and Ubuntu 22.04. 
" +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower server keeps " +#~ "state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." +#~ msgstr "" + +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower server. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `_ when " +#~ "deploying to a production environment." +#~ msgstr "" + +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the server " +#~ "supports:" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the server on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``-v`` to" +#~ " mount the user's home directory " +#~ "(``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the server starts, the" +#~ " file ``state.db`` is created in the" +#~ " user's home directory on your host" +#~ " system. 
If the file already exists," +#~ " the server tries to restore the " +#~ "state from the file. To start the" +#~ " server with an empty database, " +#~ "simply remove the ``state.db`` file." +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "CA certificate, a server certificate and" +#~ " a server private key." +#~ msgstr "" + +#~ msgid "" +#~ "For testing purposes, you can generate" +#~ " your own self-signed certificates. " +#~ "The `Enable SSL connections " +#~ "`_ page contains " +#~ "a section that will guide you " +#~ "through the process." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``-v`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the server to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the server with the " +#~ "``--certificates`` flag." +#~ msgstr "" + +#~ msgid "Using a different Flower or Python version" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower or Python, you " +#~ "can do so by changing the tag. " +#~ "All versions we provide are available" +#~ " on `Docker Hub " +#~ "`_." 
+#~ msgstr "" + +#~ msgid "" +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ msgstr "" + +#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgstr "" + +#~ msgid "" +#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " +#~ "` | :doc:`🤗 " +#~ "Transformers ` " +#~ "| :doc:`JAX ` |" +#~ " :doc:`Pandas ` " +#~ "| :doc:`fastai `" +#~ " | :doc:`PyTorch Lightning ` | :doc:`MXNet " +#~ "` | :doc" +#~ ":`scikit-learn `" +#~ " | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +#~ msgstr "" + +#~ msgid "flower-driver-api" +#~ msgstr "" + +#~ msgid "flower-fleet-api" +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " +#~ ":py:class:`bytes`, :py:class:`bool`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_error_reply " +#~ "`\\ \\(error\\, " +#~ "ttl\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_reply `\\ " +#~ "\\(content\\, ttl\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " +#~ "[:py:class:`int`, :py:class:`float`, " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ msgstr "" + +#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "Start a Flower Driver API server." 
+#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" + +#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." +#~ msgstr "" + +#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Disconnect from the SuperLink if connected." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`create_message `\\" +#~ " \\(content\\, message\\_type\\, ...\\)" +#~ msgstr "" + +#~ msgid "" +#~ "Time-to-live for the round trip" +#~ " of this message, i.e., the time " +#~ "from sending this message to receiving" +#~ " a reply. It specifies the duration" +#~ " for which the message and its " +#~ "potential reply are considered valid." +#~ msgstr "" + +#~ msgid "start\\_driver" +#~ msgstr "" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." +#~ msgstr "" + +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "The Driver object to use." +#~ msgstr "" + +#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgstr "" + +#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Simulation Engine from the CLI." 
+#~ msgstr "" + +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." +#~ msgstr "" + +#~ msgid "Quickstart MXNet" +#~ msgstr "" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." +#~ msgstr "" + +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "" + +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "" + +#~ msgid "" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." +#~ msgstr "" + +#~ msgid "" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we define the validation of " +#~ "our machine learning model. 
We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." +#~ msgstr "" + +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." +#~ msgstr "" + +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "" + +#~ msgid "" +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" + +#~ msgid "They can be implemented in the following way:" +#~ msgstr "" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." 
+#~ msgstr "" + +#~ msgid "" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." +#~ msgstr "" + +#~ msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`load_mnist()`" +#~ msgstr "" + +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "" + +#~ msgid ":code:`shuffle()`" +#~ msgstr "" + +#~ msgid "Shuffles data and its label" +#~ msgstr "" + +#~ msgid ":code:`partition()`" +#~ msgstr "" + +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "" + +#~ msgid "" +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." +#~ msgstr "" + +#~ msgid "" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy." +#~ msgstr "" + +#~ msgid "Let's get stated!" +#~ msgstr "" + +#~ msgid "" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. 
Each ``trainloader``/``valloader`` " +#~ "pair contains 4500 training examples and" +#~ " 500 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." +#~ msgstr "" + +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgstr "" + +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgstr "" + +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgstr "" + +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgstr "" + +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgstr "" + +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgstr "" + +#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgstr "" + +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgstr "" + +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgstr "" + +#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgstr "" + +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgstr "" + +#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgstr "" + +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgstr "" + +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgstr "" + +#~ msgid "" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." +#~ msgstr "" +#~ "Atualmente, Flower fornece duas imagens, " +#~ "uma imagem base e uma imagem de" +#~ " servidor. Também haverá uma imagem " +#~ "de cliente em breve. 
A imagem " +#~ "base, como o nome sugere, contém " +#~ "dependências básicas que tanto o " +#~ "servidor quanto o cliente precisam. Isso" +#~ " inclui dependências do sistema, Python " +#~ "e ferramentas Python. A imagem do " +#~ "servidor é baseada na imagem base, " +#~ "mas também instala o servidor Flower " +#~ "usando ``pip```." + +#~ msgid "``3.11``" +#~ msgstr "``3.11``" + +#~ msgid "Defaults to ``22.04``." +#~ msgstr "Como padrão ``22.04``." + +#~ msgid "Building the SuperLink image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "Pré-definido para ``flwr/server``." + +#~ msgid "The Python version of the base image." +#~ msgstr "O nome do repositório da imagem base." + +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "Como padrão ``22.04``." + +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "Pré-definido para ``py3.11-ubuntu22.04``." + +#~ msgid "The PyPI package to install." +#~ msgstr "" + +#~ msgid "Defaults to ``flwr``." +#~ msgstr "Pré-definido para ``flwr/server``." + +#~ msgid "" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "" +#~ "O nome da imagem é ``flwr_server`` " +#~ "e a tag ``0.1.0``. Lembre-se que" +#~ " os argumentos de compilação, bem " +#~ "como o nome e a tag podem " +#~ "ser adaptados às suas necessidades. " +#~ "Esses valores servem apenas como " +#~ "exemplos." + +#~ msgid "Creating New Messages" +#~ msgstr "Criando novas mensagens" + +#~ msgid "" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." 
+#~ msgstr "" + +#~ msgid "Server's side:" +#~ msgstr "" + +#~ msgid "Client's side:" +#~ msgstr "" + +#~ msgid "" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" +#~ msgstr "" + +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "" + +#~ msgid "" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." +#~ msgstr "" + +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "" + +#~ msgid "Within the ClientMessage block:" +#~ msgstr "" + +#~ msgid "" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." +#~ msgstr "" + +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "" + +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "" + +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "" + +#~ msgid "" +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." +#~ msgstr "" + +#~ msgid "The four functions:" +#~ msgstr "" + +#~ msgid "Sending the Message from the Server" +#~ msgstr "" + +#~ msgid "" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" +#~ msgstr "" + +#~ msgid "Receiving the Message by the Client" +#~ msgstr "" + +#~ msgid "" +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. 
Remember to " +#~ "use the serde functions!" +#~ msgstr "" + +#~ msgid "Within the handle function:" +#~ msgstr "" + +#~ msgid "And add a new function:" +#~ msgstr "" + +#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgstr "" + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a path where you want to" +#~ " save the file on your host " +#~ "system and a name for the database" +#~ " file. In the example below, we " +#~ "tell Docker via the flag ``--volume``" +#~ " to mount the user's home directory" +#~ " (``~/`` on your host) into the " +#~ "``/app/`` directory of the container. " +#~ "Furthermore, we use the flag " +#~ "``--database`` to specify the name of" +#~ " the database file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the user's home directory on your " +#~ "host system. If the file already " +#~ "exists, the SuperLink tries to restore" +#~ " the state from the file. To " +#~ "start the SuperLink with an empty " +#~ "database, simply remove the ``state.db`` " +#~ "file." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/`` directory of the container. " +#~ "This allows the SuperLink to access " +#~ "the files within the container. Finally," +#~ " we pass the names of the " +#~ "certificates to the SuperLink with the" +#~ " ``--certificates`` flag." 
+#~ msgstr "" + +#~ msgid "" +#~ "``--server 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." +#~ msgstr "" + +#~ msgid "" +#~ "``--server 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--certificates`` flag " +#~ "when starting the container." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to use a different " +#~ "version of Flower, for example Flower" +#~ " nightly, you can do so by " +#~ "changing the tag. All available versions" +#~ " are on `Docker Hub " +#~ "`__." +#~ msgstr "" + +#~ msgid "" +#~ "Here's another example to start with " +#~ "HTTPS. Use the ``--certificates`` command " +#~ "line argument to pass paths to (CA" +#~ " certificate, server certificate, and " +#~ "server private key)." +#~ msgstr "" + +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "" + +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Flower server (Fleet API)." 
+#~ msgstr "" + +#~ msgid "Unreleased" +#~ msgstr "" + +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgstr "" + +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgstr "" + +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgstr "" + +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgstr "" + +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgstr "" + +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgstr "" + +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgstr "" + +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgstr "" + +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgstr "" + +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgstr "" + +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "" + +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgstr "" + +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "" + +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgstr "" + +#~ msgid "Edge Client Engine" +#~ msgstr "Engine do Edge Client" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com Engine do Edge " +#~ "Client" + +#~ msgid "Virtual Client Engine" +#~ msgstr "Engine do Virtual Client" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com Engine do Virtual" +#~ " Client" + +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "" +#~ "Engine do Virtual Client e do Edge" +#~ " Client no mesma carga de trabalho" +#~ " (workload)" + +#~ msgid "" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "" +#~ "`Flower `_ arquitetura principal" +#~ " do framework com ambas engines do" +#~ " Virtual Client e do Edge Client" + +#~ msgid "Clone the flower repository." 
+#~ msgstr "Clone o repositório do flower." + +#~ msgid "" +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." +#~ msgstr "" +#~ "Por favor, siga a primeira seção " +#~ "em :doc:`Execute o Flower usando Docker" +#~ " `" +#~ " que cobre este passo em mais " +#~ "detalhes." + +#~ msgid "``22.04``" +#~ msgstr "``23.0.1``" + +#~ msgid "``23.0.1``" +#~ msgstr "``23.0.1``" + +#~ msgid "``69.0.2``" +#~ msgstr "``69.0.2``" + +#~ msgid "``1.8.0``" +#~ msgstr "``1.7.0``" + +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "``1.8.0-py3.10-ubuntu22.04``" +#~ msgstr "" + +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "" +#~ "O exemplo a seguir cria uma imagem" +#~ " de servidor com a imagem base " +#~ "oficial do Flower py3.11-ubuntu22.04 e " +#~ "Flower 1.7.0:" + +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "Versão da imagem Docker oficial do Ubuntu." + +#~ msgid "" +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." +#~ msgstr "" + +#~ msgid "**Via the UI**" +#~ msgstr "" + +#~ msgid "" +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." +#~ msgstr "" + +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" + +#~ msgid "Click on the **green** ``Run workflow`` button." 
+#~ msgstr "" + +#~ msgid "**Via the GitHub CI**" +#~ msgstr "" + +#~ msgid "" +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." +#~ msgstr "" + +#~ msgid "" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." +#~ msgstr "" + +#~ msgid "Preliminarities" +#~ msgstr "" + +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "" + +#~ msgid "" +#~ "\\small\n" +#~ "P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +#~ msgstr "" + +#~ msgid "" +#~ "The following command can be used " +#~ "to verify if Flower was successfully " +#~ "installed. If everything worked, it " +#~ "should print the version of Flower " +#~ "to the command line::" +#~ msgstr "" + +#~ msgid ":doc:`How to run Flower using Docker `" +#~ msgstr "" + +#~ msgid "" +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." +#~ msgstr "" + +#~ msgid "Before you start, make sure that the Docker daemon is running:" +#~ msgstr "" + +#~ msgid "" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" + +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." +#~ msgstr "" + +#~ msgid "" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. 
This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." +#~ msgstr "" + +#~ msgid "Flower SuperLink" +#~ msgstr "" + +#~ msgid "Quickstart" +#~ msgstr "" + +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "" + +#~ msgid "" +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower SuperLink keeps" +#~ " state in-memory. When using the " +#~ "Docker flag ``--rm``, the state is " +#~ "not persisted between container starts. " +#~ "We will show below how to save " +#~ "the state in a file on your " +#~ "host system." +#~ msgstr "" + +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." 
+#~ msgstr "" + +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "" + +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "" + +#~ msgid "" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" + +#~ msgid "" +#~ "In the example below, we create a" +#~ " new directory, change the user ID" +#~ " and tell Docker via the flag " +#~ "``--volume`` to mount the local " +#~ "``state`` directory into the ``/app/state``" +#~ " directory of the container. Furthermore," +#~ " we use the flag ``--database`` to" +#~ " specify the name of the database " +#~ "file." +#~ msgstr "" + +#~ msgid "" +#~ "As soon as the SuperLink starts, " +#~ "the file ``state.db`` is created in " +#~ "the ``state`` directory on your host " +#~ "system. If the file already exists, " +#~ "the SuperLink tries to restore the " +#~ "state from the file. To start the" +#~ " SuperLink with an empty database, " +#~ "simply remove the ``state.db`` file." +#~ msgstr "" + +#~ msgid "Enabling SSL for secure connections" +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, you will need a " +#~ "PEM-encoded root certificate, a PEM-" +#~ "encoded private key and a PEM-" +#~ "encoded certificate chain." 
+#~ msgstr "" + +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" + +#~ msgid "" +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." +#~ msgstr "" + +#~ msgid "Flower SuperNode" +#~ msgstr "" + +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "" + +#~ msgid "" +#~ "The SuperNode Docker image currently " +#~ "works only with the 1.9.0-nightly " +#~ "release. A stable version will be " +#~ "available when Flower 1.9.0 (stable) " +#~ "gets released (ETA: May). A SuperNode" +#~ " nightly image must be paired with" +#~ " the corresponding SuperLink and ServerApp" +#~ " nightly images released on the same" +#~ " day. To ensure the versions are " +#~ "in sync, using the concrete tag, " +#~ "e.g., ``1.9.0.dev20240501`` instead of " +#~ "``nightly`` is recommended." 
+#~ msgstr "" + +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." +#~ msgstr "" + +#~ msgid "" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "" +#~ "Antes de começarmos, precisamos encontrar " +#~ "alguns pré-requisitos em nosso ambiente " +#~ "de desenvolvimento local." + +#~ msgid "Creating a SuperNode Dockerfile" +#~ msgstr "" + +#~ msgid "Let's assume the following project layout:" +#~ msgstr "" + +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." +#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." +#~ msgstr "" + +#~ msgid "" +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" + +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "" + +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. 
Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "Construindo a imagem do servidor" + +#~ msgid "" +#~ "Next, we build the SuperNode Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ClientApp code are located." +#~ msgstr "" + +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" + +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "" + +#~ msgid "Let's break down each part of this command:" +#~ msgstr "" + +#~ msgid "``docker run``: This is the command to run a new Docker container." +#~ msgstr "" + +#~ msgid "" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "" + +#~ msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" + +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "" + +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "" + +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." 
+#~ msgstr "" + +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." +#~ msgstr "" + +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "" + +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "" + +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the SuperNode to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flag when starting the container." +#~ msgstr "" + +#~ msgid "Flower ServerApp" +#~ msgstr "" + +#~ msgid "" +#~ "The procedure for building and running" +#~ " a ServerApp image is almost " +#~ "identical to the SuperNode image." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "" +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 -msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." -msgstr "" +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|7efbe3d29d8349b89594e8947e910525|" -msgstr "" +#~ msgid "" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "" +#~ msgid "" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" -msgstr "" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. 
The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 -msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" -msgstr "" +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "Construindo a imagem do servidor" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 -msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." -msgstr "" +#~ msgid "" +#~ "Next, we build the ServerApp Docker " +#~ "image by running the following command" +#~ " in the directory where Dockerfile " +#~ "and ServerApp code are located." 
+#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|329fb3c04c744eda83bb51fa444c2266|" -msgstr "" +#~ msgid "" +#~ "We gave the image the name " +#~ "``flwr_serverapp``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. You can" +#~ " change them to your needs." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "" +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "Construindo a imagem do servidor" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "" +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 -msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." -msgstr "" +#~ msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 -msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." 
-msgstr "" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 -msgid "" -"Congratulations, you now understand the basics of federated learning. " -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" -msgstr "" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 -msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." -msgstr "" +#~ msgid "" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 -msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. 
Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." -msgstr "" +#~ msgid "" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 -msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." -msgstr "" +#~ msgid "Advanced Docker options" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "" +#~ msgid "Run with root user privileges" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 -msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. 
" -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." -msgstr "" +#~ msgid "" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. " +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|c00bf2750bc24d229737a0fe1395f0fc|" -msgstr "" +#~ msgid "**Run a container with root user privileges**" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "" +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 -msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "" +#~ msgid "Using a different Flower version" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 -msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." -msgstr "" +#~ msgid "Pinning a Docker image to a specific version" +#~ msgstr "" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 -msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." 
-msgstr "" +#~ msgid "" +#~ "It may happen that we update the" +#~ " images behind the tags. Such updates" +#~ " usually include security updates of " +#~ "system dependencies that should not " +#~ "change the functionality of Flower. " +#~ "However, if you want to ensure " +#~ "that you always use the same " +#~ "image, you can specify the hash of" +#~ " the image instead of the tag." +#~ msgstr "" #~ msgid "" -#~ "Configuring and setting up the " -#~ ":code:`Dockerfile` as well the configuration" -#~ " for the devcontainer can be a " -#~ "bit more involved. The good thing " -#~ "is you want have to do it. " -#~ "Usually it should be enough to " -#~ "install Docker on your system and " -#~ "ensure its available on your command " -#~ "line. Additionally, install the `VSCode " -#~ "Containers Extension `_." +#~ "The following command returns the " +#~ "current image hash referenced by the " +#~ "``superlink:1.8.0`` tag:" +#~ msgstr "" + +#~ msgid "Next, we can pin the hash when running a new SuperLink container:" +#~ msgstr "" + +#~ msgid "Setting environment variables" #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` " -#~ "(without extras)" +#~ "To set a variable inside a Docker" +#~ " container, you can use the ``-e " +#~ "=`` flag." #~ msgstr "" #~ msgid "" -#~ "``flwr = { path = " -#~ "\"../../dist/flwr-1.0.0-py3-none-any.whl\", extras =" -#~ " [\"simulation\"] }`` (with extras)" +#~ "This approach consists of two seprate" +#~ " phases: clipping of the updates and" +#~ " adding noise to the aggregated " +#~ "model. For the clipping phase, Flower" +#~ " framework has made it possible to" +#~ " decide whether to perform clipping " +#~ "on the server side or the client" +#~ " side." 
#~ msgstr "" -#~ msgid "Upload the whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" +#~ msgid "" +#~ "The :code:`on_fit_config_fn` can be used " +#~ "to pass arbitrary configuration values " +#~ "from server to client, and poetentially" +#~ " change these values each round, for" +#~ " example, to adjust the learning " +#~ "rate. The client will receive the " +#~ "dictionary returned by the " +#~ ":code:`on_fit_config_fn` in its own " +#~ ":code:`client.fit()` function." #~ msgstr "" #~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.7.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " +#~ "` | :doc:`🤗 " +#~ "Transformers ` " +#~ "| :doc:`JAX ` |" +#~ " :doc:`Pandas ` " +#~ "| :doc:`fastai `" +#~ " | :doc:`PyTorch Lightning ` | :doc:`scikit-" +#~ "learn ` | " +#~ ":doc:`XGBoost ` |" +#~ " :doc:`Android ` " +#~ "| :doc:`iOS `" #~ msgstr "" -#~ msgid "Before the release" +#~ msgid "flower-client-app" #~ msgstr "" -#~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" +#~ msgid ":py:obj:`flwr.client `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.common `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.server `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`flwr.simulation `\\" #~ msgstr "" -#~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. 
This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." +#~ msgid "Run Flower client app." #~ msgstr "" -#~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "Tag the release commit with the " -#~ "version number as soon as the PR" -#~ " is merged: ``git tag v0.12.3``, then" -#~ " ``git push --tags``. This will " -#~ "create a draft release on GitHub " -#~ "containing the correct artifacts and the" -#~ " relevant part of the changelog." +#~ msgid "Run Flower SuperNode." #~ msgstr "" -#~ msgid "" -#~ "Note that, in order to build the" -#~ " documentation locally (with ``poetry run" -#~ " make html``, like described below), " -#~ "`Pandoc _` needs " -#~ "to be installed on the system." +#~ msgid ":py:obj:`flwr.client.mod `\\" #~ msgstr "" -#~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_ and examples " -#~ "of `good first contributions " -#~ "`_." +#~ msgid ":py:obj:`Context `\\ \\(state\\)" #~ msgstr "" -#~ msgid "" -#~ "This will create a `flower/` (or " -#~ "the name of your fork if you " -#~ "renamed it) folder in the current " -#~ "working directory." +#~ msgid "State of your run." #~ msgstr "" -#~ msgid "Otherwise you can always find this option in the `Branches` page." +#~ msgid "Metrics record." 
#~ msgstr "" #~ msgid "" -#~ "Once you click the `Compare & pull" -#~ " request` button, you should see " -#~ "something similar to this:" +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`str` | " +#~ ":py:class:`bytes` | :py:class:`bool` | " +#~ ":py:class:`~typing.List`\\ [:py:class:`int`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`float`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`str`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`] | " +#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]" #~ msgstr "" -#~ msgid "Find the source file in `doc/source`" +#~ msgid "Remove all items from R." #~ msgstr "" -#~ msgid "" -#~ "Make the change in the `.rst` file" -#~ " (beware, the dashes under the title" -#~ " should be the same length as " -#~ "the title itself)" +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "Change the file name to `save-progress.rst`" +#~ msgid "d defaults to None." #~ msgstr "" -#~ msgid "Add a redirect rule to `doc/source/conf.py`" +#~ msgid "Update R from dict/iterable E and F." #~ msgstr "" #~ msgid "" -#~ "This will cause a redirect from " -#~ "`saving-progress.html` to `save-progress.html`," -#~ " old links will continue to work." +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "For the lateral navigation bar to " -#~ "work properly, it is very important " -#~ "to update the `index.rst` file as " -#~ "well. This is where we define the" -#~ " whole arborescence of the navbar." 
+#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" #~ msgstr "" -#~ msgid "Find and modify the file name in `index.rst`" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" #~ msgstr "" -#~ msgid "Add CI job to deploy the staging system when the `main` branch changes" +#~ msgid "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" #~ msgstr "" -#~ msgid "`Python 3.7 `_ or above" +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" #~ msgstr "" -#~ msgid "" -#~ "First, clone the `Flower repository " -#~ "`_ from GitHub::" +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" #~ msgstr "" #~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "If you don't have :code:`pyenv` " -#~ "installed, you can use the following " -#~ "script that will install pyenv, set " -#~ "it up and create the virtual " -#~ "environment (with :code:`Python 3.8.17` by " -#~ "default)::" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgstr "" #~ msgid "" -#~ "Third, install the Flower package in " -#~ "development mode (think :code:`pip install " -#~ "-e`) along with all necessary " -#~ "dependencies::" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." #~ msgstr "" -#~ msgid "" -#~ "Developers could run the full set " -#~ "of Github Actions workflows under their" -#~ " local environment by using `Act " -#~ "_`. 
Please refer to" -#~ " the installation instructions under the" -#~ " linked repository and run the next" -#~ " command under Flower main cloned " -#~ "repository folder::" +#~ msgid ":py:obj:`partition_id `\\" #~ msgstr "" -#~ msgid "" -#~ "Please note that these components are" -#~ " still experimental, the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ msgid "An identifier telling which data partition a ClientApp should use." #~ msgstr "" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. Therefore, we use " -#~ "an adaptive approach [andrew]_ that " -#~ "continuously adjusts the clipping threshold" -#~ " to track a prespecified quantile of" -#~ " the update norm distribution." +#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +#~ "[:py:class:`str`, :py:class:`int` | " +#~ ":py:class:`float` | :py:class:`~typing.List`\\ " +#~ "[:py:class:`int`] | :py:class:`~typing.List`\\ " +#~ "[:py:class:`float`]]" #~ msgstr "" -#~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realises the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ "A dataclass storing named Arrays in " +#~ "order. This means that it holds " +#~ "entries as an OrderedDict[str, Array]. 
" +#~ "ParametersRecord objects can be viewed " +#~ "as an equivalent to PyTorch's " +#~ "state_dict, but holding serialised tensors " +#~ "instead." #~ msgstr "" -#~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #~ msgstr "" -#~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. 
This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ msgid ":py:obj:`run_server_app `\\ \\(\\)" #~ msgstr "" -#~ msgid "" -#~ "McMahan, H. Brendan, et al. \"Learning" -#~ " differentially private recurrent language " -#~ "models.\" arXiv preprint arXiv:1710.06963 " -#~ "(2017)." +#~ msgid "Run Flower server app." #~ msgstr "" -#~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "private learning with adaptive clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems 34 (2021): 17455-17466." +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." #~ msgstr "" #~ msgid "" -#~ "The following command can be used " -#~ "to verfiy if Flower was successfully " -#~ "installed. If everything worked, it " -#~ "should print the version of Flower " -#~ "to the command line::" +#~ ":py:obj:`LegacyContext `\\ " +#~ "\\(state\\[\\, config\\, strategy\\, ...\\]\\)" #~ msgstr "" -#~ msgid "flwr (Python API reference)" +#~ msgid ":py:obj:`flwr.server.strategy `\\" #~ msgstr "" -#~ msgid "start_client" +#~ msgid ":py:obj:`flwr.server.workflow `\\" #~ msgstr "" -#~ msgid "start_numpy_client" +#~ msgid "run\\_driver\\_api" #~ msgstr "" -#~ msgid "start_simulation" +#~ msgid "run\\_fleet\\_api" #~ msgstr "" -#~ msgid "server.start_server" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ msgid "server.strategy" +#~ msgid "key shares." #~ msgstr "" -#~ msgid "server.strategy.Strategy" +#~ msgid "" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. 
- 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" #~ msgstr "" -#~ msgid "server.strategy.FedAvg" +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\[\\, ...\\]\\)" #~ msgstr "" -#~ msgid "server.strategy.FedAvgM" +#~ msgid "" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." #~ msgstr "" -#~ msgid "server.strategy.FedMedian" +#~ msgid "" +#~ "When diabled, only INFO, WARNING and " +#~ "ERROR log messages will be shown. " +#~ "If enabled, DEBUG-level logs will " +#~ "be displayed." #~ msgstr "" -#~ msgid "server.strategy.QFedAvg" +#~ msgid "" +#~ "A function creating client instances. " +#~ "The function must take a single " +#~ "`str` argument called `cid`. It should" +#~ " return a single client instance of" +#~ " type Client. Note that the created" +#~ " client instances are ephemeral and " +#~ "will often be destroyed after a " +#~ "single method invocation. Since client " +#~ "instances are not long-lived, they " +#~ "should not attempt to carry state " +#~ "over method invocations. Any state " +#~ "required by the instance (model, " +#~ "dataset, hyperparameters, ...) should be " +#~ "(re-)created in either the call to " +#~ "`client_fn` or the call to any of" +#~ " the client methods (e.g., load " +#~ "evaluation data in the `evaluate` method" +#~ " itself)." #~ msgstr "" -#~ msgid "server.strategy.FaultTolerantFedAvg" +#~ msgid "" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." #~ msgstr "" -#~ msgid "server.strategy.FedOpt" +#~ msgid "" +#~ "List `client_id`s for each client. This" +#~ " is only required if `num_clients` is" +#~ " not set. 
Setting both `num_clients` " +#~ "and `clients_ids` with `len(clients_ids)` not" +#~ " equal to `num_clients` generates an " +#~ "error." #~ msgstr "" -#~ msgid "server.strategy.FedProx" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." #~ msgstr "" -#~ msgid "server.strategy.FedAdagrad" +#~ msgid "" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." #~ msgstr "" -#~ msgid "server.strategy.FedAdam" +#~ msgid "" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" #~ msgstr "" -#~ msgid "server.strategy.FedYogi" +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" #~ msgstr "" -#~ msgid "server.strategy.FedTrimmedAvg" +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." 
#~ msgstr "" -#~ msgid "server.strategy.Krum" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" #~ msgstr "" -#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgid "In addition, we define the device allocation in PyTorch with:" #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgid "" +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." #~ msgstr "" -#~ msgid "server.strategy.DPFedAvgFixed" +#~ msgid "" +#~ "Define the loss and optimizer with " +#~ "PyTorch. The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "" + +#~ msgid "" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "" + +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." +#~ msgstr "" + +#~ msgid "" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "" + +#~ msgid "" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." #~ msgstr "" #~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. 
Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." +#~ msgid "receive the updated local model weights" #~ msgstr "" -#~ msgid "" -#~ "Using the `client_fn`, Flower clients " -#~ "can interchangeably run as standalone " -#~ "processes (i.e. via `start_client`) or " -#~ "in simulation (i.e. via `start_simulation`)" -#~ " without requiring changes to how the" -#~ " client class is defined and " -#~ "instantiated. Calling `start_numpy_client` is " -#~ "now deprecated." +#~ msgid "which can be implemented in the following way:" #~ msgstr "" #~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384)), " -#~ "([#2425](https://github.com/adap/flower/pull/2425))" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." #~ msgstr "" #~ msgid "" -#~ "**General updates to baselines** " -#~ "([#2301](https://github.com/adap/flower/pull/2301), " -#~ "[#2305](https://github.com/adap/flower/pull/2305), " -#~ "[#2307](https://github.com/adap/flower/pull/2307), " -#~ "[#2327](https://github.com/adap/flower/pull/2327), " -#~ "[#2435](https://github.com/adap/flower/pull/2435))" +#~ "In this example, we split the " +#~ "dataset into two partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=2)`). 
" +#~ "Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`node_id`:" #~ msgstr "" #~ msgid "" -#~ "**General updates to the simulation " -#~ "engine** ([#2331](https://github.com/adap/flower/pull/2331), " -#~ "[#2447](https://github.com/adap/flower/pull/2447), " -#~ "[#2448](https://github.com/adap/flower/pull/2448))" +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." #~ msgstr "" #~ msgid "" -#~ "**General improvements** " -#~ "([#2309](https://github.com/adap/flower/pull/2309), " -#~ "[#2310](https://github.com/adap/flower/pull/2310), " -#~ "[2313](https://github.com/adap/flower/pull/2313), " -#~ "[#2316](https://github.com/adap/flower/pull/2316), " -#~ "[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -#~ " [#2360](https://github.com/adap/flower/pull/2360), " -#~ "[#2402](https://github.com/adap/flower/pull/2402), " -#~ "[#2446](https://github.com/adap/flower/pull/2446))" +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. the " +#~ "returned Booster object and config are" +#~ " stored in :code:`self.bst` and " +#~ ":code:`self.config`, respectively. From the " +#~ "second round, we load the global " +#~ "model sent from server to " +#~ ":code:`self.bst`, and then update model " +#~ "weights on local training data with " +#~ "function :code:`local_boost` as follows:" #~ msgstr "" #~ msgid "" -#~ "`flower-superlink --driver-api-address " -#~ "\"0.0.0.0:8081\" --fleet-api-address " -#~ "\"0.0.0.0:8086\"`" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`self.bst.update` method. 
" +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." #~ msgstr "" #~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ "In :code:`evaluate`, we call " +#~ ":code:`self.bst.eval_set` function to conduct " +#~ "evaluation on valid set. The AUC " +#~ "value will be returned." #~ msgstr "" #~ msgid "" #~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " +#~ " have to implement :code:`Client`and call" +#~ " :code:`fl.client.start_client()`. The string " #~ ":code:`\"[::]:8080\"` tells the client which" #~ " server to connect to. In our " #~ "case we can run the server and " @@ -21975,53 +29257,50 @@ msgstr "" #~ msgstr "" #~ msgid "" -#~ "Let's build a horizontal federated " -#~ "learning system using XGBoost and " -#~ "Flower!" -#~ msgstr "" - -#~ msgid "" -#~ "Please refer to the `full code " -#~ "example `_ to learn " -#~ "more." +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients." #~ msgstr "" #~ msgid "" -#~ "In this notebook, we'll build a " -#~ "federated learning system using Flower " -#~ "and PyTorch. 
In part 1, we use " -#~ "PyTorch for the model training pipeline" -#~ " and data loading. In part 2, " -#~ "we continue to federate the PyTorch-" -#~ "based pipeline using Flower." +#~ "Welcome to the third part of the" +#~ " Flower federated learning tutorial. In " +#~ "previous parts of this tutorial, we " +#~ "introduced federated learning with PyTorch " +#~ "and Flower (`part 1 " +#~ "`__) and we " +#~ "learned how strategies can be used " +#~ "to customize the execution on both " +#~ "the server and the clients (`part " +#~ "2 `__)." #~ msgstr "" #~ msgid "" -#~ "Next, we install the necessary packages" -#~ " for PyTorch (``torch`` and " -#~ "``torchvision``) and Flower (``flwr``):" +#~ "In this notebook, we'll continue to " +#~ "customize the federated learning system " +#~ "we built previously by creating a " +#~ "custom version of FedAvg (again, using" +#~ " `Flower `__ and `PyTorch " +#~ "`__)." #~ msgstr "" #~ msgid "" -#~ "Federated learning can be applied to " -#~ "many different types of tasks across " -#~ "different domains. In this tutorial, we" -#~ " introduce federated learning by training" -#~ " a simple convolutional neural network " -#~ "(CNN) on the popular CIFAR-10 dataset." -#~ " CIFAR-10 can be used to train " -#~ "image classifiers that distinguish between " -#~ "images from ten different classes:" +#~ "`Star Flower on GitHub " +#~ "`__ ⭐️ and join " +#~ "the Flower community on Slack to " +#~ "connect, ask questions, and get help:" +#~ " `Join Slack `__" +#~ " 🌼 We'd love to hear from you" +#~ " in the ``#introductions`` channel! And " +#~ "if anything is unclear, head over " +#~ "to the ``#questions`` channel." #~ msgstr "" -#~ msgid "" -#~ "Each organization will act as a " -#~ "client in the federated learning system." -#~ " So having ten organizations participate" -#~ " in a federation means having ten " -#~ "clients connected to the federated " -#~ "learning server:" +#~ msgid "Let's build a new ``Strategy`` from scratch!" 
#~ msgstr "" #~ msgid "" @@ -22029,2404 +29308,2908 @@ msgstr "" #~ "and test set, partition them into " #~ "ten smaller datasets (each split into" #~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" -#~ msgstr "" - -#~ msgid "|ed6498a023f2477a9ccd57ee4514bda4|" -#~ msgstr "" - -#~ msgid "|5a4f742489ac4f819afefdd4dc9ab272|" -#~ msgstr "" - -#~ msgid "|3331c80cd05045f6a56524d8e3e76d0c|" -#~ msgstr "" - -#~ msgid "|4987b26884ec4b2c8f06c1264bcebe60|" +#~ "wrap everything in their own " +#~ "``DataLoader``. We introduce a new " +#~ "parameter ``num_clients`` which allows us " +#~ "to call ``load_datasets`` with different " +#~ "numbers of clients." #~ msgstr "" -#~ msgid "|ec8ae2d778aa493a986eb2fa29c220e5|" -#~ msgstr "" - -#~ msgid "|b8949d0669fe4f8eadc9a4932f4e9c57|" -#~ msgstr "" - -#~ msgid "|94ff30bdcd09443e8488b5f29932a541|" +#~ msgid "" +#~ "To implement the Flower client, we " +#~ "(again) create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``. Here, we also pass the" +#~ " ``cid`` to the client and use " +#~ "it log additional details:" #~ msgstr "" -#~ msgid "|48dccf1d6d0544bba8917d2783a47719|" +#~ msgid "" +#~ "Let's go deeper and see what it" +#~ " takes to move from ``NumPyClient`` " +#~ "to ``Client``!" #~ msgstr "" -#~ msgid "|0366618db96b4f329f0d4372d1150fde|" +#~ msgid "" +#~ "So far, we've implemented our client " +#~ "by subclassing ``flwr.client.NumPyClient``. The " +#~ "three methods we implemented are " +#~ "``get_parameters``, ``fit``, and ``evaluate``. " +#~ "Finally, we wrap the creation of " +#~ "instances of this class in a " +#~ "function called ``client_fn``:" #~ msgstr "" -#~ msgid "|ac80eddc76e6478081b1ca35eed029c0|" +#~ msgid "" +#~ "We've seen this before, there's nothing" +#~ " new so far. 
The only *tiny* " +#~ "difference compared to the previous " +#~ "notebook is naming, we've changed " +#~ "``FlowerClient`` to ``FlowerNumPyClient`` and " +#~ "``client_fn`` to ``numpyclient_fn``. Let's run" +#~ " it to see the output we get:" #~ msgstr "" -#~ msgid "|1ac94140c317450e89678db133c7f3c2|" +#~ msgid "" +#~ "This works as expected, two clients " +#~ "are training for three rounds of " +#~ "federated learning." #~ msgstr "" -#~ msgid "|f8850c6e96fc4430b55e53bba237a7c0|" +#~ msgid "" +#~ "Let's dive a little bit deeper and" +#~ " discuss how Flower executes this " +#~ "simulation. Whenever a client is " +#~ "selected to do some work, " +#~ "``start_simulation`` calls the function " +#~ "``numpyclient_fn`` to create an instance " +#~ "of our ``FlowerNumPyClient`` (along with " +#~ "loading the model and the data)." #~ msgstr "" -#~ msgid "|4a368fdd3fc34adabd20a46752a68582|" +#~ msgid "" +#~ "`Check out Flower Code Examples " +#~ "`__" #~ msgstr "" -#~ msgid "|40f69c17bb444652a7c8dfe577cd120e|" +#~ msgid "" +#~ "`Watch Flower Summit 2023 videos " +#~ "`__" #~ msgstr "" #~ msgid "" -#~ "Please follow the first section on " -#~ "`Run Flower using Docker " -#~ "`_ which covers this" -#~ " step in more detail." +#~ "In this notebook, we'll build a " +#~ "federated learning system using Flower, " +#~ "`Flower Datasets `__ " +#~ "and PyTorch. In part 1, we use " +#~ "PyTorch for the model training pipeline" +#~ " and data loading. In part 2, " +#~ "we continue to federate the PyTorch-" +#~ "based pipeline using Flower." #~ msgstr "" -#~ msgid "" -#~ "Since `Flower 1.5 `_ we have " -#~ "introduced translations to our doc " -#~ "pages, but, as you might have " -#~ "noticed, the translations are often " -#~ "imperfect. If you speak languages other" -#~ " than English, you might be able " -#~ "to help us in our effort to " -#~ "make Federated Learning accessible to as" -#~ " many people as possible by " -#~ "contributing to those translations! 
This " -#~ "might also be a great opportunity " -#~ "for those wanting to become open " -#~ "source contributors with little prerequistes." +#~ msgid "Loading the data" #~ msgstr "" #~ msgid "" -#~ "You input your translation in the " -#~ "textbox at the top and then, once" -#~ " you are happy with it, you " -#~ "either press ``Save and continue`` (to" -#~ " save the translation and go to " -#~ "the next untranslated string), ``Save " -#~ "and stay`` (to save the translation " -#~ "and stay on the same page), " -#~ "``Suggest`` (to add your translation to" -#~ " suggestions for other users to " -#~ "view), or ``Skip`` (to go to the" -#~ " next untranslated string without saving" -#~ " anything)." +#~ "We simulate having multiple datasets " +#~ "from multiple organizations (also called " +#~ "the \"cross-silo\" setting in federated" +#~ " learning) by splitting the original " +#~ "CIFAR-10 dataset into multiple partitions. " +#~ "Each partition will represent the data" +#~ " from a single organization. We're " +#~ "doing this purely for experimentation " +#~ "purposes, in the real world there's " +#~ "no need for data splitting because " +#~ "each organization already has their own" +#~ " data (so the data is naturally " +#~ "partitioned)." #~ msgstr "" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ "Each organization will act as a " +#~ "client in the federated learning system." +#~ " So having ten organizations participate" +#~ " in a federation means having ten " +#~ "clients connected to the federated " +#~ "learning server." 
#~ msgstr "" #~ msgid "" -#~ "Source: `Official VSCode documentation " -#~ "`_" +#~ "Let's now create the Federated Dataset" +#~ " abstraction that from ``flwr-datasets``" +#~ " that partitions the CIFAR-10. We " +#~ "will create small training and test " +#~ "set for each edge device and wrap" +#~ " each of them into a PyTorch " +#~ "``DataLoader``:" #~ msgstr "" #~ msgid "" -#~ "`Developing inside a Container " -#~ "`_" +#~ "We now have a list of ten " +#~ "training sets and ten validation sets" +#~ " (``trainloaders`` and ``valloaders``) " +#~ "representing the data of ten different" +#~ " organizations. Each ``trainloader``/``valloader`` " +#~ "pair contains 4000 training examples and" +#~ " 1000 validation examples. There's also " +#~ "a single ``testloader`` (we did not " +#~ "split the test set). Again, this " +#~ "is only necessary for building research" +#~ " or educational systems, actual federated" +#~ " learning systems have their data " +#~ "naturally distributed across multiple " +#~ "partitions." #~ msgstr "" #~ msgid "" -#~ "`Remote development in Containers " -#~ "`_" +#~ "Let's take a look at the first " +#~ "batch of images and labels in the" +#~ " first training set (i.e., " +#~ "``trainloaders[0]``) before we move on:" #~ msgstr "" #~ msgid "" -#~ "If you are not familiar with " -#~ "Flower Baselines, you should probably " -#~ "check-out our `contributing guide for " -#~ "baselines `_." +#~ "The output above shows a random " +#~ "batch of images from the first " +#~ "``trainloader`` in our list of ten " +#~ "``trainloaders``. It also prints the " +#~ "labels associated with each image (i.e.," +#~ " one of the ten possible labels " +#~ "we've seen above). If you run the" +#~ " cell again, you should see another" +#~ " batch of images." #~ msgstr "" -#~ msgid "" -#~ "You should then check out the open" -#~ " `issues " -#~ "`_" -#~ " for baseline requests. 
If you find" -#~ " a baseline that you'd like to " -#~ "work on and that has no assignes," -#~ " feel free to assign it to " -#~ "yourself and start working on it!" +#~ msgid "Defining the model" #~ msgstr "" -#~ msgid "" -#~ "If you're familiar with how contributing" -#~ " on GitHub works, you can directly" -#~ " checkout our `getting started guide " -#~ "for contributors `_." +#~ msgid "Training the model" #~ msgstr "" #~ msgid "" -#~ "Git is a distributed version control " -#~ "tool. This allows for an entire " -#~ "codebase's history to be stored and " -#~ "every developer's machine. It is a " -#~ "software that will need to be " -#~ "installed on your local machine, you " -#~ "can follow this `guide " -#~ "`_ to set it up." +#~ "We now have all the basic building" +#~ " blocks we need: a dataset, a " +#~ "model, a training function, and a " +#~ "test function. Let's put them together" +#~ " to train the model on the " +#~ "dataset of one of our organizations " +#~ "(``trainloaders[0]``). This simulates the " +#~ "reality of most machine learning " +#~ "projects today: each organization has " +#~ "their own data and trains models " +#~ "only on this internal data:" #~ msgstr "" #~ msgid "" -#~ "A fork is a personal copy of " -#~ "a GitHub repository. To create one " -#~ "for Flower, you must navigate to " -#~ "https://github.com/adap/flower (while connected to" -#~ " your GitHub account) and click the" -#~ " ``Fork`` button situated on the top" -#~ " right of the page." +#~ "Training the simple CNN on our " +#~ "CIFAR-10 split for 5 epochs should " +#~ "result in a test set accuracy of" +#~ " about 41%, which is not good, " +#~ "but at the same time, it doesn't" +#~ " really matter for the purposes of" +#~ " this tutorial. The intent was just" +#~ " to show a simplistic centralized " +#~ "training pipeline that sets the stage" +#~ " for what comes next - federated " +#~ "learning!" 
#~ msgstr "" -#~ msgid "" -#~ "Now we will add an upstream " -#~ "address to our repository. Still in " -#~ "the same directroy, we must run " -#~ "the following command:" +#~ msgid "Updating model parameters" #~ msgstr "" #~ msgid "" -#~ "This can be achieved by following " -#~ "this `getting started guide for " -#~ "contributors`_ (note that you won't need" -#~ " to clone the repository). Once you" -#~ " are able to write code and " -#~ "test it, you can finally start " -#~ "making changes!" +#~ "In federated learning, the server sends" +#~ " the global model parameters to the" +#~ " client, and the client updates the" +#~ " local model with the parameters " +#~ "received from the server. It then " +#~ "trains the model on the local data" +#~ " (which changes the model parameters " +#~ "locally) and sends the updated/changed " +#~ "model parameters back to the server " +#~ "(or, alternatively, it sends just the" +#~ " gradients back to the server, not" +#~ " the full model parameters)." #~ msgstr "" #~ msgid "" -#~ "For our documentation, we’ve started to" -#~ " use the `Diàtaxis framework " -#~ "`_." +#~ "The details of how this works are" +#~ " not really important here (feel free" +#~ " to consult the PyTorch documentation " +#~ "if you want to learn more). In " +#~ "essence, we use ``state_dict`` to access" +#~ " PyTorch model parameter tensors. The " +#~ "parameter tensors are then converted " +#~ "to/from a list of NumPy ndarray's " +#~ "(which Flower knows how to " +#~ "serialize/deserialize):" +#~ msgstr "" + +#~ msgid "Implementing a Flower client" #~ msgstr "" #~ msgid "" -#~ "Our “How to” guides should have " -#~ "titles that continue the sencence “How" -#~ " to …”, for example, “How to " -#~ "upgrade to Flower 1.0”." +#~ "With that out of the way, let's" +#~ " move on to the interesting part. " +#~ "Federated learning systems consist of a" +#~ " server and multiple clients. 
In " +#~ "Flower, we create clients by " +#~ "implementing subclasses of ``flwr.client.Client``" +#~ " or ``flwr.client.NumPyClient``. We use " +#~ "``NumPyClient`` in this tutorial because " +#~ "it is easier to implement and " +#~ "requires us to write less boilerplate." #~ msgstr "" #~ msgid "" -#~ "This issue is about changing the " -#~ "title of a doc from present " -#~ "continious to present simple." +#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" #~ msgid "" -#~ "Let's take the example of “Saving " -#~ "Progress” which we changed to “Save " -#~ "Progress”. Does this pass our check?" +#~ "``fit``: Receive model parameters from " +#~ "the server, train the model parameters" +#~ " on the local data, and return " +#~ "the (updated) model parameters to the" +#~ " server" #~ msgstr "" -#~ msgid "Before: ”How to saving progress” ❌" +#~ msgid "" +#~ "``evaluate``: Receive model parameters from" +#~ " the server, evaluate the model " +#~ "parameters on the local data, and " +#~ "return the evaluation result to the " +#~ "server" #~ msgstr "" -#~ msgid "After: ”How to save progress” ✅" +#~ msgid "" +#~ "Our class ``FlowerClient`` defines how " +#~ "local training/evaluation will be performed" +#~ " and allows Flower to call the " +#~ "local training/evaluation through ``fit`` and" +#~ " ``evaluate``. Each instance of " +#~ "``FlowerClient`` represents a *single client*" +#~ " in our federated learning system. " +#~ "Federated learning systems have multiple " +#~ "clients (otherwise, there's not much to" +#~ " federate), so each client will be" +#~ " represented by its own instance of" +#~ " ``FlowerClient``. If we have, for " +#~ "example, three clients in our workload," +#~ " then we'd have three instances of" +#~ " ``FlowerClient``. 
Flower calls " +#~ "``FlowerClient.fit`` on the respective " +#~ "instance when the server selects a " +#~ "particular client for training (and " +#~ "``FlowerClient.evaluate`` for evaluation)." +#~ msgstr "" + +#~ msgid "Using the Virtual Client Engine" #~ msgstr "" #~ msgid "" -#~ "This is a tiny change, but it’ll" -#~ " allow us to test your end-" -#~ "to-end setup. After cloning and " -#~ "setting up the Flower repo, here’s " -#~ "what you should do:" -#~ msgstr "" +#~ "In this notebook, we want to " +#~ "simulate a federated learning system " +#~ "with 10 clients on a single " +#~ "machine. This means that the server " +#~ "and all 10 clients will live on" +#~ " a single machine and share resources" +#~ " such as CPU, GPU, and memory. " +#~ "Having 10 clients would mean having " +#~ "10 instances of ``FlowerClient`` in " +#~ "memory. Doing this on a single " +#~ "machine can quickly exhaust the " +#~ "available memory resources, even if only" +#~ " a subset of these clients " +#~ "participates in a single round of " +#~ "federated learning." +#~ msgstr "" #~ msgid "" -#~ "Build the docs and check the " -#~ "result: ``_" +#~ "In addition to the regular capabilities" +#~ " where server and clients run on " +#~ "multiple machines, Flower, therefore, provides" +#~ " special simulation capabilities that " +#~ "create ``FlowerClient`` instances only when" +#~ " they are actually necessary for " +#~ "training or evaluation. To enable the" +#~ " Flower framework to create clients " +#~ "when necessary, we need to implement " +#~ "a function called ``client_fn`` that " +#~ "creates a ``FlowerClient`` instance on " +#~ "demand. Flower calls ``client_fn`` whenever" +#~ " it needs an instance of one " +#~ "particular client to call ``fit`` or " +#~ "``evaluate`` (those instances are usually " +#~ "discarded after use, so they should " +#~ "not keep any local state). Clients " +#~ "are identified by a client ID, or" +#~ " short ``cid``. 
The ``cid`` can be" +#~ " used, for example, to load different" +#~ " local data partitions for different " +#~ "clients, as can be seen below:" #~ msgstr "" -#~ msgid "Here’s how to change the file name:" +#~ msgid "Starting the training" #~ msgstr "" #~ msgid "" -#~ "Commit the changes (commit messages are" -#~ " always imperative: “Do something”, in " -#~ "this case “Change …”)" +#~ "We now have the class ``FlowerClient``" +#~ " which defines client-side " +#~ "training/evaluation and ``client_fn`` which " +#~ "allows Flower to create ``FlowerClient`` " +#~ "instances whenever it needs to call " +#~ "``fit`` or ``evaluate`` on one " +#~ "particular client. The last step is " +#~ "to start the actual simulation using " +#~ "``flwr.simulation.start_simulation``." #~ msgstr "" #~ msgid "" -#~ "`Good first contributions " -#~ "`_, where you should" -#~ " particularly look into the " -#~ ":code:`baselines` contributions." +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." #~ msgstr "" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existant, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ "Flower has a number of built-in" +#~ " strategies, but we can also use " +#~ "our own strategy implementations to " +#~ "customize nearly all aspects of the " +#~ "federated learning approach. For this " +#~ "example, we use the built-in " +#~ "``FedAvg`` implementation and customize it " +#~ "using a few basic parameters. 
The " +#~ "last step is the actual call to" +#~ " ``start_simulation`` which - you guessed" +#~ " it - starts the simulation:" +#~ msgstr "" + +#~ msgid "" +#~ "When we call ``start_simulation``, we " +#~ "tell Flower that there are 10 " +#~ "clients (``num_clients=10``). Flower then goes" +#~ " ahead an asks the ``FedAvg`` " +#~ "strategy to select clients. ``FedAvg`` " +#~ "knows that it should select 100% " +#~ "of the available clients " +#~ "(``fraction_fit=1.0``), so it goes ahead " +#~ "and selects 10 random clients (i.e., " +#~ "100% of 10)." #~ msgstr "" #~ msgid "" -#~ "Flower uses :code:`pyproject.toml` to manage" -#~ " dependencies and configure development " -#~ "tools (the ones which support it). " -#~ "Poetry is a build tool which " -#~ "supports `PEP 517 " -#~ "`_." +#~ "Flower then asks the selected 10 " +#~ "clients to train the model. When " +#~ "the server receives the model parameter" +#~ " updates from the clients, it hands" +#~ " those updates over to the strategy" +#~ " (*FedAvg*) for aggregation. The strategy" +#~ " aggregates those updates and returns " +#~ "the new global model, which then " +#~ "gets used in the next round of " +#~ "federated learning." #~ msgstr "" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing machine learning" -#~ " workload with `FedBN `_, a federated training strategy" -#~ " designed for non-iid data. We " -#~ "are using PyTorch to train a " -#~ "Convolutional Neural Network(with Batch " -#~ "Normalization layers) on the CIFAR-10 " -#~ "dataset. When applying FedBN, only few" -#~ " changes needed compared to `Example: " -#~ "PyTorch - From Centralized To Federated" -#~ " `_." 
+#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" #~ msgstr "" #~ msgid "" -#~ "All files are revised based on " -#~ "`Example: PyTorch - From Centralized To" -#~ " Federated `_. The " -#~ "only thing to do is modifying the" -#~ " file called :code:`cifar.py`, revised part" -#~ " is shown below:" +#~ "In this notebook, we'll begin to " +#~ "customize the federated learning system " +#~ "we built in the introductory notebook" +#~ " (again, using `Flower `__" +#~ " and `PyTorch `__)." +#~ msgstr "" + +#~ msgid "Let's move beyond FedAvg with Flower strategies!" #~ msgstr "" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used PyTorch " -#~ "before. Let's take the next step " -#~ "and use what we've built to create" -#~ " a federated learning system within " -#~ "FedBN, the sytstem consists of one " -#~ "server and two clients." +#~ "Flower, by default, initializes the " +#~ "global model by asking one random " +#~ "client for the initial parameters. In" +#~ " many cases, we want more control " +#~ "over parameter initialization though. Flower" +#~ " therefore allows you to directly " +#~ "pass the initial parameters to the " +#~ "Strategy:" #~ msgstr "" #~ msgid "" -#~ "If you have read `Example: PyTorch " -#~ "- From Centralized To Federated " -#~ "`_, the following" -#~ " parts are easy to follow, onyl " -#~ ":code:`get_parameters` and :code:`set_parameters` " -#~ "function in :code:`client.py` needed to " -#~ "revise. If not, please read the " -#~ "`Example: PyTorch - From Centralized To" -#~ " Federated `_. first." +#~ "Passing ``initial_parameters`` to the " +#~ "``FedAvg`` strategy prevents Flower from " +#~ "asking one of the clients for the" +#~ " initial parameters. If we look " +#~ "closely, we can see that the logs" +#~ " do not show any calls to the" +#~ " ``FlowerClient.get_parameters`` method." 
#~ msgstr "" -#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgid "" +#~ "We've seen the function ``start_simulation``" +#~ " before. It accepts a number of " +#~ "arguments, amongst them the ``client_fn`` " +#~ "used to create ``FlowerClient`` instances, " +#~ "the number of clients to simulate " +#~ "``num_clients``, the number of rounds " +#~ "``num_rounds``, and the strategy." #~ msgstr "" #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." +#~ "Next, we'll just pass this function " +#~ "to the FedAvg strategy before starting" +#~ " the simulation:" #~ msgstr "" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" +#~ "We now have 1000 partitions, each " +#~ "holding 45 training and 5 validation " +#~ "examples. Given that the number of " +#~ "training examples on each client is " +#~ "quite small, we should probably train" +#~ " the model a bit longer, so we" +#~ " configure the clients to perform 3" +#~ " local training epochs. We should " +#~ "also adjust the fraction of clients " +#~ "selected for training during each round" +#~ " (we don't want all 1000 clients " +#~ "participating in every round), so we " +#~ "adjust ``fraction_fit`` to ``0.05``, which " +#~ "means that only 5% of available " +#~ "clients (so 50 clients) will be " +#~ "selected for training each round:" #~ msgstr "" -#~ msgid "Ready... Set... Train!" +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" #~ msgstr "" -#~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. 
" -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" #~ msgstr "" -#~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" #~ msgstr "" -#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgid "|3047bbce54b34099ae559963d0420d79|" #~ msgstr "" -#~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" #~ msgstr "" -#~ msgid "Now, let's see what is really happening inside." +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" #~ msgstr "" -#~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" #~ msgstr "" -#~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" #~ msgstr "" -#~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" #~ msgstr "" -#~ msgid "" -#~ "**cid**: is the client ID. 
It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." +#~ msgid "|032eb6fed6924ac387b9f13854919196|" #~ msgstr "" -#~ msgid "**sever_address**: String that identifies IP and port of the server." +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" #~ msgstr "" -#~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" #~ msgstr "" -#~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" #~ msgstr "" -#~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." 
+#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" -#~ msgid "A Closer Look" +#~ msgid ":py:obj:`client `\\" #~ msgstr "" -#~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ msgid ":py:obj:`common `\\" #~ msgstr "" -#~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" +#~ msgid ":py:obj:`server `\\" #~ msgstr "" -#~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." +#~ msgid ":py:obj:`simulation `\\" #~ msgstr "" -#~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" +#~ msgid ":py:obj:`mod `\\" #~ msgstr "" -#~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." +#~ msgid "run\\_client\\_app" #~ msgstr "" -#~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" +#~ msgid "run\\_supernode" #~ msgstr "" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" 
+#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" -#~ msgid "Give It a Try" +#~ msgid "Retrieve the corresponding layout by the string key." #~ msgstr "" #~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." #~ msgstr "" -#~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." +#~ msgid "the string key as the query for the layout." #~ msgstr "" -#~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" +#~ msgid "Corresponding layout based on the query." #~ msgstr "" #~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" #~ msgid "" -#~ "Go larger! Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" #~ msgstr "" -#~ msgid "You are ready now. Enjoy learning in a federated way!" 
+#~ msgid ":py:obj:`strategy `\\" #~ msgstr "" -#~ msgid "Differential privacy" +#~ msgid ":py:obj:`workflow `\\" #~ msgstr "" -#~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ msgid "run\\_server\\_app" #~ msgstr "" -#~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." +#~ msgid "run\\_superlink" #~ msgstr "" #~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" #~ msgstr "" -#~ msgid "DP-FedAvg" +#~ msgid "Start a Ray-based Flower simulation server." #~ msgstr "" #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) 
should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." #~ msgstr "" -#~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." +#~ msgid "The total number of clients in this simulation." #~ msgstr "" #~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" #~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." #~ msgstr "" -#~ msgid "Simplifying Assumptions" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." 
#~ msgstr "" #~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." #~ msgstr "" #~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." #~ msgstr "" #~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." #~ msgstr "" #~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. 
If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" + +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" #~ msgstr "" #~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." #~ msgstr "" #~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." #~ msgstr "" -#~ msgid "Customizable Responsibility for Noise injection" +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." #~ msgstr "" #~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." 
#~ msgstr "" #~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" #~ msgstr "" -#~ msgid "Wrapper-based approach" +#~ msgid "**hist** -- Object containing metrics from training." #~ msgstr "" #~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." 
+#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." #~ msgstr "" -#~ msgid "Server-side logic" +#~ msgid "Let's build a federated learning system using fastai and Flower!" #~ msgstr "" #~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" #~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." 
#~ msgstr "" #~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. 
" +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." #~ msgstr "" -#~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." +#~ msgid "Dependencies" #~ msgstr "" #~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" #~ msgstr "" -#~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." 
+#~ msgid "Standard Hugging Face workflow" #~ msgstr "" - -#~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." + +#~ msgid "Handling the data" #~ msgstr "" #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" #~ msgstr "" -#~ msgid "Client-side logic" +#~ msgid "Training and testing the model" #~ msgstr "" #~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. 
This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" -#~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." +#~ msgid "Creating the model itself" #~ msgstr "" #~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" #~ msgstr "" -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgid "Federating the example" +#~ msgstr "" + +#~ msgid "Creating the IMDBClient" #~ msgstr "" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" #~ msgstr "" #~ msgid "" -#~ "McMahan et al. \"Learning Differentially " -#~ "Private Recurrent Language Models.\" " -#~ "International Conference on Learning " -#~ "Representations (ICLR), 2017." +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. 
Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" + +#~ msgid "Starting the server" #~ msgstr "" #~ msgid "" -#~ "Andrew, Galen, et al. \"Differentially " -#~ "Private Learning with Adaptive Clipping.\" " -#~ "Advances in Neural Information Processing " -#~ "Systems (NeurIPS), 2021." +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" #~ msgstr "" #~ msgid "" -#~ "This can be achieved by customizing " -#~ "an existing strategy or by `implementing" -#~ " a custom strategy from scratch " -#~ "`_. Here's a nonsensical " -#~ "example that customizes :code:`FedAvg` by " -#~ "adding a custom ``\"hello\": \"world\"`` " -#~ "configuration key/value pair to the " -#~ "config dict of a *single client* " -#~ "(only the first client in the " -#~ "list, the other clients in this " -#~ "round to not receive this \"special\"" -#~ " config value):" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" + +#~ msgid "Putting everything together" +#~ msgstr "" + +#~ msgid "We can now start client instances using:" #~ msgstr "" #~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_fit` to implement custom " -#~ "client selection logic. 
A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the the list returned " -#~ "from :code:`configure_fit`." +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." #~ msgstr "" #~ msgid "" -#~ "More sophisticated implementations can use " -#~ ":code:`configure_evaluate` to implement custom " -#~ "client selection logic. A client will" -#~ " only participate in a round if " -#~ "the corresponding :code:`ClientProxy` is " -#~ "included in the the list returned " -#~ "from :code:`configure_evaluate`." +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." #~ msgstr "" #~ msgid "" -#~ "`How to run Flower using Docker " -#~ "`_" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." #~ msgstr "" #~ msgid "" -#~ "Ray Dashboard: ``_" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." #~ msgstr "" #~ msgid "" -#~ "Ray Metrics: ``_" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." #~ msgstr "" -#~ msgid "Enjoy building more robust and flexible ``ClientApp``s with mods!" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`ClientApp `\\ " -#~ "\\(client\\_fn\\[\\, mods\\]\\)" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." 
#~ msgstr "" -#~ msgid ":py:obj:`flwr.server.driver `\\" +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." #~ msgstr "" -#~ msgid "Flower driver SDK." +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" #~ msgstr "" -#~ msgid "driver" +#~ msgid "Before Flower can be imported we have to install it:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`GrpcDriver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." #~ msgstr "" -#~ msgid "`GrpcDriver` provides access to the gRPC Driver API/service." +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" #~ msgstr "" -#~ msgid ":py:obj:`get_nodes `\\ \\(\\)" +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. 
The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(task\\_ids\\)" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" -#~ msgid "Get task results." +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ " -#~ "\\(task\\_ins\\_list\\)" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "Schedule tasks." +#~ msgid "Each client will have its own dataset." #~ msgstr "" -#~ msgid "GrpcDriver" +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" #~ msgstr "" -#~ msgid ":py:obj:`connect `\\ \\(\\)" +#~ msgid "" +#~ "Congratulations! 
You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." #~ msgstr "" -#~ msgid "Connect to the Driver API." +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`create_run " -#~ "`\\ \\(req\\)" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" #~ msgstr "" -#~ msgid "Request for run ID." +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`disconnect " -#~ "`\\ \\(\\)" +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" #~ msgstr "" -#~ msgid "Disconnect from the Driver API." +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`get_nodes `\\" -#~ " \\(req\\)" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" #~ msgstr "" -#~ msgid "Get client IDs." +#~ msgid "|89d30862e62e4f9989e193483a08680a|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`pull_task_res " -#~ "`\\ \\(req\\)" +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" #~ msgstr "" -#~ msgid "" -#~ ":py:obj:`push_task_ins " -#~ "`\\ \\(req\\)" +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" #~ msgstr "" -#~ msgid "" -#~ "Optionally specify the type of actor " -#~ "to use. The actor object, which " -#~ "persists throughout the simulation, will " -#~ "be the process in charge of " -#~ "running the clients' jobs (i.e. their" -#~ " `fit()` method)." +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" #~ msgstr "" -#~ msgid "" -#~ "Much effort went into a completely " -#~ "restructured Flower docs experience. The " -#~ "documentation on [flower.ai/docs](flower.ai/docs) is" -#~ " now divided into Flower Framework, " -#~ "Flower Baselines, Flower Android SDK, " -#~ "Flower iOS SDK, and code example " -#~ "projects." +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" #~ msgstr "" -#~ msgid "" -#~ "The first preview release of Flower " -#~ "Baselines has arrived! 
We're kickstarting " -#~ "Flower Baselines with implementations of " -#~ "FedOpt (FedYogi, FedAdam, FedAdagrad), FedBN," -#~ " and FedAvgM. Check the documentation " -#~ "on how to use [Flower " -#~ "Baselines](https://flower.ai/docs/using-baselines.html). " -#~ "With this first preview release we're" -#~ " also inviting the community to " -#~ "[contribute their own " -#~ "baselines](https://flower.ai/docs/contributing-baselines.html)." +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" #~ msgstr "" -#~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" #~ msgstr "" -#~ msgid "The following examples are available as standalone projects." +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" -#~ msgid "Quickstart TensorFlow/Keras" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" #~ msgstr "" -#~ msgid "" -#~ "`Quickstart TensorFlow (Tutorial) " -#~ "`_" +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" #~ msgstr "" -#~ msgid "" -#~ "`Quickstart PyTorch (Tutorial) " -#~ "`_" +#~ msgid "|cc080a555947492fa66131dc3a967603|" #~ msgstr "" -#~ msgid "" -#~ "`PyTorch: From Centralized To Federated " -#~ "(Tutorial) `_" +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" #~ msgstr "" -#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" #~ msgstr "" -#~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." 
+#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" #~ msgstr "" -#~ msgid "Extra Dependencies" +#~ msgid "|163117eb654a4273babba413cf8065f5|" #~ msgstr "" -#~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" #~ msgstr "" -#~ msgid "For PyTorch examples::" +#~ msgid "|f403fcd69e4e44409627e748b404c086|" #~ msgstr "" -#~ msgid "For TensorFlow examples::" +#~ msgid "|4b00fe63870145968f8443619a792a42|" #~ msgstr "" -#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgid "|368378731066486fa4397e89bc6b870c|" #~ msgstr "" -#~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" #~ msgstr "" -#~ msgid "PyTorch Examples" +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" + +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" #~ msgstr "" #~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." +#~ "The Visual Studio Code Remote - " +#~ "Containers extension lets you use a " +#~ "Docker container as a fully-featured " +#~ "development environment. It allows you " +#~ "to open any folder inside (or " +#~ "mounted into) a container and take " +#~ "advantage of Visual Studio Code's full" +#~ " feature set. A :code:`devcontainer.json` " +#~ "file in your project tells VS Code" +#~ " how to access (or create) a " +#~ "development container with a well-" +#~ "defined tool and runtime stack. 
This " +#~ "container can be used to run an" +#~ " application or to separate tools, " +#~ "libraries, or runtimes needed for " +#~ "working with a codebase." #~ msgstr "" -#~ msgid "CIFAR-10 Image Classification" +#~ msgid "" +#~ "Configuring and setting up the " +#~ ":code:`Dockerfile` as well the configuration" +#~ " for the devcontainer can be a " +#~ "bit more involved. The good thing " +#~ "is you don't have to do it. " +#~ "Usually it should be enough to " +#~ "install `Docker " +#~ "`_ on your " +#~ "system and ensure its available on " +#~ "your command line. Additionally, install " +#~ "the `VSCode Containers Extension " +#~ "`_." #~ msgstr "" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." +#~ "If you prefer to use Anaconda for" +#~ " your virtual environment then install " +#~ "and setup the `conda " +#~ "`_ package. After setting" +#~ " it up you can create a virtual" +#~ " environment with:" #~ msgstr "" -#~ msgid "First, start a Flower server:" +#~ msgid "The :code:`SecAgg+` abstraction" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "The :code:`LightSecAgg` abstraction" #~ msgstr "" -#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgid "" +#~ "A fork is a personal copy of " +#~ "a GitHub repository. To create one " +#~ "for Flower, you must navigate to " +#~ "``_ (while connected " +#~ "to your GitHub account) and click " +#~ "the ``Fork`` button situated on the " +#~ "top right of the page." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "" +#~ "To check which files have been " +#~ "modified compared to the last version" +#~ " (last commit) and to see which " +#~ "files are staged for commit, you " +#~ "can use the :code:`git status` command." 
#~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +#~ msgid "" +#~ "Once you have added all the files" +#~ " you wanted to commit using " +#~ ":code:`git add`, you can finally create" +#~ " your commit using this command:" #~ msgstr "" -#~ msgid "ImageNet-2012 Image Classification" +#~ msgid "" +#~ "The \\ is there to " +#~ "explain to others what the commit " +#~ "does. It should be written in an" +#~ " imperative style and be concise. An" +#~ " example would be :code:`git commit " +#~ "-m \"Add images to README\"`." #~ msgstr "" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ ":doc:`Good first contributions `, where you" +#~ " should particularly look into the " +#~ ":code:`baselines` contributions." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgid "" +#~ "Flower uses :code:`pyproject.toml` to manage" +#~ " dependencies and configure development " +#~ "tools (the ones which support it). " +#~ "Poetry is a build tool which " +#~ "supports `PEP 517 " +#~ "`_." #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "" +#~ "Install `xz` (to install different " +#~ "Python versions) and `pandoc` to build" +#~ " the docs::" #~ msgstr "" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgid "" +#~ "Ensure you system (Ubuntu 22.04+) is " +#~ "up-to-date, and you have all " +#~ "necessary packages::" #~ msgstr "" -#~ msgid "TensorFlow Examples" +#~ msgid "" +#~ "1. Clone the `Flower repository " +#~ "`_ from GitHub::" #~ msgstr "" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." 
+#~ "Let's create the Python environment for" +#~ " all-things Flower. If you wish " +#~ "to use :code:`pyenv`, we provide two " +#~ "convenience scripts that you can use." +#~ " If you prefer using something else" +#~ " than :code:`pyenv`, create a new " +#~ "environment, activate and skip to the" +#~ " last point where all packages are" +#~ " installed." #~ msgstr "" -#~ msgid "Fashion-MNIST Image Classification" +#~ msgid "" +#~ "If you don't have :code:`pyenv` " +#~ "installed, the following script that " +#~ "will install it, set it up, and" +#~ " create the virtual environment (with " +#~ ":code:`Python 3.9.20` by default)::" #~ msgstr "" #~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ "If you already have :code:`pyenv` " +#~ "installed (along with the :code:`pyenv-" +#~ "virtualenv` plugin), you can use the " +#~ "following convenience script (with " +#~ ":code:`Python 3.9.20` by default)::" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgid "" +#~ "3. Install the Flower package in " +#~ "development mode (think :code:`pip install " +#~ "-e`) along with all necessary " +#~ "dependencies::" #~ msgstr "" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "" +#~ "The Flower repository contains a number" +#~ " of convenience scripts to make " +#~ "recurring development tasks easier and " +#~ "less error-prone. See the :code:`/dev`" +#~ " subdirectory for a full list. The" +#~ " following scripts are amongst the " +#~ "most important ones:" #~ msgstr "" #~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." 
+#~ "If in a hurry, bypass the hook " +#~ "using ``--no-verify`` with the ``git " +#~ "commit`` command. ::" #~ msgstr "" -#~ msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +#~ msgid "" +#~ "Developers could run the full set " +#~ "of Github Actions workflows under their" +#~ " local environment by using `Act " +#~ "`_. Please refer to" +#~ " the installation instructions under the" +#~ " linked repository and run the next" +#~ " command under Flower main cloned " +#~ "repository folder::" #~ msgstr "" #~ msgid "" -#~ "`Flower meets KOSMoS `_." +#~ "Flower uses Poetry to build releases." +#~ " The necessary command is wrapped in" +#~ " a simple script::" #~ msgstr "" #~ msgid "" -#~ "If you want to check out " -#~ "everything put together, you should " -#~ "check out the full code example: " -#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" -#~ "huggingface](https://github.com/adap/flower/tree/main/examples" -#~ "/quickstart-huggingface)." +#~ "The resulting :code:`.whl` and :code:`.tar.gz`" +#~ " releases will be stored in the " +#~ ":code:`/dist` subdirectory." #~ msgstr "" #~ msgid "" -#~ "First of all, for running the " -#~ "Flower Python server, it is recommended" -#~ " to create a virtual environment and" -#~ " run everything within a `virtualenv " -#~ "`_. " -#~ "For the Flower client implementation in" -#~ " iOS, it is recommended to use " -#~ "Xcode as our IDE." +#~ "Flower's documentation uses `Sphinx " +#~ "`_. 
There's no " +#~ "convenience script to re-build the " +#~ "documentation yet, but it's pretty " +#~ "easy::" #~ msgstr "" #~ msgid "" -#~ "Since CoreML does not allow the " -#~ "model parameters to be seen before " -#~ "training, and accessing the model " -#~ "parameters during or after the training" -#~ " can only be done by specifying " -#~ "the layer name, we need to know" -#~ " this informations beforehand, through " -#~ "looking at the model specification, " -#~ "which are written as proto files. " -#~ "The implementation can be seen in " -#~ ":code:`MLModelInspect`." +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." #~ msgstr "" #~ msgid "" -#~ "After we have all of the necessary" -#~ " informations, let's create our Flower " -#~ "client." +#~ "Change the application code. For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" #~ msgstr "" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongise Flower," -#~ " for example, PyTorch. This tutorial " -#~ "might be removed in future versions " -#~ "of Flower." +#~ "All files are revised based on " +#~ ":doc:`Example: PyTorch - From Centralized " +#~ "To Federated `. The only thing" +#~ " to do is modifying the file " +#~ "called :code:`cifar.py`, revised part is " +#~ "shown below:" +#~ msgstr "" + +#~ msgid "" +#~ "If you have read :doc:`Example: PyTorch" +#~ " - From Centralized To Federated " +#~ "`, the following parts are " +#~ "easy to follow, only :code:`get_parameters`" +#~ " and :code:`set_parameters` function in " +#~ ":code:`client.py` needed to revise. If " +#~ "not, please read the :doc:`Example: " +#~ "PyTorch - From Centralized To Federated" +#~ " `. first." 
#~ msgstr "" #~ msgid "" -#~ "It is recommended to create a " -#~ "virtual environment and run everything " -#~ "within this `virtualenv `_." +#~ "Our example consists of one *server* " +#~ "and two *clients*. In FedBN, " +#~ ":code:`server.py` keeps unchanged, we can " +#~ "start the server directly." #~ msgstr "" #~ msgid "" -#~ "First of all, it is recommended to" -#~ " create a virtual environment and run" -#~ " everything within a `virtualenv " -#~ "`_." +#~ "Finally, we will revise our *client* " +#~ "logic by changing :code:`get_parameters` and" +#~ " :code:`set_parameters` in :code:`client.py`, we" +#~ " will exclude batch normalization " +#~ "parameters from model parameter list " +#~ "when sending to or receiving from " +#~ "the server." #~ msgstr "" -#~ msgid "Since we want to use scikt-learn, let's go ahead and install it:" +#~ msgid "" +#~ "Let's create a new file called " +#~ ":code:`cifar.py` with all the components " +#~ "required for a traditional (centralized) " +#~ "training on CIFAR-10. First, all " +#~ "required packages (such as :code:`torch` " +#~ "and :code:`torchvision`) need to be " +#~ "imported. You can see that we do" +#~ " not import any package for federated" +#~ " learning. You can keep all these " +#~ "imports as they are even when we" +#~ " add the federated learning components " +#~ "at a later point." #~ msgstr "" #~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML `_, a popular" -#~ " image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The utility :code:`utils.load_mnist()` downloads " -#~ "the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ "As already mentioned we will use " +#~ "the CIFAR-10 dataset for this machine" +#~ " learning workload. The model architecture" +#~ " (a very simple Convolutional Neural " +#~ "Network) is defined in :code:`class " +#~ "Net()`." 
#~ msgstr "" #~ msgid "" -#~ "Now that you have known how " -#~ "federated XGBoost work with Flower, it's" -#~ " time to run some more comprehensive" -#~ " experiments by customising the " -#~ "experimental settings. In the xgboost-" -#~ "comprehensive example (`full code " -#~ "`_), we provide more options " -#~ "to define various experimental setups, " -#~ "including aggregation strategies, data " -#~ "partitioning and centralised/distributed evaluation." -#~ " We also support `Flower simulation " -#~ "`_ making it easy to " -#~ "simulate large client cohorts in a " -#~ "resource-aware manner. Let's take a " -#~ "look!" +#~ "The :code:`load_data()` function loads the " +#~ "CIFAR-10 training and test sets. The " +#~ ":code:`transform` normalized the data after" +#~ " loading." #~ msgstr "" -#~ msgid "|31e4b1afa87c4b968327bbeafbf184d4|" +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set, measures the " +#~ "loss, backpropagates it, and then takes" +#~ " one optimizer step for each batch" +#~ " of training examples." #~ msgstr "" -#~ msgid "|c9d935b4284e4c389a33d86b33e07c0a|" +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in the function :code:`test()`. " +#~ "The function loops over all test " +#~ "samples and measures the loss of " +#~ "the model based on the test " +#~ "dataset." #~ msgstr "" -#~ msgid "|00727b5faffb468f84dd1b03ded88638|" +#~ msgid "" +#~ "The concept is easy to understand. " +#~ "We have to start a *server* and" +#~ " then use the code in " +#~ ":code:`cifar.py` for the *clients* that " +#~ "are connected to the *server*. The " +#~ "*server* sends model parameters to the" +#~ " clients. The *clients* run the " +#~ "training and update the parameters. The" +#~ " updated parameters are sent back to" +#~ " the *server* which averages all " +#~ "received parameter updates. 
This describes " +#~ "one round of the federated learning " +#~ "process and we repeat this for " +#~ "multiple rounds." #~ msgstr "" -#~ msgid "|daf0cf0ff4c24fd29439af78416cf47b|" +#~ msgid "" +#~ "Our example consists of one *server* " +#~ "and two *clients*. Let's set up " +#~ ":code:`server.py` first. The *server* needs" +#~ " to import the Flower package " +#~ ":code:`flwr`. Next, we use the " +#~ ":code:`start_server` function to start a " +#~ "server and tell it to perform " +#~ "three rounds of federated learning." #~ msgstr "" -#~ msgid "|9f093007080d471d94ca90d3e9fde9b6|" +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined centralized " +#~ "training in :code:`cifar.py`. Our *client* " +#~ "needs to import :code:`flwr`, but also" +#~ " :code:`torch` to update the parameters " +#~ "on our PyTorch model:" #~ msgstr "" -#~ msgid "|46a26e6150e0479fbd3dfd655f36eb13|" +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`CifarClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or TensorFlow/Keras) because it " +#~ "avoids some of the boilerplate that " +#~ "would otherwise be necessary. 
" +#~ ":code:`CifarClient` needs to implement four" +#~ " methods, two methods for getting/setting" +#~ " model parameters, one method for " +#~ "training the model, and one method " +#~ "for testing the model:" #~ msgstr "" -#~ msgid "|3daba297595c4c7fb845d90404a6179a|" +#~ msgid ":code:`set_parameters`" #~ msgstr "" -#~ msgid "|5769874fa9c4455b80b2efda850d39d7|" +#~ msgid "" +#~ "loop over the list of model " +#~ "parameters received as NumPy :code:`ndarray`'s" +#~ " (think list of neural network " +#~ "layers)" #~ msgstr "" -#~ msgid "|ba47ffb421814b0f8f9fa5719093d839|" +#~ msgid ":code:`get_parameters`" #~ msgstr "" -#~ msgid "|aeac5bf79cbf497082e979834717e01b|" +#~ msgid "" +#~ "get the model parameters and return " +#~ "them as a list of NumPy " +#~ ":code:`ndarray`'s (which is what " +#~ ":code:`flwr.client.NumPyClient` expects)" #~ msgstr "" -#~ msgid "|ce27ed4bbe95459dba016afc42486ba2|" +#~ msgid ":code:`fit`" #~ msgstr "" -#~ msgid "|ae94a7f71dda443cbec2385751427d41|" +#~ msgid ":code:`evaluate`" #~ msgstr "" -#~ msgid "|e61fce4d43d243e7bb08bdde97d81ce6|" +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`cifar.py`. So what we really do" +#~ " here is we tell Flower through " +#~ "our :code:`NumPyClient` subclass which of " +#~ "our already defined functions to call" +#~ " for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" -#~ msgid "|08cb60859b07461588fe44e55810b050|" +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ ":code:`CifarClient`, and starts this client." +#~ " You load your data and model " +#~ "by using :code:`cifar.py`. 
Start " +#~ ":code:`CifarClient` with the function " +#~ ":code:`fl.client.start_client()` by pointing it " +#~ "at the same IP address we used " +#~ "in :code:`server.py`:" #~ msgstr "" -#~ msgid "``BASE_IMAGE_TAG``" -#~ msgstr "``BASE_IMAGE_TAG``" - -#~ msgid "The image tag of the base image." -#~ msgstr "A tag da imagem da imagem base." - #~ msgid "" -#~ "Open the notebook ``doc/source/tutorial-" -#~ "get-started-with-flower-pytorch.ipynb``:" +#~ "\\small\n" +#~ "\\frac{∆ \\times \\sqrt{2 \\times " +#~ "\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +#~ "\n" #~ msgstr "" #~ msgid "" -#~ "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -#~ "/tutorial-get-started-with-flower-" -#~ "pytorch.ipynb" +#~ "The :code:`Strategy` abstraction provides a" +#~ " method called :code:`evaluate` that can" +#~ " directly be used to evaluate the " +#~ "current global model parameters. The " +#~ "current server implementation calls " +#~ ":code:`evaluate` after parameter aggregation " +#~ "and before federated evaluation (see " +#~ "next paragraph)." #~ msgstr "" #~ msgid "" -#~ "https://colab.research.google.com/github/adap/flower/blob/branch-" -#~ "name/doc/source/tutorial-get-started-with-" -#~ "flower-pytorch.ipynb" +#~ "Client-side evaluation happens in the" +#~ " :code:`Client.evaluate` method and can be" +#~ " configured from the server side." #~ msgstr "" -#~ msgid "Virutualenv with Pyenv/Virtualenv" +#~ msgid "" +#~ ":code:`fraction_evaluate`: a :code:`float` defining" +#~ " the fraction of clients that will" +#~ " be selected for evaluation. If " +#~ ":code:`fraction_evaluate` is set to " +#~ ":code:`0.1` and :code:`100` clients are " +#~ "connected to the server, then :code:`10`" +#~ " will be randomly selected for " +#~ "evaluation. If :code:`fraction_evaluate` is " +#~ "set to :code:`0.0`, federated evaluation " +#~ "will be disabled." 
#~ msgstr "" #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." +#~ ":code:`min_evaluate_clients`: an :code:`int`: the" +#~ " minimum number of clients to be " +#~ "selected for evaluation. If " +#~ ":code:`fraction_evaluate` is set to " +#~ ":code:`0.1`, :code:`min_evaluate_clients` is set " +#~ "to 20, and :code:`100` clients are " +#~ "connected to the server, then :code:`20`" +#~ " clients will be selected for " +#~ "evaluation." #~ msgstr "" -#~ msgid "Open a PR (as shown above)" +#~ msgid "" +#~ ":code:`min_available_clients`: an :code:`int` that" +#~ " defines the minimum number of " +#~ "clients which need to be connected " +#~ "to the server before a round of" +#~ " federated evaluation can start. If " +#~ "fewer than :code:`min_available_clients` are " +#~ "connected to the server, the server " +#~ "will wait until more clients are " +#~ "connected before it continues to sample" +#~ " clients for evaluation." #~ msgstr "" -#~ msgid "How to write a good PR title" +#~ msgid "" +#~ ":code:`on_evaluate_config_fn`: a function that " +#~ "returns a configuration dictionary which " +#~ "will be sent to the selected " +#~ "clients. The function will be called " +#~ "during each round and provides a " +#~ "convenient way to customize client-side" +#~ " evaluation from the server side, for" +#~ " example, to configure the number of" +#~ " validation steps performed." #~ msgstr "" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. 
Here's a guide to help " -#~ "you write a good GitHub PR title:" +#~ "Model parameters can also be evaluated" +#~ " during training. :code:`Client.fit` can " +#~ "return arbitrary evaluation results as a" +#~ " dictionary:" #~ msgstr "" #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "The same :code:`Strategy`-customization approach " +#~ "can be used to aggregate custom " +#~ "evaluation results coming from individual " +#~ "clients. Clients can return custom " +#~ "metrics to the server by returning " +#~ "a dictionary:" +#~ msgstr "" + +#~ msgid "Enable node authentication in :code:`SuperLink`" #~ msgstr "" #~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" +#~ "To enable node authentication, first you" +#~ " need to configure SSL/TLS connections " +#~ "to secure the SuperLink<>SuperNode " +#~ "communication. You can find the complete" +#~ " guide `here `_. After " +#~ "configuring secure connections, you can " +#~ "enable client authentication in a " +#~ "long-running Flower :code:`SuperLink`. Use " +#~ "the following terminal command to start" +#~ " a Flower :code:`SuperNode` that has " +#~ "both secure connections and node " +#~ "authentication enabled:" #~ msgstr "" -#~ msgid "Implement Algorithm" +#~ msgid "" +#~ "The first flag :code:`--auth-list-" +#~ "public-keys` expects a path to a " +#~ "CSV file storing all known node " +#~ "public keys. 
You need to store all" +#~ " known node public keys that are " +#~ "allowed to participate in a federation" +#~ " in one CSV file (:code:`.csv`)." #~ msgstr "" -#~ msgid "Database" +#~ msgid "" +#~ "The second and third flags :code" +#~ ":`--auth-superlink-private-key` and :code" +#~ ":`--auth-superlink-public-key` expect paths" +#~ " to the server's private and public" +#~ " keys. For development purposes, you " +#~ "can generate a private and public " +#~ "key pair using :code:`ssh-keygen -t " +#~ "ecdsa -b 384`." #~ msgstr "" -#~ msgid "Add my_new_file.py to codebase" +#~ msgid "Enable node authentication in :code:`SuperNode`" #~ msgstr "" -#~ msgid "Improve code in module" +#~ msgid "" +#~ "Similar to the long-running Flower " +#~ "server (:code:`SuperLink`), you can easily " +#~ "enable node authentication in the " +#~ "long-running Flower client (:code:`SuperNode`)." +#~ " Use the following terminal command " +#~ "to start an authenticated :code:`SuperNode`:" #~ msgstr "" -#~ msgid "Change SomeModule" +#~ msgid "" +#~ "The :code:`--auth-supernode-private-key` " +#~ "flag expects a path to the node's" +#~ " private key file and the :code" +#~ ":`--auth-supernode-public-key` flag expects" +#~ " a path to the node's public " +#~ "key file. For development purposes, you" +#~ " can generate a private and public" +#~ " key pair using :code:`ssh-keygen -t" +#~ " ecdsa -b 384`." #~ msgstr "" #~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" +#~ "You should now have learned how to" +#~ " start a long-running Flower server" +#~ " (:code:`SuperLink`) and client " +#~ "(:code:`SuperNode`) with node authentication " +#~ "enabled. You should also know the " +#~ "significance of the private key and " +#~ "store it safely to minimize security " +#~ "risks." 
#~ msgstr "" -#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgid "" +#~ "The easiest way to send configuration" +#~ " values to clients is to use a" +#~ " built-in strategy like :code:`FedAvg`. " +#~ "Built-in strategies support so-called" +#~ " configuration functions. A configuration " +#~ "function is a function that the " +#~ "built-in strategy calls to get the" +#~ " configuration dictionary for the current" +#~ " round. It then forwards the " +#~ "configuration dictionary to all the " +#~ "clients selected during that round." #~ msgstr "" -#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgid "" +#~ "To make the built-in strategies " +#~ "use this function, we can pass it" +#~ " to ``FedAvg`` during initialization using" +#~ " the parameter :code:`on_fit_config_fn`:" #~ msgstr "" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgid "The :code:`FedAvg` strategy will call this function *every round*." #~ msgstr "" #~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" +#~ "This can be achieved by customizing " +#~ "an existing strategy or by " +#~ ":doc:`implementing a custom strategy from " +#~ "scratch `. " +#~ "Here's a nonsensical example that " +#~ "customizes :code:`FedAvg` by adding a " +#~ "custom ``\"hello\": \"world\"`` configuration " +#~ "key/value pair to the config dict " +#~ "of a *single client* (only the " +#~ "first client in the list, the " +#~ "other clients in this round to not" +#~ " receive this \"special\" config value):" +#~ msgstr "" + +#~ msgid "" +#~ "containing relevant information including: log" +#~ " message level (e.g. :code:`INFO`, " +#~ ":code:`DEBUG`), a timestamp, the line " +#~ "where the logging took place from, " +#~ "as well as the log message itself." 
+#~ " In this way, the logger would " +#~ "typically display information on your " +#~ "terminal as follows:" +#~ msgstr "" + +#~ msgid "" +#~ "By default, the Flower log is " +#~ "outputted to the terminal where you " +#~ "launch your Federated Learning workload " +#~ "from. This applies for both gRPC-" +#~ "based federation (i.e. when you do " +#~ ":code:`fl.server.start_server`) and when using " +#~ "the :code:`VirtualClientEngine` (i.e. when you" +#~ " do :code:`fl.simulation.start_simulation`). In " +#~ "some situations you might want to " +#~ "save this log to disk. You can " +#~ "do so by calling the " +#~ "`fl.common.logger.configure() " +#~ "`_" +#~ " function. For example:" +#~ msgstr "" + +#~ msgid "" +#~ "With the above, Flower will record " +#~ "the log you see on your terminal" +#~ " to :code:`log.txt`. This file will " +#~ "be created in the same directory " +#~ "as were you are running the code" +#~ " from. If we inspect we see the" +#~ " log above is also recorded but " +#~ "prefixing with :code:`identifier` each line:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`fl.common.logger.configure` function, " +#~ "also allows specifying a host to " +#~ "which logs can be pushed (via " +#~ ":code:`POST`) through a native Python " +#~ ":code:`logging.handler.HTTPHandler`. This is a " +#~ "particularly useful feature in " +#~ ":code:`gRPC`-based Federated Learning workloads " +#~ "where otherwise gathering logs from all" +#~ " entities (i.e. the server and the" +#~ " clients) might be cumbersome. Note " +#~ "that in Flower simulation, the server" +#~ " automatically displays all logs. You " +#~ "can still specify a :code:`HTTPHandler` " +#~ "should you wish to backup or " +#~ "analyze the logs somewhere else." 
#~ msgstr "" #~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" +#~ "This guide describes how to a " +#~ "SSL-enabled secure Flower server " +#~ "(:code:`SuperLink`) can be started and " +#~ "how a Flower client (:code:`SuperNode`) " +#~ "can establish a secure connections to" +#~ " it." #~ msgstr "" -#~ msgid "Changelog entry" +#~ msgid "" +#~ "The code example comes with a " +#~ ":code:`README.md` file which explains how " +#~ "to start it. Although it is " +#~ "already SSL-enabled, it might be " +#~ "less descriptive on how it does " +#~ "so. Stick to this guide for a " +#~ "deeper introduction to the topic." #~ msgstr "" #~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." +#~ "Using SSL-enabled connections requires " +#~ "certificates to be passed to the " +#~ "server and client. For the purpose " +#~ "of this guide we are going to " +#~ "generate self-signed certificates. As " +#~ "this can become quite complex we " +#~ "are going to ask you to run " +#~ "the script in :code:`examples/advanced-" +#~ "tensorflow/certificates/generate.sh` with the " +#~ "following command sequence:" #~ msgstr "" #~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" +#~ "This will generate the certificates in" +#~ " :code:`examples/advanced-tensorflow/.cache/certificates`." #~ msgstr "" #~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." +#~ "When setting :code:`root_certificates`, the " +#~ "client expects a file path to " +#~ "PEM-encoded root certificates." 
#~ msgstr "" -#~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." +#~ msgid "The :code:`Strategy` abstraction" #~ msgstr "" #~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." +#~ "All strategy implementation are derived " +#~ "from the abstract base class " +#~ ":code:`flwr.server.strategy.Strategy`, both built-in" +#~ " implementations and third party " +#~ "implementations. This means that custom " +#~ "strategy implementations have the exact " +#~ "same capabilities at their disposal as" +#~ " built-in ones." #~ msgstr "" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" +#~ "Creating a new strategy means " +#~ "implementing a new :code:`class` (derived " +#~ "from the abstract base class " +#~ ":code:`Strategy`) that implements for the " +#~ "previously shown abstract methods:" #~ msgstr "" -#~ msgid " is for classifying a PR as a general improvement." +#~ msgid "The :code:`initialize_parameters` method" #~ msgstr "" -#~ msgid " is to not add the PR to the changelog" +#~ msgid "" +#~ ":code:`initialize_parameters` is called only " +#~ "once, at the very beginning of an" +#~ " execution. It is responsible for " +#~ "providing the initial global model " +#~ "parameters in a serialized form (i.e.," +#~ " as a :code:`Parameters` object)." #~ msgstr "" -#~ msgid " is to add a general baselines change to the PR" +#~ msgid "" +#~ "Built-in strategies return user-provided" +#~ " initial parameters. 
The following example" +#~ " shows how initial parameters can be" +#~ " passed to :code:`FedAvg`:" #~ msgstr "" -#~ msgid " is to add a general examples change to the PR" +#~ msgid "" +#~ "The Flower server will call " +#~ ":code:`initialize_parameters`, which either returns" +#~ " the parameters that were passed to" +#~ " :code:`initial_parameters`, or :code:`None`. If" +#~ " no parameters are returned from " +#~ ":code:`initialize_parameters` (i.e., :code:`None`), " +#~ "the server will randomly select one " +#~ "client and ask it to provide its" +#~ " parameters. This is a convenience " +#~ "feature and not recommended in practice," +#~ " but it can be useful for " +#~ "prototyping. In practice, it is " +#~ "recommended to always use server-side" +#~ " parameter initialization." #~ msgstr "" -#~ msgid " is to add a general sdk change to the PR" +#~ msgid "The :code:`configure_fit` method" #~ msgstr "" -#~ msgid " is to add a general simulations change to the PR" +#~ msgid "" +#~ ":code:`configure_fit` is responsible for " +#~ "configuring the upcoming round of " +#~ "training. What does *configure* mean in" +#~ " this context? Configuring a round " +#~ "means selecting clients and deciding " +#~ "what instructions to send to these " +#~ "clients. The signature of " +#~ ":code:`configure_fit` makes this clear:" #~ msgstr "" -#~ msgid "Note that only one token should be used." +#~ msgid "" +#~ "The return value is a list of " +#~ "tuples, each representing the instructions " +#~ "that will be sent to a particular" +#~ " client. Strategy implementations usually " +#~ "perform the following steps in " +#~ ":code:`configure_fit`:" #~ msgstr "" #~ msgid "" -#~ "Its content must have a specific " -#~ "format. 
We will break down what " -#~ "each possibility does:" +#~ "Use the :code:`client_manager` to randomly " +#~ "sample all (or a subset of) " +#~ "available clients (each represented as a" +#~ " :code:`ClientProxy` object)" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" +#~ "Pair each :code:`ClientProxy` with the " +#~ "same :code:`FitIns` holding the current " +#~ "global model :code:`parameters` and " +#~ ":code:`config` dict" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" +#~ "More sophisticated implementations can use " +#~ ":code:`configure_fit` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the list returned from " +#~ ":code:`configure_fit`." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." +#~ "The structure of this return value " +#~ "provides a lot of flexibility to " +#~ "the user. Since instructions are defined" +#~ " on a per-client basis, different " +#~ "instructions can be sent to each " +#~ "client. This enables custom strategies " +#~ "to train, for example, different models" +#~ " on different clients, or use " +#~ "different hyperparameters on different clients" +#~ " (via the :code:`config` dict)." 
#~ msgstr "" -#~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" +#~ msgid "The :code:`aggregate_fit` method" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ ":code:`aggregate_fit` is responsible for " +#~ "aggregating the results returned by the" +#~ " clients that were selected and asked" +#~ " to train in :code:`configure_fit`." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ "Of course, failures can happen, so " +#~ "there is no guarantee that the " +#~ "server will get results from all " +#~ "the clients it sent instructions to " +#~ "(via :code:`configure_fit`). :code:`aggregate_fit` " +#~ "therefore receives a list of " +#~ ":code:`results`, but also a list of " +#~ ":code:`failures`." #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" +#~ ":code:`aggregate_fit` returns an optional " +#~ ":code:`Parameters` object and a dictionary " +#~ "of aggregated metrics. The :code:`Parameters`" +#~ " return value is optional because " +#~ ":code:`aggregate_fit` might decide that the" +#~ " results provided are not sufficient " +#~ "for aggregation (e.g., too many " +#~ "failures)." +#~ msgstr "" + +#~ msgid "The :code:`configure_evaluate` method" #~ msgstr "" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" +#~ ":code:`configure_evaluate` is responsible for " +#~ "configuring the upcoming round of " +#~ "evaluation. What does *configure* mean " +#~ "in this context? 
Configuring a round " +#~ "means selecting clients and deciding " +#~ "what instructions to send to these " +#~ "clients. The signature of " +#~ ":code:`configure_evaluate` makes this clear:" #~ msgstr "" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." +#~ "The return value is a list of " +#~ "tuples, each representing the instructions " +#~ "that will be sent to a particular" +#~ " client. Strategy implementations usually " +#~ "perform the following steps in " +#~ ":code:`configure_evaluate`:" #~ msgstr "" -#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgid "" +#~ "Pair each :code:`ClientProxy` with the " +#~ "same :code:`EvaluateIns` holding the current" +#~ " global model :code:`parameters` and " +#~ ":code:`config` dict" #~ msgstr "" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." +#~ "More sophisticated implementations can use " +#~ ":code:`configure_evaluate` to implement custom " +#~ "client selection logic. A client will" +#~ " only participate in a round if " +#~ "the corresponding :code:`ClientProxy` is " +#~ "included in the list returned from " +#~ ":code:`configure_evaluate`." 
#~ msgstr "" #~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ "The structure of this return value " +#~ "provides a lot of flexibility to " +#~ "the user. Since instructions are defined" +#~ " on a per-client basis, different " +#~ "instructions can be sent to each " +#~ "client. This enables custom strategies " +#~ "to evaluate, for example, different " +#~ "models on different clients, or use " +#~ "different hyperparameters on different clients" +#~ " (via the :code:`config` dict)." #~ msgstr "" -#~ msgid "MNIST Training with MXNet" +#~ msgid "The :code:`aggregate_evaluate` method" #~ msgstr "" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ ":code:`aggregate_evaluate` is responsible for " +#~ "aggregating the results returned by the" +#~ " clients that were selected and asked" +#~ " to evaluate in :code:`configure_evaluate`." #~ msgstr "" #~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." +#~ "Of course, failures can happen, so " +#~ "there is no guarantee that the " +#~ "server will get results from all " +#~ "the clients it sent instructions to " +#~ "(via :code:`configure_evaluate`). " +#~ ":code:`aggregate_evaluate` therefore receives a " +#~ "list of :code:`results`, but also a " +#~ "list of :code:`failures`." 
#~ msgstr "" #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." +#~ ":code:`aggregate_evaluate` returns an optional " +#~ ":code:`float` (loss) and a dictionary of" +#~ " aggregated metrics. The :code:`float` " +#~ "return value is optional because " +#~ ":code:`aggregate_evaluate` might decide that " +#~ "the results provided are not sufficient" +#~ " for aggregation (e.g., too many " +#~ "failures)." +#~ msgstr "" + +#~ msgid "The :code:`evaluate` method" #~ msgstr "" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." +#~ ":code:`evaluate` is responsible for evaluating" +#~ " model parameters on the server-side." +#~ " Having :code:`evaluate` in addition to " +#~ ":code:`configure_evaluate`/:code:`aggregate_evaluate` enables" +#~ " strategies to perform both servers-" +#~ "side and client-side (federated) " +#~ "evaluation." #~ msgstr "" #~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." +#~ "The return value is again optional " +#~ "because the strategy might not need " +#~ "to implement server-side evaluation or" +#~ " because the user-defined :code:`evaluate`" +#~ " method might not complete successfully " +#~ "(e.g., it might fail to load the" +#~ " server-side evaluation data)." #~ msgstr "" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." 
+#~ "Stable releases are available on `PyPI" +#~ " `_::" #~ msgstr "" #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr`` should be " +#~ "installed with the ``simulation`` extra::" #~ msgstr "" -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgid "" +#~ "If you have not added ``conda-" +#~ "forge`` to your channels, you will " +#~ "first need to run the following::" #~ msgstr "" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." +#~ "Once the ``conda-forge`` channel has " +#~ "been enabled, ``flwr`` can be installed" +#~ " with ``conda``::" #~ msgstr "" -#~ msgid "MXNet meets Flower" +#~ msgid "or with ``mamba``::" #~ msgstr "" #~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." 
+#~ "New (possibly unstable) versions of " +#~ "Flower are sometimes available as " +#~ "pre-release versions (alpha, beta, release" +#~ " candidate) before the stable release " +#~ "happens::" #~ msgstr "" #~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr`` pre-releases " +#~ "should be installed with the " +#~ "``simulation`` extra::" #~ msgstr "" #~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ "The latest (potentially unstable) changes " +#~ "in Flower are available as nightly " +#~ "releases::" #~ msgstr "" #~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. 
:code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ "For simulations that use the Virtual " +#~ "Client Engine, ``flwr-nightly`` should " +#~ "be installed with the ``simulation`` " +#~ "extra::" #~ msgstr "" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgid "You can look at everything at ``_ ." #~ msgstr "" #~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." +#~ "After you finish the visualization, stop" +#~ " Prometheus and Grafana. This is " +#~ "important as they will otherwise block," +#~ " for example port :code:`3000` on " +#~ "your machine as long as they are" +#~ " running." #~ msgstr "" #~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ "In the example above, only one " +#~ "client will be run, so your " +#~ "clients won't run concurrently. Setting " +#~ ":code:`client_num_gpus = 0.5` would allow " +#~ "running two clients and therefore enable" +#~ " them to run concurrently. 
Be careful" +#~ " not to require more resources than" +#~ " available. If you specified " +#~ ":code:`client_num_gpus = 2`, the simulation" +#~ " wouldn't start (even if you had " +#~ "2 GPUs but decided to set 1 " +#~ "in :code:`ray_init_args`)." #~ msgstr "" #~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ "Q: I see \"This site can't be " +#~ "reached\" when going to " +#~ "``_." #~ msgstr "" #~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" +#~ "Ray Dashboard: ``_" +#~ msgstr "" + +#~ msgid "Ray Metrics: ``_" #~ msgstr "" #~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. the ones you launch" +#~ " via the command `flwr.client.start_client " +#~ "`_) in the" +#~ " sense that they can be configure " +#~ "by creating a class inheriting, for " +#~ "example, from `flwr.client.NumPyClient `_ and therefore" +#~ " behave in an identical way. 
In " +#~ "addition to that, clients managed by " +#~ "the :code:`VirtualClientEngine` are:" #~ msgstr "" #~ msgid "" -#~ "This guide describes how to a " -#~ "SSL-enabled secure Flower server can " -#~ "be started and how a Flower client" -#~ " can establish a secure connections " -#~ "to it." +#~ "self-managed: this means that you " +#~ "as a user do not need to " +#~ "launch clients manually, instead this " +#~ "gets delegated to :code:`VirtualClientEngine`'s " +#~ "internals." #~ msgstr "" #~ msgid "" -#~ "The code example comes with a " -#~ "README.md file which will explain how" -#~ " to start it. Although it is " -#~ "already SSL-enabled, it might be " -#~ "less descriptive on how. Stick to " -#~ "this guide for a deeper introduction " -#~ "to the topic." +#~ "The :code:`VirtualClientEngine` implements `virtual`" +#~ " clients using `Ray `_, " +#~ "an open-source framework for scalable" +#~ " Python workloads. In particular, Flower's" +#~ " :code:`VirtualClientEngine` makes use of " +#~ "`Actors `_ to spawn `virtual` clients" +#~ " and run their workload." #~ msgstr "" #~ msgid "" -#~ "Using SSL-enabled connections requires " -#~ "certificates to be passed to the " -#~ "server and client. For the purpose " -#~ "of this guide we are going to " -#~ "generate self-signed certificates. As " -#~ "this can become quite complex we " -#~ "are going to ask you to run " -#~ "the script in :code:`examples/advanced-" -#~ "tensorflow/certificates/generate.sh`" +#~ "By default the VCE has access to" +#~ " all system resources (i.e. all CPUs," +#~ " all GPUs, etc) since that is " +#~ "also the default behavior when starting" +#~ " Ray. However, in some settings you" +#~ " might want to limit how many " +#~ "of your system resources are used " +#~ "for simulation. You can do this " +#~ "via the :code:`ray_init_args` input argument" +#~ " to :code:`start_simulation` which the VCE" +#~ " internally passes to Ray's " +#~ ":code:`ray.init` command. 
For a complete " +#~ "list of settings you can configure " +#~ "check the `ray.init `_ " +#~ "documentation. Do not set " +#~ ":code:`ray_init_args` if you want the " +#~ "VCE to use all your system's CPUs" +#~ " and GPUs." #~ msgstr "" -#~ msgid "with the following command sequence:" +#~ msgid "" +#~ "By default the :code:`VirtualClientEngine` " +#~ "assigns a single CPU core (and " +#~ "nothing else) to each virtual client." +#~ " This means that if your system " +#~ "has 10 cores, that many virtual " +#~ "clients can be concurrently running." +#~ msgstr "" + +#~ msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." #~ msgstr "" #~ msgid "" -#~ "The approach how the SSL certificates" -#~ " are generated in this example can" -#~ " serve as an inspiration and starting" -#~ " point but should not be taken " -#~ "as complete for production environments. " -#~ "Please refer to other sources regarding" -#~ " the issue of correctly generating " -#~ "certificates for production environments." +#~ ":code:`num_gpus` indicates the **ratio** of" +#~ " GPU memory a client gets assigned." #~ msgstr "" #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." +#~ "While the :code:`client_resources` can be " +#~ "used to control the degree of " +#~ "concurrency in your FL simulation, this" +#~ " does not stop you from running " +#~ "dozens, hundreds or even thousands of" +#~ " clients in the same round and " +#~ "having orders of magnitude more " +#~ "`dormant` (i.e. not participating in a" +#~ " round) clients. Let's say you want" +#~ " to have 100 clients per round " +#~ "but your system can only accommodate " +#~ "8 clients concurrently. 
The " +#~ ":code:`VirtualClientEngine` will schedule 100 " +#~ "jobs to run (each simulating a " +#~ "client sampled by the strategy) and " +#~ "then will execute them in a " +#~ "resource-aware manner in batches of " +#~ "8." #~ msgstr "" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." +#~ "Flower's :code:`VirtualClientEngine` allows you " +#~ "to run FL simulations across multiple" +#~ " compute nodes. Before starting your " +#~ "multi-node simulation ensure that you:" #~ msgstr "" #~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ "Pass :code:`ray_init_args={\"address\"=\"auto\"}` to " +#~ "`start_simulation `_ so the " +#~ ":code:`VirtualClientEngine` attaches to a " +#~ "running Ray instance." #~ msgstr "" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a client which uses the" -#~ " previously generated scripts:" +#~ "Start Ray on you head node: on " +#~ "the terminal type :code:`ray start " +#~ "--head`. This command will print a " +#~ "few lines, one of which indicates " +#~ "how to attach other nodes to the" +#~ " head node." #~ msgstr "" #~ msgid "" -#~ "When setting :code:`root_certificates`, the " -#~ "client expects the PEM-encoded root " -#~ "certificates as a byte string. We " -#~ "are again using :code:`Path` to simplify" -#~ " reading those as byte strings." 
+#~ "Attach other nodes to the head " +#~ "node: copy the command shown after " +#~ "starting the head and execute it " +#~ "on terminal of a new node: for " +#~ "example :code:`ray start " +#~ "--address='192.168.1.132:6379'`" #~ msgstr "" #~ msgid "" -#~ "You should now have learned how to" -#~ " generate self-signed certificates using" -#~ " the given script, start a SSL-" -#~ "enabled server, and have a client " -#~ "establish a secure connection to it." +#~ "Once your simulation is finished, if " +#~ "you'd like to dismantle your cluster " +#~ "you simply need to run the command" +#~ " :code:`ray stop` in each node's " +#~ "terminal (including the head node)." #~ msgstr "" #~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`_." +#~ "User :code:`ray status` to check all " +#~ "nodes connected to your head node " +#~ "as well as the total resources " +#~ "available to the :code:`VirtualClientEngine`." #~ msgstr "" -#~ msgid "Flower server" +#~ msgid "" +#~ "When attaching a new node to the" +#~ " head, all its resources (i.e. all" +#~ " CPUs, all GPUs) will be visible " +#~ "by the head node. This means that" +#~ " the :code:`VirtualClientEngine` can schedule " +#~ "as many `virtual` clients as that " +#~ "node can possible run. In some " +#~ "settings you might want to exclude " +#~ "certain resources from the simulation. " +#~ "You can do this by appending " +#~ "`--num-cpus=` and/or `--num-" +#~ "gpus=` in any :code:`ray " +#~ "start` command (including when starting " +#~ "the head)" #~ msgstr "" #~ msgid "" -#~ "The command will pull the Docker " -#~ "image with the tag " -#~ "``1.7.0-py3.11-ubuntu22.04`` from Docker Hub. " -#~ "The tag contains the information which" -#~ " Flower, Python and Ubuntu is used." -#~ " In this case, it uses Flower " -#~ "1.7.0, Python 3.11 and Ubuntu 22.04. 
" -#~ "The ``--rm`` flag tells Docker to " -#~ "remove the container after it exits." +#~ "The VCE assigns a share of GPU " +#~ "memory to a client that specifies " +#~ "the key :code:`num_gpus` in " +#~ ":code:`client_resources`. This being said, Ray" +#~ " (used internally by the VCE) is " +#~ "by default:" #~ msgstr "" #~ msgid "" -#~ "By default, the Flower server keeps " -#~ "state in-memory. When using the " -#~ "Docker flag ``--rm``, the state is " -#~ "not persisted between container starts. " -#~ "We will show below how to save " -#~ "the state in a file on your " -#~ "host system." +#~ "not aware of the total VRAM " +#~ "available on the GPUs. This means " +#~ "that if you set :code:`num_gpus=0.5` and" +#~ " you have two GPUs in your " +#~ "system with different (e.g. 32GB and " +#~ "8GB) VRAM amounts, they both would " +#~ "run 2 clients concurrently." #~ msgstr "" #~ msgid "" -#~ "The ``-p :`` flag tells " -#~ "Docker to map the ports " -#~ "``9091``/``9092`` of the host to " -#~ "``9091``/``9092`` of the container, allowing" -#~ " you to access the Driver API " -#~ "on ``http://localhost:9091`` and the Fleet " -#~ "API on ``http://localhost:9092``. Lastly, any" -#~ " flag that comes after the tag " -#~ "is passed to the Flower server. " -#~ "Here, we are passing the flag " -#~ "``--insecure``." +#~ "If you want to run several " +#~ "independent Flower simulations on the " +#~ "same machine you need to mask-out" +#~ " your GPUs with " +#~ ":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching" +#~ " your experiment." #~ msgstr "" #~ msgid "" -#~ "The ``--insecure`` flag enables insecure " -#~ "communication (using HTTP, not HTTPS) " -#~ "and should only be used for " -#~ "testing purposes. We strongly recommend " -#~ "enabling `SSL `_ when " -#~ "deploying to a production environment." +#~ "In addition, the GPU resource limits " +#~ "passed to :code:`client_resources` are not " +#~ "`enforced` (i.e. 
they can be exceeded)" +#~ " which can result in the situation" +#~ " of client using more VRAM than " +#~ "the ratio specified when starting the" +#~ " simulation." #~ msgstr "" #~ msgid "" -#~ "You can use ``--help`` to view all" -#~ " available flags that the server " -#~ "supports:" +#~ "This would need to be done in " +#~ "the main process (which is where " +#~ "the server would run) and in each" +#~ " Actor created by the VCE. By " +#~ "means of :code:`actor_kwargs` we can " +#~ "pass the reserved key `\"on_actor_init_fn\"`" +#~ " in order to specify a function " +#~ "to be executed upon actor " +#~ "initialization. In this case, to enable" +#~ " GPU growth for TF workloads. It " +#~ "would look as follows:" #~ msgstr "" #~ msgid "" -#~ "If you want to persist the state" -#~ " of the server on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``-v`` to" -#~ " mount the user's home directory " -#~ "(``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ "Model updates can be persisted on " +#~ "the server-side by customizing " +#~ ":code:`Strategy` methods. Implementing custom " +#~ "strategies is always an option, but " +#~ "for many cases it may be more " +#~ "convenient to simply customize an " +#~ "existing strategy. The following code " +#~ "example defines a new " +#~ ":code:`SaveModelStrategy` which customized the " +#~ "existing built-in :code:`FedAvg` strategy. " +#~ "In particular, it customizes " +#~ ":code:`aggregate_fit` by calling " +#~ ":code:`aggregate_fit` in the base class " +#~ "(:code:`FedAvg`). 
It then continues to " +#~ "save returned (aggregated) weights before " +#~ "it returns those aggregated weights to" +#~ " the caller (i.e., the server):" #~ msgstr "" #~ msgid "" -#~ "As soon as the server starts, the" -#~ " file ``state.db`` is created in the" -#~ " user's home directory on your host" -#~ " system. If the file already exists," -#~ " the server tries to restore the " -#~ "state from the file. To start the" -#~ " server with an empty database, " -#~ "simply remove the ``state.db`` file." +#~ "For central DP with server-side " +#~ "clipping, there are two :code:`Strategy` " +#~ "classes that act as wrappers around " +#~ "the actual :code:`Strategy` instance (for " +#~ "example, :code:`FedAvg`). The two wrapper " +#~ "classes are " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` for " +#~ "fixed and adaptive clipping." #~ msgstr "" #~ msgid "" -#~ "To enable SSL, you will need a " -#~ "CA certificate, a server certificate and" -#~ " a server private key." +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use server-" +#~ "side fixed clipping using the " +#~ ":code:`DifferentialPrivacyServerSideFixedClipping` wrapper " +#~ "class. The same approach can be " +#~ "used with " +#~ ":code:`DifferentialPrivacyServerSideAdaptiveClipping` by " +#~ "adjusting the corresponding input parameters." #~ msgstr "" #~ msgid "" -#~ "For testing purposes, you can generate" -#~ " your own self-signed certificates. " -#~ "The `Enable SSL connections " -#~ "`_ page contains " -#~ "a section that will guide you " -#~ "through the process." +#~ "For central DP with client-side " +#~ "clipping, the server sends the clipping" +#~ " value to selected clients on each" +#~ " round. Clients can use existing " +#~ "Flower :code:`Mods` to perform the " +#~ "clipping. 
Two mods are available for " +#~ "fixed and adaptive client-side clipping:" +#~ " :code:`fixedclipping_mod` and " +#~ ":code:`adaptiveclipping_mod` with corresponding " +#~ "server-side wrappers " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` and " +#~ ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." #~ msgstr "" #~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``-v`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the server to access " -#~ "the files within the container. Finally," -#~ " we pass the names of the " -#~ "certificates to the server with the " -#~ "``--certificates`` flag." +#~ "The code sample below enables the " +#~ ":code:`FedAvg` strategy to use differential" +#~ " privacy with client-side fixed " +#~ "clipping using both the " +#~ ":code:`DifferentialPrivacyClientSideFixedClipping` wrapper " +#~ "class and, on the client, " +#~ ":code:`fixedclipping_mod`:" #~ msgstr "" -#~ msgid "Using a different Flower or Python version" +#~ msgid "" +#~ "In addition to the server-side " +#~ "strategy wrapper, the :code:`ClientApp` needs" +#~ " to configure the matching " +#~ ":code:`fixedclipping_mod` to perform the " +#~ "client-side clipping:" +#~ msgstr "" + +#~ msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" #~ msgstr "" #~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower or Python, you " -#~ "can do so by changing the tag. " -#~ "All versions we provide are available" -#~ " on `Docker Hub " -#~ "`_." +#~ "Flower allows full customization of the" +#~ " learning process through the " +#~ ":code:`Strategy` abstraction. A number of " +#~ "built-in strategies are provided in " +#~ "the core framework." 
+#~ msgstr "" + +#~ msgid "Use an existing strategy, for example, :code:`FedAvg`" #~ msgstr "" #~ msgid "" -#~ "The following command returns the " -#~ "current image hash referenced by the " -#~ "``server:1.7.0-py3.11-ubuntu22.04`` tag:" +#~ "This creates a strategy with all " +#~ "parameters left at their default values" +#~ " and passes it to the " +#~ ":code:`start_server` function. It is usually" +#~ " recommended to adjust a few " +#~ "parameters during instantiation:" #~ msgstr "" -#~ msgid "Next, we can pin the hash when running a new server container:" +#~ msgid "" +#~ "The server can pass new configuration" +#~ " values to the client each round " +#~ "by providing a function to " +#~ ":code:`on_fit_config_fn`. The provided function " +#~ "will be called by the strategy and" +#~ " must return a dictionary of " +#~ "configuration key values pairs that will" +#~ " be sent to the client. It must" +#~ " return a dictionary of arbitrary " +#~ "configuration values :code:`client.fit` and " +#~ ":code:`client.evaluate` functions during each " +#~ "round of federated learning." #~ msgstr "" #~ msgid "" -#~ "QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow " -#~ "` | :doc:`🤗 " -#~ "Transformers ` " -#~ "| :doc:`JAX ` |" -#~ " :doc:`Pandas ` " -#~ "| :doc:`fastai `" -#~ " | :doc:`PyTorch Lightning ` | :doc:`MXNet " -#~ "` | :doc" -#~ ":`scikit-learn `" -#~ " | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +#~ "The :code:`on_fit_config_fn` can be used " +#~ "to pass arbitrary configuration values " +#~ "from server to client, and potentially" +#~ " change these values each round, for" +#~ " example, to adjust the learning " +#~ "rate. The client will receive the " +#~ "dictionary returned by the " +#~ ":code:`on_fit_config_fn` in its own " +#~ ":code:`client.fit()` function." 
#~ msgstr "" -#~ msgid "flower-driver-api" +#~ msgid "" +#~ "Similar to :code:`on_fit_config_fn`, there is" +#~ " also :code:`on_evaluate_config_fn` to customize" +#~ " the configuration sent to " +#~ ":code:`client.evaluate()`" #~ msgstr "" -#~ msgid "flower-fleet-api" +#~ msgid "" +#~ "Server-side evaluation can be enabled" +#~ " by passing an evaluation function to" +#~ " :code:`evaluate_fn`." #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, :py:class:`str`, " -#~ ":py:class:`bytes`, :py:class:`bool`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`str`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bytes`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_error_reply " -#~ "`\\ \\(error\\, " -#~ "ttl\\)" +#~ "The following examples are available as" +#~ " standalone projects. Quickstart TensorFlow/Keras" +#~ " ---------------------------" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_reply `\\ " -#~ "\\(content\\, ttl\\)" +#~ "Let's create a new application project" +#~ " in Xcode and add :code:`flwr` as " +#~ "a dependency in your project. For " +#~ "our application, we will store the " +#~ "logic of our app in " +#~ ":code:`FLiOSModel.swift` and the UI elements" +#~ " in :code:`ContentView.swift`. 
We will " +#~ "focus more on :code:`FLiOSModel.swift` in " +#~ "this quickstart. Please refer to the " +#~ "`full code example " +#~ "`_ to " +#~ "learn more about the app." +#~ msgstr "" + +#~ msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" #~ msgstr "" #~ msgid "" -#~ "Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -#~ "[:py:class:`str`, :py:obj:`~typing.Union`\\ " -#~ "[:py:class:`int`, :py:class:`float`, " -#~ ":py:class:`~typing.List`\\ [:py:class:`int`], " -#~ ":py:class:`~typing.List`\\ [:py:class:`float`]]]" +#~ "Then add the mlmodel to the " +#~ "project simply by drag-and-drop, " +#~ "the mlmodel will be bundled inside " +#~ "the application during deployment to " +#~ "your iOS device. We need to pass" +#~ " the url to access mlmodel and " +#~ "run CoreML machine learning processes, " +#~ "it can be retrieved by calling the" +#~ " function :code:`Bundle.main.url`. For the " +#~ "MNIST dataset, we need to preprocess " +#~ "it into :code:`MLBatchProvider` object. The" +#~ " preprocessing is done inside " +#~ ":code:`DataLoader.swift`." #~ msgstr "" -#~ msgid "Run Flower server (Driver API and Fleet API)." +#~ msgid "" +#~ "Since CoreML does not allow the " +#~ "model parameters to be seen before " +#~ "training, and accessing the model " +#~ "parameters during or after the training" +#~ " can only be done by specifying " +#~ "the layer name, we need to know" +#~ " this information beforehand, through " +#~ "looking at the model specification, " +#~ "which are written as proto files. " +#~ "The implementation can be seen in " +#~ ":code:`MLModelInspect`." #~ msgstr "" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" -#~ msgstr "" - -#~ msgid "Start a Flower Driver API server." +#~ "Then start the Flower gRPC client " +#~ "and start communicating to the server" +#~ " by passing our Flower client to " +#~ "the function :code:`startFlwrGRPC`." 
#~ msgstr "" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" -#~ msgstr "" - -#~ msgid "`Driver` class provides an interface to the Driver API." +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ "call the provided :code:`MLFlwrClient` and " +#~ "call :code:`startFlwrGRPC()`. The attribute " +#~ ":code:`hostname` and :code:`port` tells the" +#~ " client which server to connect to." +#~ " This can be done by entering " +#~ "the hostname and port in the " +#~ "application before clicking the start " +#~ "button to start the federated learning" +#~ " process." #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." +#~ "For simple workloads we can start " +#~ "a Flower server and leave all the" +#~ " configuration possibilities at their " +#~ "default values. In a file named " +#~ ":code:`server.py`, import Flower and start " +#~ "the server:" #~ msgstr "" -#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system in your ios device. The " +#~ "full `source code " +#~ "`_ for" +#~ " this example can be found in " +#~ ":code:`examples/ios`." #~ msgstr "" -#~ msgid "Disconnect from the SuperLink if connected." +#~ msgid "" +#~ "Before we start building our JAX " +#~ "example, we need install the packages" +#~ " :code:`jax`, :code:`jaxlib`, :code:`scikit-" +#~ "learn`, and :code:`flwr`:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`create_message `\\" -#~ " \\(content\\, message\\_type\\, ...\\)" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Linear Regression` model. " +#~ "If you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `JAX" +#~ " documentation `_." 
#~ msgstr "" #~ msgid "" -#~ "Time-to-live for the round trip" -#~ " of this message, i.e., the time " -#~ "from sending this message to receiving" -#~ " a reply. It specifies the duration" -#~ " for which the message and its " -#~ "potential reply are considered valid." +#~ "Let's create a new file called " +#~ ":code:`jax_training.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) linear regression training. " +#~ "First, the JAX packages :code:`jax` and" +#~ " :code:`jaxlib` need to be imported. " +#~ "In addition, we need to import " +#~ ":code:`sklearn` since we use " +#~ ":code:`make_regression` for the dataset and" +#~ " :code:`train_test_split` to split the " +#~ "dataset into a training and test " +#~ "set. You can see that we do " +#~ "not yet import the :code:`flwr` package" +#~ " for federated learning. This will be" +#~ " done later." #~ msgstr "" -#~ msgid "start\\_driver" +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "mentioned training and test sets." #~ msgstr "" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." +#~ "The model architecture (a very simple" +#~ " :code:`Linear Regression` model) is " +#~ "defined in :code:`load_model()`." #~ msgstr "" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." -#~ " If no instance is provided, then " -#~ "`start_driver` will create one." +#~ "We now need to define the training" +#~ " (function :code:`train()`), which loops " +#~ "over the training set and measures " +#~ "the loss (function :code:`loss_fn()`) for " +#~ "each batch of training examples. The " +#~ "loss function is separate since JAX " +#~ "takes derivatives with a :code:`grad()` " +#~ "function (defined in the :code:`main()` " +#~ "function and called in :code:`train()`)." #~ msgstr "" #~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. 
If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." +#~ "The evaluation of the model is " +#~ "defined in the function :code:`evaluation()`." +#~ " The function takes all test examples" +#~ " and measures the loss of the " +#~ "linear regression model." #~ msgstr "" -#~ msgid "The Driver object to use." +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model using JAX. As " +#~ "already mentioned, the :code:`jax.grad()` " +#~ "function is defined in :code:`main()` " +#~ "and passed to :code:`train()`." #~ msgstr "" -#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgid "" +#~ "The concept of federating an existing" +#~ " workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`jax_training.py` for the" +#~ " *clients* that are connected to the" +#~ " *server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server*, which " +#~ "averages all received parameter updates. " +#~ "This describes one round of the " +#~ "federated learning process, and we " +#~ "repeat this for multiple rounds." #~ msgstr "" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined JAX training" +#~ " in :code:`jax_training.py`. 
Our *client* " +#~ "needs to import :code:`flwr`, but also" +#~ " :code:`jax` and :code:`jaxlib` to update" +#~ " the parameters on our JAX model:" #~ msgstr "" #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`FlowerClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "JAX) because it avoids some of the" +#~ " boilerplate that would otherwise be " +#~ "necessary. :code:`FlowerClient` needs to " +#~ "implement four methods, two methods for" +#~ " getting/setting model parameters, one " +#~ "method for training the model, and " +#~ "one method for testing the model:" #~ msgstr "" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid ":code:`set_parameters (optional)`" #~ msgstr "" -#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgid "transform parameters to NumPy :code:`ndarray`'s" #~ msgstr "" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." -#~ msgstr "" - -#~ msgid "Quickstart MXNet" +#~ "The challenging part is to transform " +#~ "the JAX model parameters from " +#~ ":code:`DeviceArray` to :code:`NumPy ndarray` " +#~ "to make them compatible with " +#~ "`NumPyClient`." #~ msgstr "" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." 
+#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`evaluate()` previously defined in " +#~ ":code:`jax_training.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." #~ msgstr "" #~ msgid "" #~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." -#~ msgstr "" - -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ "how to train a :code:`Logistic " +#~ "Regression` model on MNIST using Flower" +#~ " and scikit-learn." #~ msgstr "" #~ msgid "" #~ "Now that we have all our " #~ "dependencies installed, let's run a " #~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ "clients and one server. However, before" +#~ " setting up the client and server," +#~ " we will define all functionalities " +#~ "that we need for our federated " +#~ "learning setup within :code:`utils.py`. 
The" +#~ " :code:`utils.py` contains different functions" +#~ " defining all the machine learning " +#~ "basics:" #~ msgstr "" -#~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" +#~ msgid ":code:`get_model_parameters()`" #~ msgstr "" -#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" #~ msgstr "" -#~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ msgid ":code:`set_model_params()`" #~ msgstr "" -#~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." +#~ msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" +#~ msgstr "" + +#~ msgid ":code:`set_initial_params()`" #~ msgstr "" #~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." +#~ "Please check out :code:`utils.py` `here " +#~ "`_ for more details. " +#~ "The pre-defined functions are used " +#~ "in the :code:`client.py` and imported. " +#~ "The :code:`client.py` also requires to " +#~ "import several packages such as Flower" +#~ " and scikit-learn:" #~ msgstr "" #~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." +#~ "Prior to local training, we need " +#~ "to load the MNIST dataset, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning, " +#~ "and partition the dataset for FL. 
" +#~ "This can be conveniently achieved using" +#~ " `Flower Datasets `_." +#~ " The :code:`FederatedDataset.load_partition()` method" +#~ " loads the partitioned training set " +#~ "for each partition ID defined in " +#~ "the :code:`--partition-id` argument." #~ msgstr "" -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgid "" +#~ "Next, the logistic regression model is" +#~ " defined and initialized with " +#~ ":code:`utils.set_initial_params()`." #~ msgstr "" #~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to fit the logistic " +#~ "regression we defined earlier)." #~ msgstr "" #~ msgid "" #~ "Flower provides a convenience class " #~ "called :code:`NumPyClient` which makes it " #~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." -#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "interface when your workload uses " +#~ "scikit-learn. 
Implementing :code:`NumPyClient` " +#~ "usually means defining the following " +#~ "methods (:code:`set_parameters` is optional " +#~ "though):" #~ msgstr "" -#~ msgid "They can be implemented in the following way:" +#~ msgid ":code:`set_parameters` (optional)" +#~ msgstr "" + +#~ msgid "is directly imported with :code:`utils.set_model_params()`" #~ msgstr "" #~ msgid "" #~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " +#~ " our class :code:`MnistClient` and add " #~ "one line to actually run this " #~ "client:" #~ msgstr "" @@ -24435,8 +32218,10 @@ msgstr "" #~ "That's it for the client. We only" #~ " have to implement :code:`Client` or " #~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " #~ ":code:`\"0.0.0.0:8080\"` tells the client " #~ "which server to connect to. In our" #~ " case we can run the server and" @@ -24450,459 +32235,532 @@ msgstr "" #~ "client." #~ msgstr "" +#~ msgid ":code:`server.py`, import Flower and start the server:" +#~ msgstr "" + #~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" +#~ "The number of federated learning rounds" +#~ " is set in :code:`fit_round()` and " +#~ "the evaluation is defined in " +#~ ":code:`get_evaluate_fn()`. The evaluation function" +#~ " is called after each federated " +#~ "learning round and gives you information" +#~ " about loss and accuracy. 
Note that" +#~ " we also make use of Flower " +#~ "Datasets here to load the test " +#~ "split of the MNIST dataset for " +#~ "server-side evaluation." +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`main` contains the server-" +#~ "side parameter initialization " +#~ ":code:`utils.set_initial_params()` as well as " +#~ "the aggregation strategy " +#~ ":code:`fl.server.strategy:FedAvg()`. The strategy is" +#~ " the default one, federated averaging " +#~ "(or FedAvg), with two clients and " +#~ "evaluation after each federated learning " +#~ "round. The server can be started " +#~ "with the command " +#~ ":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +#~ "strategy=strategy, " +#~ "config=fl.server.ServerConfig(num_rounds=3))`." #~ msgstr "" #~ msgid "" #~ "Congratulations! You've successfully built and" #~ " run your first federated learning " #~ "system. The full `source code " -#~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ "`_ for this example can " +#~ "be found in :code:`examples/sklearn-logreg-" +#~ "mnist`." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a federated XGBoost model" +#~ " on HIGGS dataset using Flower and" +#~ " :code:`xgboost` package. We use a " +#~ "simple example (`full code xgboost-" +#~ "quickstart `_) with two *clients* " +#~ "and one *server* to demonstrate how " +#~ "federated XGBoost works, and then we " +#~ "dive into a more complex example " +#~ "(`full code xgboost-comprehensive " +#~ "`_) to run various experiments." 
#~ msgstr "" -#~ msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +#~ msgid "" +#~ "Since we want to use :code:`xgboost` " +#~ "package to build up XGBoost trees, " +#~ "let's go ahead and install " +#~ ":code:`xgboost`:" #~ msgstr "" -#~ msgid ":code:`load_mnist()`" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import xgboost, Flower, Flower Datasets " +#~ "and other related functions:" #~ msgstr "" -#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgid "" +#~ "In this example, we split the " +#~ "dataset into 30 partitions with uniform" +#~ " distribution (:code:`IidPartitioner(num_partitions=30)`)." +#~ " Then, we load the partition for " +#~ "the given client based on " +#~ ":code:`partition_id`:" #~ msgstr "" -#~ msgid ":code:`shuffle()`" +#~ msgid "" +#~ "After that, we do train/test splitting" +#~ " on the given partition (client's " +#~ "local data), and transform data format" +#~ " for :code:`xgboost` package." #~ msgstr "" -#~ msgid "Shuffles data and its label" +#~ msgid "" +#~ "The functions of :code:`train_test_split` and" +#~ " :code:`transform_dataset_to_dmatrix` are defined " +#~ "as below:" #~ msgstr "" -#~ msgid ":code:`partition()`" +#~ msgid "" +#~ "The :code:`num_local_round` represents the " +#~ "number of iterations for local tree " +#~ "boost. We use CPU for the training" +#~ " in default. One can shift it " +#~ "to GPU by setting :code:`tree_method` to" +#~ " :code:`gpu_hist`. We use AUC as " +#~ "evaluation metric." #~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" +#~ msgid "" +#~ "After loading the dataset we define " +#~ "the Flower client. We follow the " +#~ "general rule to define :code:`XgbClient` " +#~ "class inherited from :code:`fl.client.Client`." #~ msgstr "" #~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." 
-#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." #~ msgstr "" #~ msgid "" -#~ "The number of federated learning rounds" -#~ " is set in :code:`fit_round()` and " -#~ "the evaluation is defined in " -#~ ":code:`get_evaluate_fn()`. The evaluation function" -#~ " is called after each federated " -#~ "learning round and gives you information" -#~ " about loss and accuracy." +#~ "Then, we override :code:`get_parameters`, " +#~ ":code:`fit` and :code:`evaluate` methods " +#~ "insides :code:`XgbClient` class as follows." #~ msgstr "" -#~ msgid "Let's get stated!" +#~ msgid "" +#~ "Unlike neural network training, XGBoost " +#~ "trees are not started from a " +#~ "specified random weights. In this case," +#~ " we do not use :code:`get_parameters` " +#~ "and :code:`set_parameters` to initialise model" +#~ " parameters for XGBoost. As a result," +#~ " let's return an empty tensor in " +#~ ":code:`get_parameters` when it is called " +#~ "by the server at the first round." #~ msgstr "" #~ msgid "" -#~ "We now have a list of ten " -#~ "training sets and ten validation sets" -#~ " (``trainloaders`` and ``valloaders``) " -#~ "representing the data of ten different" -#~ " organizations. Each ``trainloader``/``valloader`` " -#~ "pair contains 4500 training examples and" -#~ " 500 validation examples. There's also " -#~ "a single ``testloader`` (we did not " -#~ "split the test set). Again, this " -#~ "is only necessary for building research" -#~ " or educational systems, actual federated" -#~ " learning systems have their data " -#~ "naturally distributed across multiple " -#~ "partitions." +#~ "In :code:`fit`, at the first round, " +#~ "we call :code:`xgb.train()` to build up" +#~ " the first set of trees. 
From " +#~ "the second round, we load the " +#~ "global model sent from server to " +#~ "new build Booster object, and then " +#~ "update model weights on local training" +#~ " data with function :code:`local_boost` as" +#~ " follows:" +#~ msgstr "" + +#~ msgid "" +#~ "Given :code:`num_local_round`, we update trees" +#~ " by calling :code:`bst_input.update` method. " +#~ "After training, the last " +#~ ":code:`N=num_local_round` trees will be " +#~ "extracted to send to the server." +#~ msgstr "" + +#~ msgid "" +#~ "In :code:`evaluate`, after loading the " +#~ "global model, we call :code:`bst.eval_set` " +#~ "function to conduct evaluation on valid" +#~ " set. The AUC value will be " +#~ "returned." +#~ msgstr "" + +#~ msgid "" +#~ "Now, we can create an instance of" +#~ " our class :code:`XgbClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` and " +#~ "call :code:`fl.client.start_client()`. The string" +#~ " :code:`\"[::]:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "" +#~ "In a file named :code:`server.py`, " +#~ "import Flower and FedXgbBagging from " +#~ ":code:`flwr.server.strategy`." #~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "" +#~ "We use two clients for this " +#~ "example. An :code:`evaluate_metrics_aggregation` " +#~ "function is defined to collect and " +#~ "wighted average the AUC values from " +#~ "clients. 
The :code:`config_func` function is" +#~ " to return the current FL round " +#~ "number to client's :code:`fit()` and " +#~ ":code:`evaluate()` methods." #~ msgstr "" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "" +#~ "In file :code:`flwr.server.strategy.fedxgb_bagging.py`," +#~ " we define :code:`FedXgbBagging` inherited " +#~ "from :code:`flwr.server.strategy.FedAvg`. Then, we" +#~ " override the :code:`aggregate_fit`, " +#~ ":code:`aggregate_evaluate` and :code:`evaluate` " +#~ "methods as follows:" #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "" +#~ "In :code:`aggregate_fit`, we sequentially " +#~ "aggregate the clients' XGBoost trees by" +#~ " calling :code:`aggregate()` function:" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "" +#~ "In this function, we first fetch " +#~ "the number of trees and the number" +#~ " of parallel trees for the current" +#~ " and previous model by calling " +#~ ":code:`_get_tree_nums`. Then, the fetched " +#~ "information will be aggregated. After " +#~ "that, the trees (containing model " +#~ "weights) are aggregated to generate a" +#~ " new tree model." #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated XGBoost " +#~ "system. The AUC values can be " +#~ "checked in :code:`metrics_distributed`. One " +#~ "can see that the average AUC " +#~ "increases over FL rounds." #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "" +#~ "The full `source code " +#~ "`_ for this example can be" +#~ " found in :code:`examples/xgboost-quickstart`." 
#~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "" +#~ "To do this, we first customise a" +#~ " :code:`ClientManager` in :code:`server_utils.py`:" #~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "" +#~ "The customised :code:`ClientManager` samples " +#~ "all available clients in each FL " +#~ "round based on the order of " +#~ "connection to the server. Then, we " +#~ "define a new strategy :code:`FedXgbCyclic` " +#~ "in :code:`flwr.server.strategy.fedxgb_cyclic.py`, in " +#~ "order to sequentially select only one" +#~ " client in given round and pass " +#~ "the received model to next client." #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "" +#~ "Unlike the original :code:`FedAvg`, we " +#~ "don't perform aggregation here. Instead, " +#~ "we just make a copy of the " +#~ "received client model as global model" +#~ " by overriding :code:`aggregate_fit`." #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "" +#~ "Also, the customised :code:`configure_fit` and" +#~ " :code:`configure_evaluate` methods ensure the" +#~ " clients to be sequentially selected " +#~ "given FL round:" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "" +#~ "In :code:`dataset.py`, we have a " +#~ "function :code:`instantiate_partitioner` to " +#~ "instantiate the data partitioner based " +#~ "on the given :code:`num_partitions` and " +#~ ":code:`partitioner_type`. Currently, we provide " +#~ "four supported partitioner type to " +#~ "simulate the uniformity/non-uniformity in " +#~ "data quantity (uniform, linear, square, " +#~ "exponential)." 
#~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "" +#~ "To facilitate centralised evaluation, we " +#~ "define a function in :code:`server_utils.py`:" #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "" +#~ "This function returns a evaluation " +#~ "function which instantiates a :code:`Booster`" +#~ " object and loads the global model" +#~ " weights to it. The evaluation is " +#~ "conducted by calling :code:`eval_set()` " +#~ "method, and the tested AUC value " +#~ "is reported." #~ msgstr "" #~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " ``base`` image and a ``superlink`` " -#~ "image. The base image, as the name" -#~ " suggests, contains basic dependencies that" -#~ " the SuperLink needs. This includes " -#~ "system dependencies, Python and Python " -#~ "tools. The SuperLink image is based " -#~ "on the base image, but it " -#~ "additionally installs the SuperLink using " -#~ "``pip``." +#~ "As for distributed evaluation on the " +#~ "clients, it's same as the quick-" +#~ "start example by overriding the " +#~ ":code:`evaluate()` method insides the " +#~ ":code:`XgbClient` class in :code:`client_utils.py`." #~ msgstr "" -#~ "Atualmente, Flower fornece duas imagens, " -#~ "uma imagem base e uma imagem de" -#~ " servidor. Também haverá uma imagem " -#~ "de cliente em breve. A imagem " -#~ "base, como o nome sugere, contém " -#~ "dependências básicas que tanto o " -#~ "servidor quanto o cliente precisam. Isso" -#~ " inclui dependências do sistema, Python " -#~ "e ferramentas Python. A imagem do " -#~ "servidor é baseada na imagem base, " -#~ "mas também instala o servidor Flower " -#~ "usando ``pip```." - -#~ msgid "``3.11``" -#~ msgstr "``3.11``" - -#~ msgid "Defaults to ``22.04``." -#~ msgstr "Como padrão ``22.04``." - -#~ msgid "Building the SuperLink image" -#~ msgstr "Construindo a imagem do servidor" - -#~ msgid "Defaults to ``flwr/base``." -#~ msgstr "Pré-definido para ``flwr/server``." 
- -#~ msgid "The Python version of the base image." -#~ msgstr "O nome do repositório da imagem base." - -#~ msgid "Defaults to ``py3.11``." -#~ msgstr "Como padrão ``22.04``." -#~ msgid "Defaults to ``ubuntu22.04``." -#~ msgstr "Pré-definido para ``py3.11-ubuntu22.04``." - -#~ msgid "The PyPI package to install." +#~ msgid "" +#~ "We also provide an example code " +#~ "(:code:`sim.py`) to use the simulation " +#~ "capabilities of Flower to simulate " +#~ "federated XGBoost training on either a" +#~ " single machine or a cluster of " +#~ "machines." #~ msgstr "" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "Pré-definido para ``flwr/server``." - #~ msgid "" -#~ "The name of image is ``flwr_superlink``" -#~ " and the tag ``0.1.0``. Remember that" -#~ " the build arguments as well as " -#~ "the name and tag can be adapted" -#~ " to your needs. These values serve" -#~ " as examples only." +#~ "After importing all required packages, " +#~ "we define a :code:`main()` function to" +#~ " perform the simulation process:" #~ msgstr "" -#~ "O nome da imagem é ``flwr_server`` " -#~ "e a tag ``0.1.0``. Lembre-se que" -#~ " os argumentos de compilação, bem " -#~ "como o nome e a tag podem " -#~ "ser adaptados às suas necessidades. " -#~ "Esses valores servem apenas como " -#~ "exemplos." - -#~ msgid "Creating New Messages" -#~ msgstr "Criando novas mensagens" #~ msgid "" -#~ "This is a simple guide for " -#~ "creating a new type of message " -#~ "between the server and clients in " -#~ "Flower." +#~ "We first load the dataset and " +#~ "perform data partitioning, and the " +#~ "pre-processed data is stored in a " +#~ ":code:`list`. After the simulation begins, " +#~ "the clients won't need to pre-" +#~ "process their partitions again." #~ msgstr "" #~ msgid "" -#~ "Let's suppose we have the following " -#~ "example functions in :code:`server.py` and " -#~ ":code:`numpy_client.py`..." 
+#~ "After that, we start the simulation " +#~ "by calling :code:`fl.simulation.start_simulation`:" #~ msgstr "" -#~ msgid "Server's side:" +#~ msgid "" +#~ "One of key parameters for " +#~ ":code:`start_simulation` is :code:`client_fn` which" +#~ " returns a function to construct a" +#~ " client. We define it as follows:" #~ msgstr "" -#~ msgid "Client's side:" +#~ msgid "" +#~ "In :code:`utils.py`, we define the " +#~ "arguments parsers for clients, server " +#~ "and simulation, allowing users to " +#~ "specify different experimental settings. Let's" +#~ " first see the sever side:" #~ msgstr "" #~ msgid "" -#~ "Let's now see what we need to " -#~ "implement in order to get this " -#~ "simple function between the server and" -#~ " client to work!" +#~ "This allows user to specify training " +#~ "strategies / the number of total " +#~ "clients / FL rounds / participating " +#~ "clients / clients for evaluation, and" +#~ " evaluation fashion. Note that with " +#~ ":code:`--centralised-eval`, the sever will " +#~ "do centralised evaluation and all " +#~ "functionalities for client evaluation will " +#~ "be disabled." #~ msgstr "" -#~ msgid "Message Types for Protocol Buffers" +#~ msgid "" +#~ "This defines various options for client" +#~ " data partitioning. Besides, clients also" +#~ " have an option to conduct evaluation" +#~ " on centralised test set by setting" +#~ " :code:`--centralised-eval`, as well as " +#~ "an option to perform scaled learning " +#~ "rate based on the number of " +#~ "clients by setting :code:`--scaled-lr`." #~ msgstr "" #~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." 
+#~ "The full `code " +#~ "`_ for this comprehensive " +#~ "example can be found in :code:`examples" +#~ "/xgboost-comprehensive`." #~ msgstr "" -#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" #~ msgstr "" -#~ msgid "Within the ClientMessage block:" +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" #~ msgstr "" -#~ msgid "" -#~ "Make sure to also add a field " -#~ "of the newly created message type " -#~ "in :code:`oneof msg`." +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" #~ msgstr "" -#~ msgid "Once that is done, we will compile the file with:" +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" #~ msgstr "" -#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" #~ msgstr "" -#~ msgid "Serialization and Deserialization Functions" +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" #~ msgstr "" -#~ msgid "" -#~ "Our next step is to add functions" -#~ " to serialize and deserialize Python " -#~ "datatypes to or from our defined " -#~ "RPC message types. You should add " -#~ "these functions in :code:`serde.py`." +#~ msgid "|a80714782dde439ab73936518f91fc3c|" #~ msgstr "" -#~ msgid "The four functions:" +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" #~ msgstr "" -#~ msgid "Sending the Message from the Server" +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" #~ msgstr "" -#~ msgid "" -#~ "Now write the request function in " -#~ "your Client Proxy class (e.g., " -#~ ":code:`grpc_client_proxy.py`) using the serde " -#~ "functions you just created:" +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" #~ msgstr "" -#~ msgid "Receiving the Message by the Client" +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" #~ msgstr "" -#~ msgid "" -#~ "Last step! Modify the code in " -#~ ":code:`message_handler.py` to check the field" -#~ " of your message and call the " -#~ ":code:`example_response` function. Remember to " -#~ "use the serde functions!" 
+#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" #~ msgstr "" -#~ msgid "Within the handle function:" +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" #~ msgstr "" -#~ msgid "And add a new function:" +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" #~ msgstr "" -#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" #~ msgstr "" -#~ msgid "" -#~ "The simplest way to get started " -#~ "with Flower is by using the " -#~ "pre-made Docker images, which you can" -#~ " find on `Docker Hub " -#~ "`__." +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" #~ msgstr "" -#~ msgid "" -#~ "If you want to persist the state" -#~ " of the SuperLink on your host " -#~ "system, all you need to do is " -#~ "specify a path where you want to" -#~ " save the file on your host " -#~ "system and a name for the database" -#~ " file. In the example below, we " -#~ "tell Docker via the flag ``--volume``" -#~ " to mount the user's home directory" -#~ " (``~/`` on your host) into the " -#~ "``/app/`` directory of the container. " -#~ "Furthermore, we use the flag " -#~ "``--database`` to specify the name of" -#~ " the database file." +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" #~ msgstr "" -#~ msgid "" -#~ "As soon as the SuperLink starts, " -#~ "the file ``state.db`` is created in " -#~ "the user's home directory on your " -#~ "host system. If the file already " -#~ "exists, the SuperLink tries to restore" -#~ " the state from the file. To " -#~ "start the SuperLink with an empty " -#~ "database, simply remove the ``state.db`` " -#~ "file." +#~ msgid "|d62da263071d45a496f543e41fce3a19|" #~ msgstr "" -#~ msgid "" -#~ "Assuming all files we need are in" -#~ " the local ``certificates`` directory, we" -#~ " can use the flag ``--volume`` to " -#~ "mount the local directory into the " -#~ "``/app/`` directory of the container. " -#~ "This allows the SuperLink to access " -#~ "the files within the container. 
Finally," -#~ " we pass the names of the " -#~ "certificates to the SuperLink with the" -#~ " ``--certificates`` flag." +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" #~ msgstr "" -#~ msgid "" -#~ "``--server 192.168.1.100:9092``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Fleet" +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" #~ msgstr "" -#~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the SuperNode to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" #~ msgstr "" -#~ msgid "" -#~ "``--server 192.168.1.100:9091``: This option " -#~ "specifies the address of the SuperLinks" -#~ " Driver" +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" #~ msgstr "" -#~ msgid "" -#~ "Assuming the certificate already exists " -#~ "locally, we can use the flag " -#~ "``--volume`` to mount the local " -#~ "certificate into the container's ``/app/`` " -#~ "directory. This allows the ServerApp to" -#~ " access the certificate within the " -#~ "container. Use the ``--certificates`` flag " -#~ "when starting the container." +#~ msgid "|e6ca84e1df244f238288a768352678e5|" #~ msgstr "" -#~ msgid "" -#~ "If you want to use a different " -#~ "version of Flower, for example Flower" -#~ " nightly, you can do so by " -#~ "changing the tag. All available versions" -#~ " are on `Docker Hub " -#~ "`__." +#~ msgid "|39c2422082554a21963baffb33a0d057|" #~ msgstr "" -#~ msgid "" -#~ "Here's another example to start with " -#~ "HTTPS. Use the ``--certificates`` command " -#~ "line argument to pass paths to (CA" -#~ " certificate, server certificate, and " -#~ "server private key)." 
+#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" #~ msgstr "" -#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" #~ msgstr "" -#~ msgid "Run Flower server (Driver API)." +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" #~ msgstr "" -#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" #~ msgstr "" -#~ msgid "Run Flower server (Fleet API)." +#~ msgid "" +#~ "If you don't have ``pyenv`` installed," +#~ " the following script that will " +#~ "install it, set it up, and create" +#~ " the virtual environment (with ``Python " +#~ "3.9.20`` by default):" #~ msgstr "" -#~ msgid "Unreleased" +#~ msgid "" +#~ "If you already have ``pyenv`` installed" +#~ " (along with the ``pyenv-virtualenv`` " +#~ "plugin), you can use the following " +#~ "convenience script (with ``Python 3.9.20`` " +#~ "by default):" #~ msgstr "" -#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" #~ msgstr "" -#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" #~ msgstr "" -#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" #~ msgstr "" -#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" #~ msgstr "" -#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" #~ msgstr "" -#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" #~ msgstr "" -#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgid "|a7bf029981514e2593aa3a2b48c9d76a|" #~ msgstr "" -#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" #~ msgstr "" -#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" #~ msgstr "" -#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" #~ msgstr "" -#~ msgid 
"|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" #~ msgstr "" -#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" #~ msgstr "" -#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" #~ msgstr "" -#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgid "|e7cec00a114b48359935c6510595132e|" #~ msgstr "" diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index e9279db19043..a1598faa0ee4 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-06-17 16:09+0200\n" +"POT-Creation-Date: 2024-10-10 00:29+0000\n" "PO-Revision-Date: 2024-06-12 10:09+0000\n" "Last-Translator: Yan Gao \n" "Language: zh_Hans\n" @@ -17,44 +17,199 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" -#: ../../source/contributor-explanation-architecture.rst:2 -msgid "Flower Architecture" -msgstr "Flower的架构" +#: ../../source/contributor-explanation-public-and-private-apis.rst:2 +msgid "Public and private APIs" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:4 +msgid "" +"In Python, everything is public. To enable developers to understand which" +" components can be relied upon, Flower declares a public API. Components " +"that are part of the public API can be relied upon. Changes to the public" +" API are announced in the release notes and are subject to deprecation " +"policies." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:9 +msgid "" +"Everything that is not part of the public API is part of the private API." 
+" Even though Python allows accessing them, user code should never use " +"those components. Private APIs can change at any time, even in patch " +"releases." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:13 +msgid "" +"How can you determine whether a component is part of the public API or " +"not? Easy:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:15 +msgid "`Use the Flower API reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:16 +msgid "`Use the Flower CLI reference documentation `_" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:18 +msgid "" +"Everything listed in the reference documentation is part of the public " +"API. This document explains how Flower maintainers define the public API " +"and how you can determine whether a component is part of the public API " +"or not by reading the Flower source code." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:23 +#, fuzzy +msgid "Flower public API" +msgstr "Flower 客户端。" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:25 +msgid "Flower has a well-defined public API. Let's look at this in more detail." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:29 +msgid "" +"Every component that is reachable by recursively following " +"``__init__.__all__`` starting from the root package (``flwr``) is part of" +" the public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:32 +msgid "" +"If you want to determine whether a component " +"(class/function/generator/...) is part of the public API or not, you need" +" to start at the root of the ``flwr`` package. 
Let's use ``tree -L 1 -d " +"src/py/flwr`` to look at the Python sub-packages contained ``flwr``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:46 +msgid "" +"Contrast this with the definition of ``__all__`` in the root " +"``src/py/flwr/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:59 +msgid "" +"You can see that ``flwr`` has six subpackages (``cli``, ``client``, " +"``common``, ``proto``, ``server``, ``simulation``), but only four of them" +" are \"exported\" via ``__all__`` (``client``, ``common``, ``server``, " +"``simulation``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:63 +msgid "" +"What does this mean? It means that ``client``, ``common``, ``server`` and" +" ``simulation`` are part of the public API, but ``cli`` and ``proto`` are" +" not. The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. A " +"private API can change completely from one release to the next (even in " +"patch releases). It can change in a breaking way, it can be renamed (for " +"example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can " +"even be removed completely." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:70 +msgid "Therefore, as a Flower user:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:72 +msgid "``from flwr import client`` ✅ Ok, you're importing a public API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:73 +msgid "" +"``from flwr import proto`` ❌ Not recommended, you're importing a private " +"API." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:75 +msgid "" +"What about components that are nested deeper in the hierarchy? Let's look" +" at Flower strategies to see another typical pattern. 
Flower strategies " +"like ``FedAvg`` are often imported using ``from flwr.server.strategy " +"import FedAvg``. Let's look at " +"``src/py/flwr/server/strategy/__init__.py``:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:91 +msgid "" +"What's notable here is that all strategies are implemented in dedicated " +"modules (e.g., ``fedavg.py``). In ``__init__.py``, we *import* the " +"components we want to make part of the public API and then *export* them " +"via ``__all__``. Note that we export the component itself (for example, " +"the ``FedAvg`` class), but not the module it is defined in (for example, " +"``fedavg.py``). This allows us to move the definition of ``FedAvg`` into " +"a different module (or even a module in a subpackage) without breaking " +"the public API (as long as we update the import path in ``__init__.py``)." +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:99 +msgid "Therefore:" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:101 +msgid "" +"``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a " +"class that is part of the public API." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:5 -msgid "Edge Client Engine" -msgstr "边缘客户端引擎" +#: ../../source/contributor-explanation-public-and-private-apis.rst:103 +msgid "" +"``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're " +"importing a private module." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:7 +#: ../../source/contributor-explanation-public-and-private-apis.rst:106 msgid "" -"`Flower `_ core framework architecture with Edge " -"Client Engine" -msgstr "具有边缘客户端引擎的`Flower `核心架构" +"This approach is also implemented in the tooling that automatically " +"builds API reference docs." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:13 -msgid "Virtual Client Engine" -msgstr "虚拟客户端引擎" +#: ../../source/contributor-explanation-public-and-private-apis.rst:110 +msgid "Flower public API of private packages" +msgstr "" + +#: ../../source/contributor-explanation-public-and-private-apis.rst:112 +msgid "" +"We also use this to define the public API of private subpackages. Public," +" in this context, means the API that other ``flwr`` subpackages should " +"use. For example, ``flwr.server.driver`` is a private subpackage (it's " +"not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``)." +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:15 +#: ../../source/contributor-explanation-public-and-private-apis.rst:117 msgid "" -"`Flower `_ core framework architecture with Virtual " -"Client Engine" -msgstr "具有虚拟客户端引擎的`Flower `核心架构" +"Still, the private sub-package ``flwr.server.driver`` defines a " +"\"public\" API using ``__all__`` in " +"``src/py/flwr/server/driver/__init__.py``:" +msgstr "" -#: ../../source/contributor-explanation-architecture.rst:21 -msgid "Virtual Client Engine and Edge Client Engine in the same workload" -msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" +#: ../../source/contributor-explanation-public-and-private-apis.rst:132 +msgid "" +"The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` " +"are never used by Flower framework users, only by other parts of the " +"Flower framework codebase. Those other parts of the codebase import, for " +"example, ``InMemoryDriver`` using ``from flwr.server.driver import " +"InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``)," +" not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` " +"(``in_memory_driver.py`` is the module containing the actual " +"``InMemoryDriver`` class definition)." 
+msgstr "" -#: ../../source/contributor-explanation-architecture.rst:23 +#: ../../source/contributor-explanation-public-and-private-apis.rst:140 msgid "" -"`Flower `_ core framework architecture with both " -"Virtual Client Engine and Edge Client Engine" -msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" +"This is because ``flwr.server.driver`` defines a public interface for " +"other ``flwr`` subpackages. This allows codeowners of " +"``flwr.server.driver`` to refactor the package without breaking other " +"``flwr``-internal users." +msgstr "" #: ../../source/contributor-how-to-build-docker-images.rst:2 -msgid "How to build Docker Flower images locally" +#, fuzzy +msgid "How to Build Docker Flower Images Locally" msgstr "如何在本地搭建Docker Flower images" #: ../../source/contributor-how-to-build-docker-images.rst:4 @@ -79,28 +234,17 @@ msgid "" "development environment." msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" -#: ../../source/contributor-how-to-build-docker-images.rst:12 +#: ../../source/contributor-how-to-build-docker-images.rst:13 #, fuzzy -msgid "Clone the flower repository." +msgid "Clone the ``flower`` repository." msgstr "**叉花仓库**" -#: ../../source/contributor-how-to-build-docker-images.rst:18 -#: ../../source/how-to-run-flower-using-docker.rst:165 +#: ../../source/contributor-how-to-build-docker-images.rst:19 #, fuzzy msgid "Verify the Docker daemon is running." msgstr "验证 Docker 守护进程是否正在运行。" -#: ../../source/contributor-how-to-build-docker-images.rst:20 -#: ../../source/how-to-run-flower-using-docker.rst:167 -#, fuzzy -msgid "" -"Please follow the first section on :doc:`Run Flower using Docker ` which covers this step in more detail." -msgstr "" -"请阅读 :doc:`Run Flower using Docker ` " -"的第一节,其中更详细地介绍了这一步骤。" - -#: ../../source/contributor-how-to-build-docker-images.rst:25 +#: ../../source/contributor-how-to-build-docker-images.rst:21 #, fuzzy msgid "" "The build instructions that assemble the images are located in the " @@ -108,7 +252,7 @@ msgid "" "``src/docker``." 
msgstr "组装镜像的构建说明位于各自的 Dockerfile 中。你可以在 ``src/docker`` 的子目录中找到它们。" -#: ../../source/contributor-how-to-build-docker-images.rst:28 +#: ../../source/contributor-how-to-build-docker-images.rst:24 #, fuzzy msgid "" "Flower Docker images are configured via build arguments. Through build " @@ -123,225 +267,251 @@ msgstr "" "``PYTHON_VERSION`` 联编参数指定要安装的 Python " "版本。有些联编参数有默认值,有些则必须在联编映像时指定。每个映像的所有可用联编参数都列在下表中。" -#: ../../source/contributor-how-to-build-docker-images.rst:35 +#: ../../source/contributor-how-to-build-docker-images.rst:32 #, fuzzy -msgid "Building the base image" +msgid "Building the Base Image" msgstr "加载数据" -#: ../../source/contributor-how-to-build-docker-images.rst:41 -#: ../../source/contributor-how-to-build-docker-images.rst:98 +#: ../../source/contributor-how-to-build-docker-images.rst:38 +#: ../../source/contributor-how-to-build-docker-images.rst:104 #, fuzzy msgid "Build argument" msgstr "构建文档" -#: ../../source/contributor-how-to-build-docker-images.rst:42 -#: ../../source/contributor-how-to-build-docker-images.rst:99 +#: ../../source/contributor-how-to-build-docker-images.rst:39 +#: ../../source/contributor-how-to-build-docker-images.rst:105 #, fuzzy msgid "Description" msgstr "停用" -#: ../../source/contributor-how-to-build-docker-images.rst:43 -#: ../../source/contributor-how-to-build-docker-images.rst:100 +#: ../../source/contributor-how-to-build-docker-images.rst:40 +#: ../../source/contributor-how-to-build-docker-images.rst:106 #, fuzzy msgid "Required" msgstr "所需变更" -#: ../../source/contributor-how-to-build-docker-images.rst:44 -#: ../../source/contributor-how-to-build-docker-images.rst:101 +#: ../../source/contributor-how-to-build-docker-images.rst:41 +#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/docker/persist-superlink-state.rst:19 +#: ../../source/docker/pin-version.rst:12 +#: ../../source/docker/set-environment-variables.rst:8 #, fuzzy msgid "Example" msgstr "实例" -#: 
../../source/contributor-how-to-build-docker-images.rst:45 +#: ../../source/contributor-how-to-build-docker-images.rst:42 msgid "``DISTRO``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:46 +#: ../../source/contributor-how-to-build-docker-images.rst:43 #, fuzzy msgid "The Linux distribution to use as the base image." msgstr "基础镜像的存储库名称。" -#: ../../source/contributor-how-to-build-docker-images.rst:47 -#: ../../source/contributor-how-to-build-docker-images.rst:51 -#: ../../source/contributor-how-to-build-docker-images.rst:55 -#: ../../source/contributor-how-to-build-docker-images.rst:71 -#: ../../source/contributor-how-to-build-docker-images.rst:104 +#: ../../source/contributor-how-to-build-docker-images.rst:44 +#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:52 +#: ../../source/contributor-how-to-build-docker-images.rst:68 +#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:110 #, fuzzy msgid "No" msgstr "现在" -#: ../../source/contributor-how-to-build-docker-images.rst:48 +#: ../../source/contributor-how-to-build-docker-images.rst:45 #, fuzzy msgid "``ubuntu``" msgstr "``UBUNTU_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:49 +#: ../../source/contributor-how-to-build-docker-images.rst:46 #, fuzzy msgid "``DISTRO_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:50 +#: ../../source/contributor-how-to-build-docker-images.rst:47 msgid "Version of the Linux distribution." 
msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:52 -#, fuzzy -msgid "``22.04``" -msgstr "``1.0.0rc1``" +#: ../../source/contributor-how-to-build-docker-images.rst:49 +msgid ":substitution-code:`|ubuntu_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:53 +#: ../../source/contributor-how-to-build-docker-images.rst:50 #, fuzzy msgid "``PYTHON_VERSION``" msgstr "Python 版本" -#: ../../source/contributor-how-to-build-docker-images.rst:54 +#: ../../source/contributor-how-to-build-docker-images.rst:51 #, fuzzy msgid "Version of ``python`` to be installed." msgstr "要安装的 ``python`` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:53 msgid "``3.11`` or ``3.11.1``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:57 +#: ../../source/contributor-how-to-build-docker-images.rst:54 #, fuzzy msgid "``PIP_VERSION``" msgstr "``PIP_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:58 +#: ../../source/contributor-how-to-build-docker-images.rst:55 #, fuzzy msgid "Version of ``pip`` to be installed." 
msgstr "要安装的 ``pip` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:59 -#: ../../source/contributor-how-to-build-docker-images.rst:63 -#: ../../source/contributor-how-to-build-docker-images.rst:67 -#: ../../source/contributor-how-to-build-docker-images.rst:108 +#: ../../source/contributor-how-to-build-docker-images.rst:56 +#: ../../source/contributor-how-to-build-docker-images.rst:60 +#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:114 #, fuzzy msgid "Yes" msgstr "类型" -#: ../../source/contributor-how-to-build-docker-images.rst:60 -#, fuzzy -msgid "``23.0.1``" -msgstr "``1.0.0rc1``" +#: ../../source/contributor-how-to-build-docker-images.rst:57 +msgid ":substitution-code:`|pip_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:61 +#: ../../source/contributor-how-to-build-docker-images.rst:58 #, fuzzy msgid "``SETUPTOOLS_VERSION``" msgstr "设置工具版本" -#: ../../source/contributor-how-to-build-docker-images.rst:62 +#: ../../source/contributor-how-to-build-docker-images.rst:59 #, fuzzy msgid "Version of ``setuptools`` to be installed." msgstr "要安装的 `setuptools`` 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:64 +#: ../../source/contributor-how-to-build-docker-images.rst:61 #, fuzzy -msgid "``69.0.2``" -msgstr "``1.0.0b0``" +msgid ":substitution-code:`|setuptools_version|`" +msgstr "设置工具版本" -#: ../../source/contributor-how-to-build-docker-images.rst:65 +#: ../../source/contributor-how-to-build-docker-images.rst:62 #, fuzzy msgid "``FLWR_VERSION``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:66 +#: ../../source/contributor-how-to-build-docker-images.rst:63 #, fuzzy msgid "Version of Flower to be installed." 
msgstr "要安装的 Flower 版本。" -#: ../../source/contributor-how-to-build-docker-images.rst:68 -#, fuzzy -msgid "``1.8.0``" -msgstr "``1.0.0b0``" +#: ../../source/contributor-how-to-build-docker-images.rst:65 +msgid ":substitution-code:`|stable_flwr_version|`" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:69 +#: ../../source/contributor-how-to-build-docker-images.rst:66 #, fuzzy msgid "``FLWR_PACKAGE``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:70 +#: ../../source/contributor-how-to-build-docker-images.rst:67 #, fuzzy msgid "The Flower package to be installed." msgstr "要安装的 PyPI 软件包。" -#: ../../source/contributor-how-to-build-docker-images.rst:72 +#: ../../source/contributor-how-to-build-docker-images.rst:69 msgid "``flwr`` or ``flwr-nightly``" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:75 +#: ../../source/contributor-how-to-build-docker-images.rst:70 +#, fuzzy +msgid "``FLWR_VERSION_REF``" +msgstr "``FLWR_VERSION``" + +#: ../../source/contributor-how-to-build-docker-images.rst:71 +msgid "" +"A `direct reference " +"`_ without the ``@`` specifier. If both " +"``FLWR_VERSION`` and ``FLWR_VERSION_REF`` are specified, the " +"``FLWR_VERSION_REF`` has precedence." 
+msgstr "" + +#: ../../source/contributor-how-to-build-docker-images.rst:76 +#, fuzzy +msgid "`Direct Reference Examples`_" +msgstr "示例请求" + +#: ../../source/contributor-how-to-build-docker-images.rst:78 #, fuzzy msgid "" "The following example creates a base Ubuntu/Alpine image with Python " -"3.11.0, pip 23.0.1, setuptools 69.0.2 and Flower 1.8.0:" +"``3.11.0``, pip :substitution-code:`|pip_version|`, setuptools " +":substitution-code:`|setuptools_version|` and Flower :substitution-" +"code:`|stable_flwr_version|`:" msgstr "下面的示例使用 Python 3.11.0、pip 23.0.1 和 setuptools 69.0.2 创建了基本映像:" -#: ../../source/contributor-how-to-build-docker-images.rst:88 +#: ../../source/contributor-how-to-build-docker-images.rst:93 #, fuzzy msgid "" -"The name of image is ``flwr_base`` and the tag ``0.1.0``. Remember that " -"the build arguments as well as the name and tag can be adapted to your " -"needs. These values serve as examples only." +"In this example, we specify our image name as ``flwr_base`` and the tag " +"as ``0.1.0``. Remember that the build arguments as well as the name and " +"tag can be adapted to your needs. These values serve as examples only." msgstr "图像名称为 ``flwr_base``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" -#: ../../source/contributor-how-to-build-docker-images.rst:92 +#: ../../source/contributor-how-to-build-docker-images.rst:98 #, fuzzy -msgid "Building the SuperLink/SuperNode or ServerApp image" -msgstr "启动服务器" +msgid "Building a Flower Binary Image" +msgstr "加载数据" -#: ../../source/contributor-how-to-build-docker-images.rst:102 +#: ../../source/contributor-how-to-build-docker-images.rst:108 #, fuzzy msgid "``BASE_REPOSITORY``" msgstr "基础存储库" -#: ../../source/contributor-how-to-build-docker-images.rst:103 +#: ../../source/contributor-how-to-build-docker-images.rst:109 #, fuzzy msgid "The repository name of the base image." 
msgstr "基础镜像的存储库名称。" -#: ../../source/contributor-how-to-build-docker-images.rst:105 +#: ../../source/contributor-how-to-build-docker-images.rst:111 #, fuzzy msgid "``flwr/base``" msgstr "``FLWR_VERSION``" -#: ../../source/contributor-how-to-build-docker-images.rst:106 +#: ../../source/contributor-how-to-build-docker-images.rst:112 #, fuzzy msgid "``BASE_IMAGE``" msgstr "基础存储库" -#: ../../source/contributor-how-to-build-docker-images.rst:107 +#: ../../source/contributor-how-to-build-docker-images.rst:113 #, fuzzy msgid "The Tag of the Flower base image." msgstr "基础镜像的存储库名称。" -#: ../../source/contributor-how-to-build-docker-images.rst:109 -msgid "``1.8.0-py3.10-ubuntu22.04``" +#: ../../source/contributor-how-to-build-docker-images.rst:115 +msgid ":substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|`" msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:111 -#, fuzzy +#: ../../source/contributor-how-to-build-docker-images.rst:117 msgid "" -"The following example creates a SuperLink/SuperNode or ServerApp image " -"with the official Flower base image:" -msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 创建了一个服务器镜像:" +"For example, to build a SuperLink image with the latest Flower version, " +"Python 3.11 and Ubuntu 22.04, run the following:" +msgstr "" -#: ../../source/contributor-how-to-build-docker-images.rst:122 +#: ../../source/contributor-how-to-build-docker-images.rst:128 #, fuzzy msgid "" "If you want to use your own base image instead of the official Flower " "base image, all you need to do is set the ``BASE_REPOSITORY`` build " -"argument." +"argument to ``flwr_base`` (as we've specified above)." 
msgstr "" "如果您想使用自己的基础图片而不是 Flower 官方的基础图片,只需设置 ``BASE_REPOSITORY`` 和 " "``BASE_IMAGE_TAG`` " "联编参数即可。`BASE_REPOSITORY``的值必须与您的图像名称一致,`BASE_IMAGE_TAG``的值必须与您的图像标签一致。" -#: ../../source/contributor-how-to-build-docker-images.rst:133 +#: ../../source/contributor-how-to-build-docker-images.rst:140 #, fuzzy msgid "After creating the image, we can test whether the image is working:" msgstr "创建图像后,我们可以测试图像是否正常工作:" +#: ../../source/contributor-how-to-build-docker-images.rst:147 +#, fuzzy +msgid "Direct Reference Examples" +msgstr "示例请求" + #: ../../source/contributor-how-to-contribute-translations.rst:2 msgid "Contribute translations" msgstr "贡献译文" @@ -387,7 +557,7 @@ msgstr "" "您需要做的第一件事就是在本`网页`_上创建一个免费的Weblate帐户。有关个人资料设置的更多信息,请参阅`这里" " `_。" -#: ../../source/contributor-how-to-contribute-translations.rst:29 +#: ../../source/contributor-how-to-contribute-translations.rst:28 msgid "" "Once you are signed in to Weblate, you can navigate to the `Flower " "Framework project `_。在这里,您可以看到网站上现有的各种语言。" -#: ../../source/contributor-how-to-contribute-translations.rst:34 +#: ../../source/contributor-how-to-contribute-translations.rst:32 msgid "" "Once you have selected the language you want to contribute to, you should" " see a similar interface to this:" msgstr "选择您要贡献的语言后,您应该会看到与此类似的界面:" -#: ../../source/contributor-how-to-contribute-translations.rst:39 +#: ../../source/contributor-how-to-contribute-translations.rst:37 msgid "" "The most straight forward option here is to click on the ``Translate`` " "button on the top right (in the ``Translation status`` section). This " @@ -412,11 +582,11 @@ msgid "" "untranslated strings." 
msgstr "最简单的方法是点击右上角(\"翻译状态 \"部分)的 \"翻译 \"按钮。这将自动带您进入未翻译字符串的翻译界面。" -#: ../../source/contributor-how-to-contribute-translations.rst:43 +#: ../../source/contributor-how-to-contribute-translations.rst:41 msgid "This is what the interface looks like:" msgstr "这就是界面的样子:" -#: ../../source/contributor-how-to-contribute-translations.rst:47 +#: ../../source/contributor-how-to-contribute-translations.rst:45 #, fuzzy msgid "" "You input your translation in the text box at the top and then, once you " @@ -430,7 +600,7 @@ msgstr "" "\"保存并继续\"(保存翻译内容并转到下一个未翻译的字符串)、\"保存并停留\"(保存翻译内容并停留在同一页面)、\"建议\"(将您的翻译添加到建议中供其他用户查看)或" " \"跳过\"(转到下一个未翻译的字符串而不保存任何内容)。" -#: ../../source/contributor-how-to-contribute-translations.rst:54 +#: ../../source/contributor-how-to-contribute-translations.rst:51 msgid "" "In order to help with the translations, you can see on the bottom the " "``Nearby strings``, the ``Comments`` (from other contributors), the " @@ -441,14 +611,14 @@ msgstr "" "为了帮助翻译,您可以在底部看到 \"邻近字符串\"、\"评论\"(来自其他贡献者)、\"自动建议\"(来自机器翻译引擎)、\"其他语言 " "\"中的翻译以及该字符串的 \"历史翻译\"。" -#: ../../source/contributor-how-to-contribute-translations.rst:59 +#: ../../source/contributor-how-to-contribute-translations.rst:56 msgid "" "On the right, under the ``String information`` section, you can also " "click the link under ``Source string location`` in order to view the " "source of the doc file containing the string." 
msgstr "在右侧的 \"字符串信息 \"部分,您还可以单击 \"源字符串位置 \"下的链接,以查看包含字符串的 doc 文件的源文件。" -#: ../../source/contributor-how-to-contribute-translations.rst:63 +#: ../../source/contributor-how-to-contribute-translations.rst:60 msgid "" "For more information about translating using Weblate, you can check out " "this `in-depth guide " @@ -457,11 +627,11 @@ msgstr "" "有关使用 Weblate 进行翻译的更多信息,您可以查看本 \"深入指南 " "`_\"。" -#: ../../source/contributor-how-to-contribute-translations.rst:67 +#: ../../source/contributor-how-to-contribute-translations.rst:64 msgid "Add new languages" msgstr "添加新语言" -#: ../../source/contributor-how-to-contribute-translations.rst:69 +#: ../../source/contributor-how-to-contribute-translations.rst:66 msgid "" "If you want to add a new language, you will first have to contact us, " "either on `Slack `_, or by opening an issue" @@ -484,24 +654,24 @@ msgstr "" "在开发 Flower 框架时,我们希望确保所有贡献者使用相同的开发环境来格式化代码或运行测试。为此,我们使用了 VSCode " "远程容器扩展。这是什么?请阅读下面这段话:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:8 +#, fuzzy msgid "" "The Visual Studio Code Remote - Containers extension lets you use a " "Docker container as a fully-featured development environment. It allows " "you to open any folder inside (or mounted into) a container and take " "advantage of Visual Studio Code's full feature set. A " -":code:`devcontainer.json` file in your project tells VS Code how to " -"access (or create) a development container with a well-defined tool and " -"runtime stack. This container can be used to run an application or to " -"separate tools, libraries, or runtimes needed for working with a " -"codebase." +"``devcontainer.json`` file in your project tells VS Code how to access " +"(or create) a development container with a well-defined tool and runtime " +"stack. This container can be used to run an application or to separate " +"tools, libraries, or runtimes needed for working with a codebase." 
msgstr "" "Visual Studio Code Remote - " "Containers扩展可让你将Docker容器用作功能齐全的开发环境。它允许你打开容器内(或挂载到容器内)的任何文件夹,并利用 Visual " "Studio Code 的全部功能集。项目中的 :code:`devcontainer.json` 文件会告诉 VS Code " "如何访问(或创建)一个带有定义明确的工具和运行时栈的开发容器。该容器可用于运行应用程序,也可用于分离处理代码库所需的工具、库或运行时。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:16 msgid "" "Workspace files are mounted from the local file system or copied or " "cloned into the container. Extensions are installed and run inside the " @@ -510,33 +680,33 @@ msgid "" " environment just by connecting to a different container." msgstr "工作区文件从本地文件系统加载,或复制或克隆到容器中。扩展在容器内安装和运行,在容器内它们可以完全访问工具、平台和文件系统。这意味着,只需连接到不同的容器,就能无缝切换整个开发环境。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:22 #, fuzzy msgid "" "Source: `Official VSCode documentation " "`_" msgstr "来源:`VSCode 官方文档 `_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:26 msgid "Getting started" msgstr "开始" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:28 #, fuzzy msgid "" -"Configuring and setting up the :code:`Dockerfile` as well the " -"configuration for the devcontainer can be a bit more involved. The good " -"thing is you don't have to do it. Usually it should be enough to install " -"`Docker `_ on your system and " -"ensure its available on your command line. Additionally, install the " -"`VSCode Containers Extension `_ on your system and ensure its" +" available on your command line. Additionally, install the `VSCode " +"Containers Extension `_." 
msgstr "" "配置和设置 :code:`Dockerfile` 以及 devcontainer 的配置可能比较复杂。好在你想做就得做。通常只需在系统中安装 " "Docker 并确保其在命令行中可用即可。此外,请安装 `VSCode Containers Extension " "`_。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:35 msgid "" "Now you should be good to go. When starting VSCode, it will ask you to " "run in the container environment and - if you confirm - automatically " @@ -548,13 +718,13 @@ msgstr "" "现在你应该可以开始了。启动 VSCode 时,它会要求你在容器环境中运行,如果你确认,它会自动构建容器并使用它。要手动指示 VSCode 使用 " "devcontainer,可以在安装扩展后,点击 VSCode 窗口左下角的绿色区域,然后选择 \"*(重新)在容器中打开文件夹*\"选项。" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:41 msgid "" "In some cases your setup might be more involved. For those cases consult " "the following sources:" msgstr "在某些情况下,您的设置可能更复杂。有关这些情况,请参考以下资料:" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:44 #, fuzzy msgid "" "`Developing inside a Container " @@ -564,7 +734,7 @@ msgstr "" "在容器内开发 `_" -#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:46 #, fuzzy msgid "" "`Remote development in Containers " @@ -593,13 +763,13 @@ msgstr "" "依赖关系,然后重新安装(运行 ``poetry install` 前,别忘了删除 ``poetry.lock` (``rm " "poetry.lock`))。" -#: ../../source/contributor-how-to-install-development-versions.rst:12 +#: ../../source/contributor-how-to-install-development-versions.rst:14 msgid "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " "extras)" msgstr "``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (不含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:13 +#: ../../source/contributor-how-to-install-development-versions.rst:15 msgid "" "``flwr = { 
version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (with extras)" @@ -607,17 +777,17 @@ msgstr "" "``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " "[\"simulation\"] }`` (包含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:15 +#: ../../source/contributor-how-to-install-development-versions.rst:18 msgid "" "Install ``flwr`` from a local copy of the Flower source code via " "``pyproject.toml``:" msgstr "通过 ``pyproject.toml`` 从 Flower 源代码的本地副本安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:17 +#: ../../source/contributor-how-to-install-development-versions.rst:20 msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" msgstr "``flwr = { path = \"../../\", develop = true }`` (不含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:18 +#: ../../source/contributor-how-to-install-development-versions.rst:21 msgid "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (with extras)" @@ -625,18 +795,18 @@ msgstr "" "``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " "}`` (包含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:20 +#: ../../source/contributor-how-to-install-development-versions.rst:23 msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" msgstr "通过 ``pyproject.toml`` 从本地轮子文件安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:22 +#: ../../source/contributor-how-to-install-development-versions.rst:25 #, fuzzy msgid "" "``flwr = { path = \"../../dist/flwr-1.8.0-py3-none-any.whl\" }`` (without" " extras)" msgstr "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }``(无额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:23 +#: ../../source/contributor-how-to-install-development-versions.rst:26 #, fuzzy msgid "" "``flwr = { path = 
\"../../dist/flwr-1.8.0-py3-none-any.whl\", extras = " @@ -645,7 +815,7 @@ msgstr "" "``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " "[\"simulation\"] }`` (包含额外内容)" -#: ../../source/contributor-how-to-install-development-versions.rst:25 +#: ../../source/contributor-how-to-install-development-versions.rst:29 msgid "" "Please refer to the Poetry documentation for further details: `Poetry " "Dependency Specification `_" -#: ../../source/contributor-how-to-install-development-versions.rst:28 +#: ../../source/contributor-how-to-install-development-versions.rst:33 msgid "Using pip (recommended on Colab)" msgstr "使用 pip(建议在 Colab 上使用)" -#: ../../source/contributor-how-to-install-development-versions.rst:30 +#: ../../source/contributor-how-to-install-development-versions.rst:35 msgid "Install a ``flwr`` pre-release from PyPI:" msgstr "从 PyPI 安装 ``flwr`` 预发行版:" -#: ../../source/contributor-how-to-install-development-versions.rst:32 +#: ../../source/contributor-how-to-install-development-versions.rst:37 msgid "``pip install -U --pre flwr`` (without extras)" msgstr "``pip install -U -pre flwr``(不含额外功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:33 +#: ../../source/contributor-how-to-install-development-versions.rst:38 msgid "``pip install -U --pre 'flwr[simulation]'`` (with extras)" msgstr "``pip install -U -pre 'flwr[simulation]'``(包含额外功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:35 +#: ../../source/contributor-how-to-install-development-versions.rst:40 msgid "" "Python packages can be installed from git repositories. Use one of the " "following commands to install the Flower directly from GitHub." 
msgstr "Python 软件包可以从 git 仓库安装。使用以下命令之一直接从 GitHub 安装 Flower。" -#: ../../source/contributor-how-to-install-development-versions.rst:37 +#: ../../source/contributor-how-to-install-development-versions.rst:43 msgid "Install ``flwr`` from the default GitHub branch (``main``):" msgstr "从 GitHub 的默认分支 (``main`) 安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:39 +#: ../../source/contributor-how-to-install-development-versions.rst:45 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git`` (without " "extras)" msgstr "``pip install flwr@git+https://github.com/adap/flower.git`` (不含额外功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:40 +#: ../../source/contributor-how-to-install-development-versions.rst:46 msgid "" -"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` " -"(with extras)" +"``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'``" +" (with extras)" msgstr "" "``pip install " "'flwr[simulation]@git+https://github.com/adap/flower.git'``(带附加功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:42 +#: ../../source/contributor-how-to-install-development-versions.rst:49 msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" msgstr "从特定的 GitHub 分支 (`分支名`) 安装 ``flwr``:" -#: ../../source/contributor-how-to-install-development-versions.rst:44 +#: ../../source/contributor-how-to-install-development-versions.rst:51 msgid "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(without extras)" @@ -706,24 +876,26 @@ msgstr "" "``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " "(不含附加功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:45 +#: ../../source/contributor-how-to-install-development-versions.rst:53 msgid "" "``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git" "@branch-name'`` (with extras)" -msgstr "``pip install 
'flwr[simulation]@git+https://github.com/adap/flower.git@分支名'``(带附加功能)" +msgstr "" +"``pip install " +"'flwr[simulation]@git+https://github.com/adap/flower.git@分支名'``(带附加功能)" -#: ../../source/contributor-how-to-install-development-versions.rst:49 +#: ../../source/contributor-how-to-install-development-versions.rst:57 msgid "Open Jupyter Notebooks on Google Colab" msgstr "在谷歌 Colab 上打开 Jupyter 笔记本" -#: ../../source/contributor-how-to-install-development-versions.rst:51 +#: ../../source/contributor-how-to-install-development-versions.rst:59 #, fuzzy msgid "" "Open the notebook ``doc/source/tutorial-series-get-started-with-flower-" "pytorch.ipynb``:" msgstr "打开笔记本 ``doc/source/tutorial-get-started-with-flower-pytorch.ipynb``:" -#: ../../source/contributor-how-to-install-development-versions.rst:53 +#: ../../source/contributor-how-to-install-development-versions.rst:61 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" @@ -732,7 +904,7 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/main/doc/source" "/tutorial-get-started-with-flower-pytorch.ipynb" -#: ../../source/contributor-how-to-install-development-versions.rst:55 +#: ../../source/contributor-how-to-install-development-versions.rst:63 msgid "" "Open a development version of the same notebook from branch `branch-name`" " by changing ``main`` to ``branch-name`` (right after ``blob``):" @@ -740,7 +912,7 @@ msgstr "" "将 ``main`` 改为 ``branch-name``(紧跟在 ``blob``之后),从分支 `branch-name` " "打开同一笔记本的开发版本:" -#: ../../source/contributor-how-to-install-development-versions.rst:57 +#: ../../source/contributor-how-to-install-development-versions.rst:66 #, fuzzy msgid "" "https://colab.research.google.com/github/adap/flower/blob/branch-" @@ -749,22 +921,22 @@ msgstr "" "https://colab.research.google.com/github/adap/flower/blob/branch-" "name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" -#: 
../../source/contributor-how-to-install-development-versions.rst:59 +#: ../../source/contributor-how-to-install-development-versions.rst:68 msgid "Install a `whl` on Google Colab:" msgstr "在 Google Colab 上安装 `whl`:" -#: ../../source/contributor-how-to-install-development-versions.rst:61 +#: ../../source/contributor-how-to-install-development-versions.rst:70 msgid "" "In the vertical icon grid on the left hand side, select ``Files`` > " "``Upload to session storage``" msgstr "在左侧的垂直图标网格中,选择 \"文件\">\"上传到会话存储\"" -#: ../../source/contributor-how-to-install-development-versions.rst:62 +#: ../../source/contributor-how-to-install-development-versions.rst:72 #, fuzzy msgid "Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``)" msgstr "更新 whl (e.g., ``flwr-1.7.0-py3-none-any.whl``)" -#: ../../source/contributor-how-to-install-development-versions.rst:63 +#: ../../source/contributor-how-to-install-development-versions.rst:73 #, fuzzy msgid "" "Change ``!pip install -q 'flwr[simulation]' torch torchvision " @@ -785,18 +957,18 @@ msgid "" "change in the future." msgstr "本文件描述了当前的发布流程。今后可能会有变化,也可能不会有变化。" -#: ../../source/contributor-how-to-release-flower.rst:7 +#: ../../source/contributor-how-to-release-flower.rst:8 msgid "During the release" msgstr "在发布期间" -#: ../../source/contributor-how-to-release-flower.rst:9 +#: ../../source/contributor-how-to-release-flower.rst:10 msgid "" "The version number of a release is stated in ``pyproject.toml``. 
To " "release a new version of Flower, the following things need to happen (in " "that order):" msgstr "版本号在 ``pyproject.toml`` 中说明。要发布 Flower 的新版本,需要完成以下工作(按顺序排列):" -#: ../../source/contributor-how-to-release-flower.rst:11 +#: ../../source/contributor-how-to-release-flower.rst:13 #, fuzzy msgid "" "Run ``python3 src/py/flwr_tool/update_changelog.py `` in " @@ -806,7 +978,7 @@ msgstr "" "运行 ``python3 src/py/flwr_tool/update_changelog.py `` " "以将每项新更改添加到更新日志中(之后可对更新日志进行手动更改,直到看起来不错为止)。" -#: ../../source/contributor-how-to-release-flower.rst:12 +#: ../../source/contributor-how-to-release-flower.rst:16 #, fuzzy msgid "" "Once the changelog has been updated with all the changes, run ``./dev" @@ -820,7 +992,7 @@ msgstr "" "v``,其中````是``pyproject.toml``中的版本(注意前面的``v``)。这将用版本和当前日期替换更新日志中的" " ``Unreleased`` 标头,并为贡献者添加一条感谢信息。打开一个包含这些更改的拉取请求。" -#: ../../source/contributor-how-to-release-flower.rst:13 +#: ../../source/contributor-how-to-release-flower.rst:22 #, fuzzy msgid "" "Once the pull request is merged, tag the release commit with the version " @@ -832,143 +1004,93 @@ msgstr "" "在 PR 合并后立即用版本号标记发布提交:``git tag v0.12.3``,然后``git push --tags``。这将在 GitHub" " 上创建一个包含正确工件和更新日志相关部分的发布草案。" -#: ../../source/contributor-how-to-release-flower.rst:14 +#: ../../source/contributor-how-to-release-flower.rst:26 msgid "Check the draft release on GitHub, and if everything is good, publish it." msgstr "检查 GitHub 上的发布稿,如果一切正常,就发布它。" -#: ../../source/contributor-how-to-release-flower.rst:15 -#, fuzzy -msgid "Trigger the CI for building the Docker images." -msgstr "官方 Ubuntu Docker 映像的版本。" - -#: ../../source/contributor-how-to-release-flower.rst:17 -msgid "" -"To trigger the workflow, a collaborator must create a " -"``workflow_dispatch`` event in the GitHub CI. This can be done either " -"through the UI or via the GitHub CLI. The event requires only one input, " -"the Flower version, to be released." 
-msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:21 -#, fuzzy -msgid "**Via the UI**" -msgstr "**审查 PR**" - -#: ../../source/contributor-how-to-release-flower.rst:23 -msgid "" -"Go to the ``Build docker images`` workflow `page " -"`_." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:24 -msgid "" -"Click on the ``Run workflow`` button and type the new version of Flower " -"in the ``Version of Flower`` input field." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:25 -msgid "Click on the **green** ``Run workflow`` button." -msgstr "" - #: ../../source/contributor-how-to-release-flower.rst:29 -msgid "**Via the GitHub CI**" -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:31 -msgid "" -"Make sure you are logged in via ``gh auth login`` and that the current " -"working directory is the root of the Flower repository." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:32 -msgid "" -"Trigger the workflow via ``gh workflow run docker-images.yml -f flwr-" -"version=``." -msgstr "" - -#: ../../source/contributor-how-to-release-flower.rst:35 msgid "After the release" msgstr "发布后" -#: ../../source/contributor-how-to-release-flower.rst:37 +#: ../../source/contributor-how-to-release-flower.rst:31 msgid "Create a pull request which contains the following changes:" msgstr "创建包含以下更改的拉取请求:" -#: ../../source/contributor-how-to-release-flower.rst:39 +#: ../../source/contributor-how-to-release-flower.rst:33 msgid "Increase the minor version in ``pyproject.toml`` by one." msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/contributor-how-to-release-flower.rst:40 +#: ../../source/contributor-how-to-release-flower.rst:34 msgid "Update all files which contain the current version number if necessary." 
msgstr "如有必要,更新包含当前版本号的所有文件。" -#: ../../source/contributor-how-to-release-flower.rst:41 +#: ../../source/contributor-how-to-release-flower.rst:35 msgid "Add a new ``Unreleased`` section in ``changelog.md``." msgstr "在 ``changelog.md`` 中添加新的 ``Unreleased`` 部分。" -#: ../../source/contributor-how-to-release-flower.rst:43 +#: ../../source/contributor-how-to-release-flower.rst:37 msgid "" "Merge the pull request on the same day (i.e., before a new nightly " "release gets published to PyPI)." msgstr "在同一天合并拉取请求(即在新版本发布到 PyPI 之前)。" -#: ../../source/contributor-how-to-release-flower.rst:46 +#: ../../source/contributor-how-to-release-flower.rst:41 msgid "Publishing a pre-release" msgstr "发布预发布版本" -#: ../../source/contributor-how-to-release-flower.rst:49 +#: ../../source/contributor-how-to-release-flower.rst:44 msgid "Pre-release naming" msgstr "释放前命名" -#: ../../source/contributor-how-to-release-flower.rst:51 +#: ../../source/contributor-how-to-release-flower.rst:46 msgid "" "PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases" " MUST use one of the following naming patterns:" msgstr "PyPI 支持预发布版本(alpha、beta、release candidate)。预发布版本必须使用以下命名模式之一:" -#: ../../source/contributor-how-to-release-flower.rst:53 +#: ../../source/contributor-how-to-release-flower.rst:49 msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" msgstr "阿尔法 ``MAJOR.MINOR.PATCHaN``" -#: ../../source/contributor-how-to-release-flower.rst:54 +#: ../../source/contributor-how-to-release-flower.rst:50 msgid "Beta: ``MAJOR.MINOR.PATCHbN``" msgstr "贝塔: ``MAJOR.MINOR.PATCHbN``" -#: ../../source/contributor-how-to-release-flower.rst:55 +#: ../../source/contributor-how-to-release-flower.rst:51 msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" msgstr "版本代号 (RC): ``MAJOR.MINOR.PATCHrcN``" -#: ../../source/contributor-how-to-release-flower.rst:57 +#: ../../source/contributor-how-to-release-flower.rst:53 msgid "Examples include:" msgstr "例子包括:" -#: ../../source/contributor-how-to-release-flower.rst:59 +#: ../../source/contributor-how-to-release-flower.rst:55 msgid "``1.0.0a0``" msgstr "``1.0.0a0``" -#: ../../source/contributor-how-to-release-flower.rst:60 +#: ../../source/contributor-how-to-release-flower.rst:56 msgid "``1.0.0b0``" msgstr "``1.0.0b0``" -#: ../../source/contributor-how-to-release-flower.rst:61 +#: ../../source/contributor-how-to-release-flower.rst:57 msgid "``1.0.0rc0``" msgstr "``1.0.0rc0``" -#: ../../source/contributor-how-to-release-flower.rst:62 +#: ../../source/contributor-how-to-release-flower.rst:58 msgid "``1.0.0rc1``" msgstr "``1.0.0rc1``" -#: ../../source/contributor-how-to-release-flower.rst:64 +#: ../../source/contributor-how-to-release-flower.rst:60 msgid "" "This is in line with PEP-440 and the recommendations from the Python " "Packaging Authority (PyPA):" msgstr "这符合 PEP-440 和 Python 包装管理局 (PyPA) 的建议:" -#: ../../source/contributor-how-to-release-flower.rst:67 +#: ../../source/contributor-how-to-release-flower.rst:63 msgid "`PEP-440 `_" msgstr "`PEP-440 `_" -#: 
../../source/contributor-how-to-release-flower.rst:68 +#: ../../source/contributor-how-to-release-flower.rst:64 msgid "" "`PyPA Choosing a versioning scheme " "`_" -#: ../../source/contributor-how-to-release-flower.rst:70 +#: ../../source/contributor-how-to-release-flower.rst:67 msgid "" "Note that the approach defined by PyPA is not compatible with SemVer " "2.0.0 spec, for details consult the `Semantic Versioning Specification " @@ -1003,11 +1125,11 @@ msgid "" "surface this will become the next stable release" msgstr "RC:功能完整,无已知问题(除了下一个稳定版中被列为 \"不会修复 \"的问题)--如果没有问题出现,这将成为下一个稳定版" -#: ../../source/contributor-how-to-release-flower.rst:78 +#: ../../source/contributor-how-to-release-flower.rst:80 msgid "Beta: feature complete, allowed to have known issues" msgstr "贝塔版:功能完整,允许存在已知问题" -#: ../../source/contributor-how-to-release-flower.rst:79 +#: ../../source/contributor-how-to-release-flower.rst:81 msgid "Alpha: not feature complete, allowed to have known issues" msgstr "阿尔法版:功能不完整,允许存在已知问题" @@ -1025,21 +1147,21 @@ msgstr "" "建议在虚拟环境中运行 Python 设置。本指南展示了如何使用 pyenv virtualenv、poes 或 Anaconda " "创建虚拟环境的三个不同示例。您可以按照说明或选择您喜欢的设置。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:10 msgid "Python Version" msgstr "Python 版本" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 -#: ../../source/how-to-install-flower.rst:8 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:12 +#: ../../source/how-to-install-flower.rst:7 msgid "" -"Flower requires at least `Python 3.8 `_, " +"Flower requires at least `Python 3.9 `_, " "but `Python 3.10 `_ or above is " "recommended." 
msgstr "" -"Flower 至少需要 `Python 3.8 `_,但建议使用 `Python " +"Flower 至少需要 `Python 3.9 `_,但建议使用 `Python " "3.10 `_或更高版本。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:17 #, fuzzy msgid "" "Due to a known incompatibility with `ray " @@ -1050,12 +1172,12 @@ msgstr "" "由于已知与 `ray `_ 不兼容,我们目前建议最多使用 `Python 3.11" " `_ 运行 Flower 仿真。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:19 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:22 #, fuzzy msgid "Virtualenv with Pyenv/Virtualenv" msgstr "Virutualenv 和 Pyenv/Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:21 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 msgid "" "One of the recommended virtual environment is `pyenv " "`_/`virtualenv `_。详情请参见 `Flower 示例 " "`_。" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:23 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 msgid "" "Once Pyenv is set up, you can use it to install `Python Version 3.10 " "`_ or above:" msgstr "一旦设置好 Pyenv,就可以用它来安装 `Python 3.10 `_ 或更高版本:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:29 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 msgid "Create the virtualenv with:" msgstr "创建虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:36 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:42 msgid "Activate the virtualenv by running the following command:" msgstr "运行以下命令激活 virtualenv:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:44 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:49 msgid "Virtualenv with Poetry" msgstr "有诗意的 Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:46 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:51 msgid "" "The Flower examples are based on `Poetry `_ to manage dependencies. 
After installing Poetry you " @@ -1093,36 +1215,37 @@ msgstr "" "Flower 示例基于 `Poetry `_ 来管理依赖关系。安装 Poetry" " 后,只需创建一个虚拟环境即可:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:52 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:58 msgid "" "If you open a new terminal you can activate the previously created " "virtual environment with the following command:" msgstr "如果打开一个新终端,可以使用以下命令激活之前创建的虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:60 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:66 msgid "Virtualenv with Anaconda" msgstr "使用 Anaconda 的 Virtualenv" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:62 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#, fuzzy msgid "" "If you prefer to use Anaconda for your virtual environment then install " "and setup the `conda `_ package. After setting it up you can " +"/user-guide/install/index.html>`_ package. After setting it up you can " "create a virtual environment with:" msgstr "" "如果你更喜欢在虚拟环境中使用 Anaconda,那么请安装并设置 `conda " "`_ 软件包。设置完成后,您就可以使用以下工具创建虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:68 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 msgid "and activate the virtual environment with:" msgstr "并激活虚拟环境:" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:76 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:83 msgid "And then?" msgstr "然后呢?" -#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:78 +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:85 msgid "" "As soon as you created your virtual environment you clone one of the " "`Flower examples `_." 
@@ -1134,11 +1257,11 @@ msgstr "" msgid "Write documentation" msgstr "编写文件" -#: ../../source/contributor-how-to-write-documentation.rst:6 +#: ../../source/contributor-how-to-write-documentation.rst:5 msgid "Project layout" msgstr "项目布局" -#: ../../source/contributor-how-to-write-documentation.rst:8 +#: ../../source/contributor-how-to-write-documentation.rst:7 msgid "" "The Flower documentation lives in the ``doc`` directory. The Sphinx-based" " documentation system supports both reStructuredText (``.rst`` files) and" @@ -1148,7 +1271,7 @@ msgstr "" "Markdown(``.md`` 文件)。" #: ../../source/contributor-how-to-write-documentation.rst:10 -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:169 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:196 #, fuzzy msgid "" "Note that, in order to build the documentation locally (with ``poetry run" @@ -1159,20 +1282,20 @@ msgstr "" "请注意,要在本地构建文档(使用 ``poetry run make html``,如下所述),系统上必须安装 ``Pandoc " "_`。" -#: ../../source/contributor-how-to-write-documentation.rst:14 +#: ../../source/contributor-how-to-write-documentation.rst:15 msgid "Edit an existing page" msgstr "编辑现有页面" -#: ../../source/contributor-how-to-write-documentation.rst:16 +#: ../../source/contributor-how-to-write-documentation.rst:17 msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" msgstr "编辑 ``doc/source/`` 下现有的 ``.rst`` (或 ``.md``) 文件" -#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:18 #: ../../source/contributor-how-to-write-documentation.rst:27 msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" msgstr "编译文档: cd doc``,然后 ``poetry run make html``" -#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:19 #: ../../source/contributor-how-to-write-documentation.rst:28 msgid "Open ``doc/build/html/index.html`` in the browser to check the result" 
msgstr "在浏览器中打开 ``doc/build/html/index.html`` 查看结果" @@ -1207,34 +1330,34 @@ msgstr "" "我们欢迎为Flower做出代码贡献!然而,要知道从哪里开始并非易事。因此,我们提出了一些建议,告诉您从哪里开始,以增加您的 PR 被 Flower" " 代码库接受的机会。" -#: ../../source/contributor-ref-good-first-contributions.rst:11 +#: ../../source/contributor-ref-good-first-contributions.rst:9 msgid "Where to start" msgstr "从哪里开始" -#: ../../source/contributor-ref-good-first-contributions.rst:13 +#: ../../source/contributor-ref-good-first-contributions.rst:11 msgid "" "Until the Flower core library matures it will be easier to get PR's " "accepted if they only touch non-core areas of the codebase. Good " "candidates to get started are:" msgstr "在 Flower 核心库成熟之前,如果 PR 只涉及代码库中的非核心区域,则会更容易被接受。可以从以下方面入手:" -#: ../../source/contributor-ref-good-first-contributions.rst:17 +#: ../../source/contributor-ref-good-first-contributions.rst:14 msgid "Documentation: What's missing? What could be expressed more clearly?" msgstr "文档: 缺少什么?哪些内容可以表达得更清楚?" -#: ../../source/contributor-ref-good-first-contributions.rst:18 +#: ../../source/contributor-ref-good-first-contributions.rst:15 msgid "Baselines: See below." msgstr "Baselines: 见下文。" -#: ../../source/contributor-ref-good-first-contributions.rst:19 +#: ../../source/contributor-ref-good-first-contributions.rst:16 msgid "Examples: See below." 
msgstr "示例: 见下文。" -#: ../../source/contributor-ref-good-first-contributions.rst:23 +#: ../../source/contributor-ref-good-first-contributions.rst:19 msgid "Request for Flower Baselines" msgstr "Flower Baselines的申请" -#: ../../source/contributor-ref-good-first-contributions.rst:25 +#: ../../source/contributor-ref-good-first-contributions.rst:21 #, fuzzy msgid "" "If you are not familiar with Flower Baselines, you should probably check-" @@ -1244,7 +1367,7 @@ msgstr "" "如果您对 Flower Baselines 还不熟悉,也许可以看看我们的 `Baselines贡献指南 " "`_。" -#: ../../source/contributor-ref-good-first-contributions.rst:27 +#: ../../source/contributor-ref-good-first-contributions.rst:25 #, fuzzy msgid "" "You should then check out the open `issues " @@ -1257,7 +1380,7 @@ msgstr "" "`_" " baseline请求。如果您发现了自己想做的baseline,而它还没有被分配,请随时把它分配给自己,然后开始工作!" -#: ../../source/contributor-ref-good-first-contributions.rst:31 +#: ../../source/contributor-ref-good-first-contributions.rst:30 msgid "" "Otherwise, if you don't find a baseline you'd like to work on, be sure to" " open a new issue with the baseline request template!" 
@@ -1300,12 +1423,13 @@ msgstr "" "包括 SecAgg、SecAgg+ 和 LightSecAgg 协议。LightSecAgg " "协议尚未实施,因此其图表和抽象在实践中可能并不准确。SecAgg 协议可视为 SecAgg+ 协议的特例。" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 -msgid "The :code:`SecAgg+` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:9 +#, fuzzy +msgid "The ``SecAgg+`` abstraction" msgstr "代码:`SecAgg+` 抽象" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:11 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:163 msgid "" "In this implementation, each client will be assigned with a unique index " "(int) for secure aggregation, and thus many python dictionaries used have" @@ -1314,18 +1438,19 @@ msgstr "" "在此实现中,将为每个客户端分配一个唯一索引(int),以确保聚合的安全性,因此使用的许多 python 字典的键都是 int 类型,而不是 " "ClientProxy 类型。" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:67 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:204 msgid "" "The Flower server will execute and process received results in the " "following order:" msgstr "Flower 服务器将按以下顺序执行和处理收到的结果:" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 -msgid "The :code:`LightSecAgg` abstraction" +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +#, fuzzy +msgid "The ``LightSecAgg`` abstraction" msgstr "代码:`LightSecAgg` 抽象" -#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:277 msgid "Types" msgstr "类型" @@ -1339,7 +1464,7 @@ msgid "" "are not used to contributing to GitHub projects." 
msgstr "本指南适用于想参与 Flower,但不习惯为 GitHub 项目贡献的人。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +#: ../../source/contributor-tutorial-contribute-on-github.rst:7 #, fuzzy msgid "" "If you're familiar with how contributing on GitHub works, you can " @@ -1350,15 +1475,15 @@ msgstr "" "/getting-started-for-contributors.html>`_ 和 \"优秀的首次贡献示例\" " "`_。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:10 +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 msgid "Setting up the repository" msgstr "建立资源库" -#: ../../source/contributor-tutorial-contribute-on-github.rst:21 +#: ../../source/contributor-tutorial-contribute-on-github.rst:29 msgid "**Create a GitHub account and setup Git**" msgstr "**创建 GitHub 账户并设置 Git**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:13 +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 #, fuzzy msgid "" "Git is a distributed version control tool. This allows for an entire " @@ -1370,20 +1495,20 @@ msgstr "" "Git 是一种分布式版本控制工具。它可以将整个代码库的历史记录保存在每个开发人员的机器上。您需要在本地计算机上安装该软件,可以按照本指南 " "`_ 进行设置。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:16 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "" "GitHub, itself, is a code hosting platform for version control and " "collaboration. It allows for everyone to collaborate and work from " "anywhere on remote repositories." msgstr "GitHub 本身是一个用于版本控制和协作的代码托管平台。它允许每个人在任何地方对远程仓库进行协作和工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +#: ../../source/contributor-tutorial-contribute-on-github.rst:25 msgid "" "If you haven't already, you will need to create an account on `GitHub " "`_." 
msgstr "如果还没有,您需要在 `GitHub `_ 上创建一个账户。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +#: ../../source/contributor-tutorial-contribute-on-github.rst:28 msgid "" "The idea behind the generic Git and GitHub workflow boils down to this: " "you download code from a remote repository on GitHub, make changes " @@ -1393,22 +1518,22 @@ msgstr "" "通用的 Git 和 GitHub 工作流程背后的理念可以归结为:从 GitHub 上的远程仓库下载代码,在本地进行修改并使用 Git " "进行跟踪,然后将新的历史记录上传回 GitHub。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:32 +#: ../../source/contributor-tutorial-contribute-on-github.rst:42 msgid "**Forking the Flower repository**" msgstr "**叉花仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:24 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 #, fuzzy msgid "" "A fork is a personal copy of a GitHub repository. To create one for " -"Flower, you must navigate to ``_ (while " +"Flower, you must navigate to https://github.com/adap/flower (while " "connected to your GitHub account) and click the ``Fork`` button situated " "on the top right of the page." 
msgstr "" "fork 是 GitHub 仓库的个人副本。要为 Flower 创建一个 fork,您必须导航到 " "https://github.com/adap/flower(同时连接到您的 GitHub 账户),然后点击页面右上方的 ``Fork`` 按钮。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:29 +#: ../../source/contributor-tutorial-contribute-on-github.rst:38 msgid "" "You can change the name if you want, but this is not necessary as this " "version of Flower will be yours and will sit inside your own account " @@ -1418,11 +1543,11 @@ msgstr "" "您可以更改名称,但没有必要,因为这个版本的 Flower " "将是您自己的,并位于您自己的账户中(即,在您自己的版本库列表中)。创建完成后,您会在左上角看到自己的 Flower 版本。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 msgid "**Cloning your forked repository**" msgstr "**克隆你的分叉仓库**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:35 +#: ../../source/contributor-tutorial-contribute-on-github.rst:45 msgid "" "The next step is to download the forked repository on your machine to be " "able to make changes to it. On your forked repository page, you should " @@ -1432,28 +1557,28 @@ msgstr "" "下一步是在你的机器上下载分叉版本库,以便对其进行修改。在分叉版本库页面上,首先点击右侧的 \"代码 \"按钮,这样就能复制版本库的 HTTPS " "链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:41 +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 msgid "" "Once you copied the \\, you can open a terminal on your machine, " "navigate to the place you want to download the repository to and type:" msgstr "一旦复制了 (),你就可以在你的机器上打开一个终端,导航到你想下载软件源的地方,然后键入:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:47 +#: ../../source/contributor-tutorial-contribute-on-github.rst:59 #, fuzzy msgid "" "This will create a ``flower/`` (or the name of your fork if you renamed " "it) folder in the current working directory." 
msgstr "这将在当前工作目录下创建一个 `flower/`(如果重命名了,则使用 fork 的名称)文件夹。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:66 +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 msgid "**Add origin**" msgstr "**添加原产地**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:50 +#: ../../source/contributor-tutorial-contribute-on-github.rst:62 msgid "You can then go into the repository folder:" msgstr "然后,您就可以进入存储库文件夹:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:56 +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 msgid "" "And here we will need to add an origin to our repository. The origin is " "the \\ of the remote fork repository. To obtain it, we can do as " @@ -1463,28 +1588,28 @@ msgstr "" "在这里,我们需要为我们的版本库添加一个 origin。origin 是远程 fork 仓库的 " "\\。要获得它,我们可以像前面提到的那样,访问 GitHub 账户上的分叉仓库并复制链接。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:61 +#: ../../source/contributor-tutorial-contribute-on-github.rst:75 msgid "" "Once the \\ is copied, we can type the following command in our " "terminal:" msgstr "一旦复制了 \\ ,我们就可以在终端中键入以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:90 +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 msgid "**Add upstream**" msgstr "**增加上游**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:69 +#: ../../source/contributor-tutorial-contribute-on-github.rst:81 #, fuzzy msgid "" "Now we will add an upstream address to our repository. 
Still in the same " "directory, we must run the following command:" msgstr "现在,我们要为版本库添加一个上游地址。还是在同一目录下,我们必须运行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:76 +#: ../../source/contributor-tutorial-contribute-on-github.rst:88 msgid "The following diagram visually explains what we did in the previous steps:" msgstr "下图直观地解释了我们在前面步骤中的操作:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:80 +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 msgid "" "The upstream is the GitHub remote address of the parent repository (in " "this case Flower), i.e. the one we eventually want to contribute to and " @@ -1495,17 +1620,17 @@ msgstr "" "上游是父版本库(这里是 Flower)的 GitHub 远程地址,即我们最终要贡献的版本库,因此需要最新的历史记录。origin " "只是我们创建的分叉仓库的 GitHub 远程地址,即我们自己账户中的副本(分叉)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:84 +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 msgid "" "To make sure our local version of the fork is up-to-date with the latest " "changes from the Flower repository, we can execute the following command:" msgstr "为了确保本地版本的分叉程序与 Flower 代码库的最新更改保持一致,我们可以执行以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:93 +#: ../../source/contributor-tutorial-contribute-on-github.rst:105 msgid "Setting up the coding environment" msgstr "设置编码环境" -#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +#: ../../source/contributor-tutorial-contribute-on-github.rst:107 #, fuzzy msgid "" "This can be achieved by following this :doc:`getting started guide for " @@ -1514,155 +1639,158 @@ msgid "" "code and test it, you can finally start making changes!" msgstr "您可以按照这份 \"贡献者入门指南\"__(注意,您不需要克隆版本库)来实现这一点。一旦您能够编写代码并进行测试,您就可以开始修改了!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:100 +#: ../../source/contributor-tutorial-contribute-on-github.rst:113 msgid "Making changes" msgstr "做出改变" -#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +#: ../../source/contributor-tutorial-contribute-on-github.rst:115 msgid "" "Before making any changes make sure you are up-to-date with your " "repository:" msgstr "在进行任何更改之前,请确保您的版本库是最新的:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:108 +#: ../../source/contributor-tutorial-contribute-on-github.rst:121 msgid "And with Flower's repository:" msgstr "还有Flower的存储库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:122 +#: ../../source/contributor-tutorial-contribute-on-github.rst:134 msgid "**Create a new branch**" msgstr "**创建一个新分支**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:115 +#: ../../source/contributor-tutorial-contribute-on-github.rst:128 msgid "" "To make the history cleaner and easier to work with, it is good practice " "to create a new branch for each feature/project that needs to be " "implemented." msgstr "为了使历史记录更简洁、更易于操作,为每个需要实现的功能/项目创建一个新分支是个不错的做法。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:118 +#: ../../source/contributor-tutorial-contribute-on-github.rst:131 msgid "" "To do so, just run the following command inside the repository's " "directory:" msgstr "为此,只需在版本库目录下运行以下命令即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:136 msgid "**Make changes**" msgstr "**进行修改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:125 +#: ../../source/contributor-tutorial-contribute-on-github.rst:137 msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "使用您最喜欢的编辑器编写优秀的代码并创建精彩的更改!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:138 +#: ../../source/contributor-tutorial-contribute-on-github.rst:149 msgid "**Test and format your code**" msgstr "**测试并格式化您的代码**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:128 +#: ../../source/contributor-tutorial-contribute-on-github.rst:139 msgid "" "Don't forget to test and format your code! Otherwise your code won't be " "able to be merged into the Flower repository. This is done so the " "codebase stays consistent and easy to understand." msgstr "不要忘记测试和格式化您的代码!否则您的代码将无法并入 Flower 代码库。这样做是为了使代码库保持一致并易于理解。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:131 +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 msgid "To do so, we have written a few scripts that you can execute:" msgstr "为此,我们编写了一些脚本供您执行:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:150 +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 msgid "**Stage changes**" msgstr "**舞台变化**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:141 +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 msgid "" "Before creating a commit that will update your history, you must specify " "to Git which files it needs to take into account." msgstr "在创建更新历史记录的提交之前,必须向 Git 说明需要考虑哪些文件。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 msgid "This can be done with:" msgstr "这可以通过:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:149 +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +#, fuzzy msgid "" "To check which files have been modified compared to the last version " "(last commit) and to see which files are staged for commit, you can use " -"the :code:`git status` command." +"the ``git status`` command." 
msgstr "要查看与上一版本(上次提交)相比哪些文件已被修改,以及哪些文件处于提交阶段,可以使用 :code:`git status` 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:160 +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 msgid "**Commit changes**" msgstr "**提交更改**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:153 +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +#, fuzzy msgid "" -"Once you have added all the files you wanted to commit using :code:`git " -"add`, you can finally create your commit using this command:" +"Once you have added all the files you wanted to commit using ``git add``," +" you can finally create your commit using this command:" msgstr "使用 :code:`git add` 添加完所有要提交的文件后,就可以使用此命令创建提交了:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:159 +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +#, fuzzy msgid "" "The \\ is there to explain to others what the commit " "does. It should be written in an imperative style and be concise. An " -"example would be :code:`git commit -m \"Add images to README\"`." +"example would be ``git commit -m \"Add images to README\"``." 
msgstr "" " 用于向他人解释提交的作用。它应该以命令式风格书写,并且简明扼要。例如 :code:`git commit " "-m \"Add images to README\"`。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:171 +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 msgid "**Push the changes to the fork**" msgstr "**将更改推送到分叉**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:163 +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 msgid "" "Once we have committed our changes, we have effectively updated our local" " history, but GitHub has no way of knowing this unless we push our " "changes to our origin's remote address:" msgstr "一旦提交了修改,我们就有效地更新了本地历史记录,但除非我们将修改推送到原点的远程地址,否则 GitHub 无法得知:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:170 +#: ../../source/contributor-tutorial-contribute-on-github.rst:184 msgid "" "Once this is done, you will see on the GitHub that your forked repo was " "updated with the changes you have made." msgstr "完成此操作后,您将在 GitHub 上看到您的分叉仓库已根据您所做的更改进行了更新。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:174 +#: ../../source/contributor-tutorial-contribute-on-github.rst:188 msgid "Creating and merging a pull request (PR)" msgstr "创建和合并拉取请求 (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +#: ../../source/contributor-tutorial-contribute-on-github.rst:226 msgid "**Create the PR**" msgstr "**创建 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:177 +#: ../../source/contributor-tutorial-contribute-on-github.rst:191 msgid "" "Once you have pushed changes, on the GitHub webpage of your repository " "you should see the following message:" msgstr "推送更改后,在仓库的 GitHub 网页上应该会看到以下信息:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:181 +#: ../../source/contributor-tutorial-contribute-on-github.rst:196 #, fuzzy msgid "Otherwise you can always find this option in the ``Branches`` page." 
msgstr "否则,您可以在 \"分支 \"页面找到该选项。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 #, fuzzy msgid "" "Once you click the ``Compare & pull request`` button, you should see " "something similar to this:" msgstr "点击 \"比较和拉取请求 \"按钮后,您应该会看到类似下面的内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:187 +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 msgid "At the top you have an explanation of which branch will be merged where:" msgstr "在顶部,你可以看到关于哪个分支将被合并的说明:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:191 +#: ../../source/contributor-tutorial-contribute-on-github.rst:207 msgid "" "In this example you can see that the request is to merge the branch " "``doc-fixes`` from my forked repository to branch ``main`` from the " "Flower repository." msgstr "在这个例子中,你可以看到请求将我分叉的版本库中的分支 ``doc-fixes`` 合并到 Flower 版本库中的分支 ``main``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +#: ../../source/contributor-tutorial-contribute-on-github.rst:210 #, fuzzy msgid "" "The title should be changed to adhere to the :ref:`pr_title_format` " @@ -1672,7 +1800,7 @@ msgstr "" "应该修改标题以符合 :ref:`pr_title_format` 准则,否则将无法合并 PR。因此,在这种情况下,正确的标题可能是 " "``docs(framework:skip)修复错字``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:196 +#: ../../source/contributor-tutorial-contribute-on-github.rst:214 msgid "" "The input box in the middle is there for you to describe what your PR " "does and to link it to existing issues. We have placed comments (that " @@ -1680,167 +1808,167 @@ msgid "" "process." msgstr "中间的输入框供您描述 PR 的作用,并将其与现有问题联系起来。我们在此放置了注释(一旦 PR 打开,注释将不会显示),以指导您完成整个过程。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:199 +#: ../../source/contributor-tutorial-contribute-on-github.rst:218 #, fuzzy msgid "It is important to follow the instructions described in comments." 
msgstr "请务必遵守注释中的说明。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +#: ../../source/contributor-tutorial-contribute-on-github.rst:220 msgid "" "At the bottom you will find the button to open the PR. This will notify " "reviewers that a new PR has been opened and that they should look over it" " to merge or to request changes." msgstr "在底部,您可以找到打开 PR 的按钮。这将通知审核人员新的 PR 已经打开,他们应该查看该 PR 以进行合并或要求修改。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:204 +#: ../../source/contributor-tutorial-contribute-on-github.rst:224 msgid "" "If your PR is not yet ready for review, and you don't want to notify " "anyone, you have the option to create a draft pull request:" msgstr "如果您的 PR 尚未准备好接受审核,而且您不想通知任何人,您可以选择创建一个草案拉取请求:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:230 msgid "**Making new changes**" msgstr "**作出新的改变**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +#: ../../source/contributor-tutorial-contribute-on-github.rst:229 msgid "" "Once the PR has been opened (as draft or not), you can still push new " "commits to it the same way we did before, by making changes to the branch" " associated with the PR." 
msgstr "一旦 PR 被打开(无论是否作为草案),你仍然可以像以前一样,通过修改与 PR 关联的分支来推送新的提交。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +#: ../../source/contributor-tutorial-contribute-on-github.rst:253 msgid "**Review the PR**" msgstr "**审查 PR**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:212 +#: ../../source/contributor-tutorial-contribute-on-github.rst:233 msgid "" "Once the PR has been opened or once the draft PR has been marked as " "ready, a review from code owners will be automatically requested:" msgstr "一旦 PR 被打开或 PR 草案被标记为就绪,就会自动要求代码所有者进行审核:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:216 +#: ../../source/contributor-tutorial-contribute-on-github.rst:238 msgid "" "Code owners will then look into the code, ask questions, request changes " "or validate the PR." msgstr "然后,代码所有者会查看代码、提出问题、要求修改或验证 PR。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:218 +#: ../../source/contributor-tutorial-contribute-on-github.rst:241 msgid "Merging will be blocked if there are ongoing requested changes." msgstr "如果有正在进行的更改请求,合并将被阻止。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:222 +#: ../../source/contributor-tutorial-contribute-on-github.rst:245 msgid "" "To resolve them, just push the necessary changes to the branch associated" " with the PR:" msgstr "要解决这些问题,只需将必要的更改推送到与 PR 关联的分支即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:226 +#: ../../source/contributor-tutorial-contribute-on-github.rst:250 msgid "And resolve the conversation:" msgstr "并解决对话:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:230 +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 msgid "" "Once all the conversations have been resolved, you can re-request a " "review." 
msgstr "一旦所有对话都得到解决,您就可以重新申请审核。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +#: ../../source/contributor-tutorial-contribute-on-github.rst:274 msgid "**Once the PR is merged**" msgstr "**一旦 PR 被合并**" -#: ../../source/contributor-tutorial-contribute-on-github.rst:234 +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 msgid "" "If all the automatic tests have passed and reviewers have no more changes" " to request, they can approve the PR and merge it." msgstr "如果所有自动测试都已通过,且审核员不再需要修改,他们就可以批准 PR 并将其合并。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:238 +#: ../../source/contributor-tutorial-contribute-on-github.rst:261 msgid "" "Once it is merged, you can delete the branch on GitHub (a button should " "appear to do so) and also delete it locally by doing:" msgstr "合并后,您可以在 GitHub 上删除该分支(会出现一个删除按钮),也可以在本地删除该分支:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:245 +#: ../../source/contributor-tutorial-contribute-on-github.rst:269 msgid "Then you should update your forked repository by doing:" msgstr "然后,你应该更新你的分叉仓库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 msgid "Example of first contribution" msgstr "首次贡献示例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:257 +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 msgid "Problem" msgstr "问题" -#: ../../source/contributor-tutorial-contribute-on-github.rst:259 +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 #, fuzzy msgid "" "For our documentation, we've started to use the `Diàtaxis framework " "`_." 
msgstr "对于我们的文档,我们已经开始使用 \"Diàtaxis 框架 `_\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:261 +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 #, fuzzy msgid "" "Our \"How to\" guides should have titles that continue the sentence \"How" " to …\", for example, \"How to upgrade to Flower 1.0\"." msgstr "我们的 \"如何 \"指南的标题应延续 \"如何...... \"的句式,例如 \"如何升级到 Flower 1.0\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:263 +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 msgid "" "Most of our guides do not follow this new format yet, and changing their " "title is (unfortunately) more involved than one might think." msgstr "我们的大多数指南还没有采用这种新格式,而更改其标题(不幸的是)比人们想象的要复杂得多。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:265 +#: ../../source/contributor-tutorial-contribute-on-github.rst:291 #, fuzzy msgid "" "This issue is about changing the title of a doc from present continuous " "to present simple." msgstr "这个问题是关于将文档标题从现在进行时改为一般现在时。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:267 +#: ../../source/contributor-tutorial-contribute-on-github.rst:294 #, fuzzy msgid "" "Let's take the example of \"Saving Progress\" which we changed to \"Save " "Progress\". Does this pass our check?" msgstr "以 \"保存进度 \"为例,我们将其改为 \"保存进度\"。这是否通过了我们的检查?" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:269 +#: ../../source/contributor-tutorial-contribute-on-github.rst:297 #, fuzzy msgid "Before: \"How to saving progress\" ❌" msgstr "之前: \"如何保存进度\" ❌" -#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +#: ../../source/contributor-tutorial-contribute-on-github.rst:299 #, fuzzy msgid "After: \"How to save progress\" ✅" msgstr "之后: \"如何保存进度\"✅" -#: ../../source/contributor-tutorial-contribute-on-github.rst:274 +#: ../../source/contributor-tutorial-contribute-on-github.rst:302 msgid "Solution" msgstr "解决方案" -#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 #, fuzzy msgid "" "This is a tiny change, but it'll allow us to test your end-to-end setup. " "After cloning and setting up the Flower repo, here's what you should do:" msgstr "这只是一个很小的改动,但可以让我们测试你的端到端设置。克隆并设置好 Flower repo 后,你应该这样做:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:278 +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 #, fuzzy msgid "Find the source file in ``doc/source``" msgstr "在 `doc/source` 中查找源文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:279 +#: ../../source/contributor-tutorial-contribute-on-github.rst:308 #, fuzzy msgid "" "Make the change in the ``.rst`` file (beware, the dashes under the title " "should be the same length as the title itself)" msgstr "在 `.rst` 文件中进行修改(注意,标题下的破折号应与标题本身的长度相同)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +#: ../../source/contributor-tutorial-contribute-on-github.rst:310 #, fuzzy msgid "" "Build the docs and `check the result `_" -#: ../../source/contributor-tutorial-contribute-on-github.rst:283 +#: ../../source/contributor-tutorial-contribute-on-github.rst:314 msgid "Rename file" msgstr "重命名文件" -#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +#: 
../../source/contributor-tutorial-contribute-on-github.rst:316 msgid "" "You might have noticed that the file name still reflects the old wording." " If we just change the file, then we break all existing links to it - it " @@ -1863,33 +1991,33 @@ msgstr "" "您可能已经注意到,文件名仍然反映了旧的措辞。如果我们只是更改文件,那么就会破坏与该文件的所有现有链接--" "避免这种情况是***重要的,破坏链接会损害我们的搜索引擎排名。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +#: ../../source/contributor-tutorial-contribute-on-github.rst:320 #, fuzzy msgid "Here's how to change the file name:" msgstr "下面是更改文件名的方法:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +#: ../../source/contributor-tutorial-contribute-on-github.rst:322 #, fuzzy msgid "Change the file name to ``save-progress.rst``" msgstr "将文件名改为`save-progress.rst`" -#: ../../source/contributor-tutorial-contribute-on-github.rst:291 +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 #, fuzzy msgid "Add a redirect rule to ``doc/source/conf.py``" msgstr "在 `doc/source/conf.py` 中添加重定向规则" -#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 #, fuzzy msgid "" "This will cause a redirect from ``saving-progress.html`` to ``save-" "progress.html``, old links will continue to work." msgstr "这将导致从 `saving-progress.html` 重定向到 `save-progress.html`,旧链接将继续工作。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:296 +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 msgid "Apply changes in the index file" msgstr "应用索引文件中的更改" -#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +#: ../../source/contributor-tutorial-contribute-on-github.rst:331 #, fuzzy msgid "" "For the lateral navigation bar to work properly, it is very important to " @@ -1897,89 +2025,89 @@ msgid "" "arborescence of the navbar." 
msgstr "要使横向导航栏正常工作,更新 `index.rst` 文件也非常重要。我们就是在这里定义整个导航栏的结构。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 #, fuzzy msgid "Find and modify the file name in ``index.rst``" msgstr "查找并修改 `index.rst` 中的文件名" -#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +#: ../../source/contributor-tutorial-contribute-on-github.rst:338 msgid "Open PR" msgstr "开放式 PR" -#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +#: ../../source/contributor-tutorial-contribute-on-github.rst:340 #, fuzzy msgid "" "Commit the changes (commit messages are always imperative: \"Do " "something\", in this case \"Change …\")" msgstr "提交更改(提交信息总是命令式的:\"做某事\",这里是 \"更改......\")" -#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +#: ../../source/contributor-tutorial-contribute-on-github.rst:342 msgid "Push the changes to your fork" msgstr "将更改推送到分叉" -#: ../../source/contributor-tutorial-contribute-on-github.rst:308 +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 #, fuzzy msgid "" "Open a PR (as shown above) with title ``docs(framework) Update how-to " "guide title``" msgstr "打开一个 PR(如上图所示),标题为\"`docs(framework) Update how-to guide title```\"。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:309 +#: ../../source/contributor-tutorial-contribute-on-github.rst:344 msgid "Wait for it to be approved!" msgstr "等待审批!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:310 +#: ../../source/contributor-tutorial-contribute-on-github.rst:345 msgid "Congrats! 🥳 You're now officially a Flower contributor!" msgstr "祝贺你 🥳 您现在正式成为 \"Flower \"贡献者!" 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:314 -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:727 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/contributor-tutorial-contribute-on-github.rst:348 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:573 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1012 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:811 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:857 #: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 msgid "Next steps" msgstr "接下来的步骤" -#: ../../source/contributor-tutorial-contribute-on-github.rst:316 +#: ../../source/contributor-tutorial-contribute-on-github.rst:350 msgid "" "Once you have made your first PR, and want to contribute more, be sure to" " check out the following :" msgstr "一旦您完成了第一份 PR,并希望做出更多贡献,请务必查看以下内容:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:318 +#: ../../source/contributor-tutorial-contribute-on-github.rst:353 #, fuzzy msgid "" ":doc:`Good first contributions `, where you should particularly look into the " -":code:`baselines` contributions." +"``baselines`` contributions." 
msgstr "" "`优秀的首次贡献 `_,在这里你应该特别看看 :code:`baselines` 的贡献。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:322 +#: ../../source/contributor-tutorial-contribute-on-github.rst:357 #: ../../source/fed/0000-20200102-fed-template.md:60 msgid "Appendix" msgstr "附录" -#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +#: ../../source/contributor-tutorial-contribute-on-github.rst:362 #, fuzzy msgid "PR title format" msgstr "PR 标题格式" -#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +#: ../../source/contributor-tutorial-contribute-on-github.rst:364 #, fuzzy msgid "We enforce the following PR title format:" msgstr "我们执行以下 PR 标题格式:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +#: ../../source/contributor-tutorial-contribute-on-github.rst:370 #, fuzzy msgid "" "(or ``(:skip) `` to ignore the PR in the " "changelog)" msgstr "(或 ``(:skip) `` 忽略更新日志中的 PR)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:337 +#: ../../source/contributor-tutorial-contribute-on-github.rst:372 #, fuzzy msgid "" "Where ```` needs to be in ``{ci, fix, feat, docs, refactor, " @@ -1992,59 +2120,59 @@ msgstr "" "```` 应该使用 ``{framework, baselines, datasets, examples, 或者 '*' " "当修改多个项目时需要使用 ':skip'标记}``, 并且 ```` 应该以一个大写的动词开始。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +#: ../../source/contributor-tutorial-contribute-on-github.rst:377 #, fuzzy msgid "Valid examples:" msgstr "实例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +#: ../../source/contributor-tutorial-contribute-on-github.rst:379 #, fuzzy msgid "``feat(framework) Add flwr build CLI command``" msgstr "`feat(框架) 添加 flwr build CLI 命令```" -#: ../../source/contributor-tutorial-contribute-on-github.rst:344 +#: ../../source/contributor-tutorial-contribute-on-github.rst:380 #, fuzzy msgid "``refactor(examples:skip) Improve quickstart-pytorch logging``" msgstr "``refactor(examples:skip) Improve quickstart-pytorch logging``." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:345 +#: ../../source/contributor-tutorial-contribute-on-github.rst:381 #, fuzzy msgid "``ci(*:skip) Enforce PR title format``" msgstr "`ci(*:skip)执行 PR 标题格式``。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:347 +#: ../../source/contributor-tutorial-contribute-on-github.rst:383 #, fuzzy msgid "Invalid examples:" msgstr "模拟示例" -#: ../../source/contributor-tutorial-contribute-on-github.rst:349 +#: ../../source/contributor-tutorial-contribute-on-github.rst:385 #, fuzzy msgid "``feat(framework): Add flwr build CLI command`` (extra ``:``)" msgstr "`feat(框架): 添加 flwr build CLI 命令``(额外的``:``)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:350 +#: ../../source/contributor-tutorial-contribute-on-github.rst:386 #, fuzzy msgid "" "``feat(*) Add flwr build CLI command`` (missing ``skip`` flag along with " "``*``)" msgstr "`feat(*)添加flwr构建CLI命令``(缺少``skip``标志和``*``)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:351 +#: ../../source/contributor-tutorial-contribute-on-github.rst:387 #, fuzzy msgid "``feat(skip) Add flwr build CLI command`` (missing ````)" msgstr "`feat(skip)添加flwr构建CLI命令``(缺少```)。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:352 +#: ../../source/contributor-tutorial-contribute-on-github.rst:388 #, fuzzy msgid "``feat(framework) add flwr build CLI command`` (non capitalised verb)" msgstr "`feat(framework)添加 flwr 构建 CLI 命令``(非大写动词)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:353 +#: ../../source/contributor-tutorial-contribute-on-github.rst:389 #, fuzzy msgid "``feat(framework) Add flwr build CLI command.`` (dot at the end)" msgstr "feat(框架) 添加 flwr 构建 CLI 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:354 +#: ../../source/contributor-tutorial-contribute-on-github.rst:390 #, fuzzy msgid "``Add flwr build CLI command.`` (missing ``()``)" msgstr "" @@ -2056,14 +2184,18 @@ msgid "Get started as a 
contributor" msgstr "成为贡献者" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 -#: ../../source/how-to-run-flower-using-docker.rst:153 +#: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:16 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:13 +#: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" msgstr "先决条件" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 #, fuzzy -msgid "`Python 3.8 `_ or above" -msgstr "Python 3.7 `_ 或更高版本" +msgid "`Python 3.9 `_ or above" +msgstr "Python 3.9 `_ 或更高版本" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 msgid "`Poetry 1.3 `_ or above" @@ -2080,20 +2212,20 @@ msgstr "(可选) `pyenv-virtualenv #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 #, fuzzy msgid "" -"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"Flower uses ``pyproject.toml`` to manage dependencies and configure " "development tools (the ones which support it). Poetry is a build tool " "which supports `PEP 517 `_." msgstr "" "Flower 使用 :code:`pyproject.toml` 来管理依赖关系和配置开发工具(支持它的)。Poetry 是一种支持 `PEP " "517 `_ 的构建工具。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:17 msgid "Developer Machine Setup" msgstr "开发者机器设置" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:21 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 #, fuzzy -msgid "Preliminarities" +msgid "Preliminaries" msgstr "前言" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:22 @@ -2113,110 +2245,110 @@ msgid "" "installation actions to add `brew` to your PATH." 
msgstr "安装 `homebrew `_。别忘了安装后的操作,将 `brew` 添加到你的 PATH。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:28 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:29 #, fuzzy msgid "" "Install `xz` (to install different Python versions) and `pandoc` to build" -" the docs::" +" the docs:" msgstr "安装 `xz`(用于安装不同的 Python 版本)和 `pandoc` 以构建文档::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:34 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:36 #, fuzzy msgid "For Ubuntu" msgstr "针对 Ubuntu" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:35 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:38 #, fuzzy msgid "" "Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all " -"necessary packages::" +"necessary packages:" msgstr "确保您的系统(Ubuntu 22.04+)为最新版本,并安装了所有必要的软件包::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:44 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:47 #, fuzzy msgid "Create Flower Dev Environment" msgstr "创建/删除虚拟环境" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:49 #, fuzzy msgid "" -"1. Clone the `Flower repository `_ from " -"GitHub::" +"Clone the `Flower repository `_ from " +"GitHub:" msgstr "首先,从 GitHub 克隆 \"Flower 存储库 `_\":" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:52 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:56 #, fuzzy msgid "" "Let's create the Python environment for all-things Flower. If you wish to" -" use :code:`pyenv`, we provide two convenience scripts that you can use. " -"If you prefer using something else than :code:`pyenv`, create a new " -"environment, activate and skip to the last point where all packages are " -"installed." +" use ``pyenv``, we provide two convenience scripts that you can use. 
If " +"you prefer using something else than ``pyenv``, create a new environment," +" activate and skip to the last point where all packages are installed." msgstr "" "让我们为 Flower 创建一个 Python 环境。如果您想使用 :code:`pyenv`,我们提供了两个方便的脚本供您使用。如果你不喜欢使用" " :code:`pyenv`,请创建一个新环境,激活并跳到最后一点,即安装所有软件包。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:61 #, fuzzy msgid "" -"If you don't have :code:`pyenv` installed, the following script that will" -" install it, set it up, and create the virtual environment (with " -":code:`Python 3.8.17` by default)::" +"If you don't have ``pyenv`` installed, the following script that will " +"install it, set it up, and create the virtual environment (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " -":code:`Python3.8.17)::" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python " +"3.9.20)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:58 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 #, fuzzy msgid "" -"If you already have :code:`pyenv` installed (along with the :code:`pyenv-" -"virtualenv` plugin), you can use the following convenience script (with " -":code:`Python 3.8.17` by default)::" +"If you already have ``pyenv`` installed (along with the ``pyenv-" +"virtualenv`` plugin), you can use the following convenience script (with " +":substitution-code:`Python |python_full_version|` by default):" msgstr "" -"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 " -":code:`Python3.8.17)::" +"如果没有安装 :code:`pyenv`,可以使用以下脚本安装 pyenv、设置并创建虚拟环境(默认使用 :code:`Python " +"3.9.20)::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 #, fuzzy msgid "" -"3. 
Install the Flower package in development mode (think :code:`pip " -"install -e`) along with all necessary dependencies::" +"3. Install the Flower package in development mode (think ``pip install " +"-e``) along with all necessary dependencies:" msgstr "第三,在开发模式下安装 Flower 软件包(想想 :code:`pip install -e`)以及所有必要的依赖项::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 msgid "Convenience Scripts" msgstr "便捷脚本" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:71 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:87 +#, fuzzy msgid "" "The Flower repository contains a number of convenience scripts to make " -"recurring development tasks easier and less error-prone. See the " -":code:`/dev` subdirectory for a full list. The following scripts are " -"amongst the most important ones:" +"recurring development tasks easier and less error-prone. See the ``/dev``" +" subdirectory for a full list. 
The following scripts are amongst the most" +" important ones:" msgstr "Flower 软件仓库包含大量便捷脚本,可使重复性开发任务更轻松、更不易出错。完整列表请参见 :code:`/dev` 子目录。以下是最重要的脚本:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:77 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 msgid "Create/Delete Virtual Environment" msgstr "创建/删除虚拟环境" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:101 msgid "Compile ProtoBuf Definitions" msgstr "编译 ProtoBuf 定义" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 msgid "Auto-Format Code" msgstr "自动格式化代码" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:115 msgid "Run Linters and Tests" msgstr "运行分类器和测试" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:106 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:122 #, fuzzy msgid "Add a pre-commit hook" msgstr "添加预先提交钩子" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:108 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 #, fuzzy msgid "" "Developers may integrate a pre-commit hook into their workflow utilizing " @@ -2228,31 +2360,31 @@ msgstr "" "库将预提交钩子集成到工作流程中。预提交钩子被配置为执行两个主要操作: `./dev/format.sh`` 和 ``./dev/test.sh``" " 脚本。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:110 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:128 #, fuzzy msgid "There are multiple ways developers can use this:" msgstr "开发人员可以通过多种方式使用它:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:112 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 #, fuzzy msgid "Install the pre-commit hook to your local git directory by simply 
running:" msgstr "在本地 git 目录中安装预提交钩子,只需运行" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:118 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:136 #, fuzzy msgid "" "Each ``git commit`` will trigger the execution of formatting and " "linting/test scripts." msgstr "每次 \"git 提交 \"都会触发格式化和内核/测试脚本的执行。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:138 #, fuzzy msgid "" "If in a hurry, bypass the hook using ``--no-verify`` with the ``git " -"commit`` command. ::" +"commit`` command." msgstr "如果赶时间,可使用 ``--no-verify`` 和 ``git commit` 命令绕过钩子:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:124 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:145 #, fuzzy msgid "" "For developers who prefer not to install the hook permanently, it is " @@ -2260,24405 +2392,27828 @@ msgid "" " the following command:" msgstr "对于不想永久安装钩子的开发人员,可以使用以下命令在提交更改之前执行一次性检查:" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:130 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:152 #, fuzzy msgid "" "This executes the formatting and linting checks/tests on all the files " "without modifying the default behavior of ``git commit``." msgstr "这将在不修改 ``git commit`` 默认行为的情况下对所有文件执行格式化和词排检查/测试。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:133 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:156 msgid "Run Github Actions (CI) locally" msgstr "在本地运行 Github 操作 (CI)" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:135 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:158 #, fuzzy msgid "" "Developers could run the full set of Github Actions workflows under their" " local environment by using `Act `_. 
" "Please refer to the installation instructions under the linked repository" -" and run the next command under Flower main cloned repository folder::" +" and run the next command under Flower main cloned repository folder:" msgstr "" "开发人员可以使用 `Act _` 在本地环境下运行全套 Github Actions" " 工作流程。请参考链接仓库下的安装说明,并在 Flower 主克隆仓库文件夹下运行下一条命令::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:142 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 msgid "" "The Flower default workflow would run by setting up the required Docker " "machines underneath." msgstr "Flower 默认工作流程将通过在下面设置所需的 Docker 机器来运行。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:147 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:171 msgid "Build Release" msgstr "版本发布" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:149 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:173 +#, fuzzy msgid "" "Flower uses Poetry to build releases. The necessary command is wrapped in" -" a simple script::" +" a simple script:" msgstr "Flower 使用 Poetry 创建发布版本。必要的命令封装在一个简单的脚本中::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:154 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:180 +#, fuzzy msgid "" -"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in" -" the :code:`/dist` subdirectory." +"The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the " +"``/dist`` subdirectory." 
msgstr "生成的 :code:`.whl` 和 :code:`.tar.gz` 版本将存储在 :code:`/dist` 子目录中。" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:159 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:184 msgid "Build Documentation" msgstr "构建文档" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:161 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:186 +#, fuzzy msgid "" "Flower's documentation uses `Sphinx `_. " "There's no convenience script to re-build the documentation yet, but it's" -" pretty easy::" +" pretty easy:" msgstr "" "Flower 的文档使用 `Sphinx `_。目前还没有很方便的脚本来重新构建文档,不过这很容易::" -#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:167 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:194 msgid "This will generate HTML documentation in ``doc/build/html``." msgstr "这将在 ``doc/build/html`` 中生成 HTML 文档。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: FedBN in PyTorch - From Centralized To Federated" -msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" - -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/enable-tls.rst:2 #, fuzzy +msgid "Enable TLS for Secure Connections" +msgstr "启用 SSL 连接" + +#: ../../source/docker/enable-tls.rst:4 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload with `FedBN " -"`_, a federated training strategy " -"designed for non-iid data. We are using PyTorch to train a Convolutional " -"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " -"When applying FedBN, only few changes needed compared to :doc:`Example: " -"PyTorch - From Centralized To Federated `." +"When operating in a production environment, it is strongly recommended to" +" enable Transport Layer Security (TLS) for each Flower Component to " +"ensure secure communication." 
msgstr "" -"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " -"从集中式到联邦式 `_ 做少量改动。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 -msgid "Centralized Training" -msgstr "集中式训练" +#: ../../source/docker/enable-tls.rst:7 +#, fuzzy +msgid "" +"To enable TLS, you will need a PEM-encoded root certificate, a PEM-" +"encoded private key and a PEM-encoded certificate chain." +msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +#: ../../source/docker/enable-tls.rst:12 #, fuzzy msgid "" -"All files are revised based on :doc:`Example: PyTorch - From Centralized " -"To Federated `. The only " -"thing to do is modifying the file called :code:`cifar.py`, revised part " -"is shown below:" +"For testing purposes, you can generate your own self-signed certificates." +" The `Enable SSL connections `__ page contains a section that" +" will guide you through the process." msgstr "" -"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " -"的文件,修改部分如下所示:" +"出于测试目的,你可以生成自己的自签名证书。启用 SSL 连接 `_ 页面中有一个部分将指导你完成这一过程。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +#: ../../source/docker/enable-tls.rst:17 msgid "" -"The model architecture defined in class Net() is added with Batch " -"Normalization layers accordingly." -msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" +"Because Flower containers, by default, run with a non-root user ``app``, " +"the mounted files and directories must have the proper permissions for " +"the user ID ``49999``." 
+msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 -msgid "You can now run your machine learning workload:" -msgstr "现在,您可以运行您的机器学习工作了:" +#: ../../source/docker/enable-tls.rst:20 +msgid "" +"For example, to change the user ID of all files in the ``certificates/`` " +"directory, you can run ``sudo chown -R 49999:49999 certificates/*``." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/docker/enable-tls.rst:23 +#: ../../source/docker/persist-superlink-state.rst:15 +msgid "" +"If you later want to delete the directory, you can change the user ID " +"back to the current user ID by running ``sudo chown -R $USER:$(id -gn) " +"state``." +msgstr "" + +#: ../../source/docker/enable-tls.rst:27 #, fuzzy +msgid "SuperLink" +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst:29 msgid "" -"So far this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"federated learning system within FedBN, the system consists of one server" -" and two clients." 
+"Assuming all files we need are in the local ``certificates`` directory, " +"we can use the flag ``--volume`` to mount the local directory into the " +"``/app/certificates/`` directory of the container:" msgstr "" -"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " -"中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 -msgid "Federated Training" -msgstr "联邦培训" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "Understanding the command" +msgstr "训练模型" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#: ../../source/docker/enable-tls.rst:45 ../../source/docker/enable-tls.rst:92 +#: ../../source/docker/enable-tls.rst:125 +#: ../../source/docker/tutorial-quickstart-docker.rst:66 +#: ../../source/docker/tutorial-quickstart-docker.rst:103 +#: ../../source/docker/tutorial-quickstart-docker.rst:217 +#: ../../source/docker/tutorial-quickstart-docker.rst:305 #, fuzzy +msgid "``docker run``: This tells Docker to run a container from an image." +msgstr "`docker run``: 这是运行新 Docker 容器的命令。" + +#: ../../source/docker/enable-tls.rst:46 ../../source/docker/enable-tls.rst:93 +#: ../../source/docker/enable-tls.rst:126 +#: ../../source/docker/tutorial-quickstart-docker.rst:67 +#: ../../source/docker/tutorial-quickstart-docker.rst:104 +#: ../../source/docker/tutorial-quickstart-docker.rst:218 +#: ../../source/docker/tutorial-quickstart-docker.rst:306 +msgid "``--rm``: Remove the container once it is stopped or the command exits." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"If you have read :doc:`Example: PyTorch - From Centralized To Federated " -"`, the following parts are" -" easy to follow, only :code:`get_parameters` and :code:`set_parameters` " -"function in :code:`client.py` needed to revise. If not, please read the " -":doc:`Example: PyTorch - From Centralized To Federated `. first." 
+"``--volume ./certificates/:/app/certificates/:ro``: Mount the " +"``certificates`` directory in" msgstr "" -"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " -":code:`get_parameters` 和 :code:`set_parameters` 中的 :code:`client.py` " -"函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. In FedBN, " -":code:`server.py` keeps unchanged, we can start the server directly." -msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" +"the current working directory of the host machine as a read-only volume " +"at the" +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +#: ../../source/docker/enable-tls.rst +msgid "``/app/certificates`` directory inside the container." +msgstr "" + +#: ../../source/docker/enable-tls.rst msgid "" -"Finally, we will revise our *client* logic by changing " -":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " -"we will exclude batch normalization parameters from model parameter list " -"when sending to or receiving from the server." +"This allows the container to access the TLS certificates that are stored " +"in the certificates" msgstr "" -"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " -":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " -"normalization层的参数。" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 -msgid "Now, you can now open two additional terminal windows and run" -msgstr "现在,您可以打开另外两个终端窗口并运行程序" +#: ../../source/docker/enable-tls.rst +msgid "directory." 
+msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +#: ../../source/docker/enable-tls.rst +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your (previously centralized) PyTorch project run federated " -"learning with FedBN strategy across two clients. Congratulations!" -msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联合学习。祝贺!" +":substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of " +"the image to be run and the specific" +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 -#: ../../source/example-jax-from-centralized-to-federated.rst:277 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 -#: ../../source/tutorial-quickstart-jax.rst:283 -msgid "Next Steps" -msgstr "下一步工作" +#: ../../source/docker/enable-tls.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a specific version of the image." +msgstr "" -#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +#: ../../source/docker/enable-tls.rst msgid "" -"The full source code for this example can be found `here " -"`_. Our example is of course somewhat over-" -"simplified because both clients load the exact same dataset, which isn't " -"realistic. You're now prepared to explore this topic further. How about " -"using different subsets of CIFAR-10 on each client? How about adding more" -" clients?" 
+"``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA" +" certificate file" msgstr "" -"本示例的完整源代码可在 `_ " -"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 " -"CIFAR-10 子集,或者增加客户端的数量。" -#: ../../source/example-jax-from-centralized-to-federated.rst:2 -msgid "Example: JAX - Run JAX Federated" -msgstr "示例: JAX - 运行联邦式 JAX" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "inside the container." +msgstr "使用 VSCode Dev Containers 进行开发" -#: ../../source/example-jax-from-centralized-to-federated.rst:4 -#: ../../source/tutorial-quickstart-jax.rst:10 +#: ../../source/docker/enable-tls.rst msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing JAX workload. We are using JAX to train a linear " -"regression model on a scikit-learn dataset. We will structure the example" -" similar to our `PyTorch - From Centralized To Federated " -"`_ walkthrough. First, we build a centralized " -"training approach based on the `Linear Regression with JAX " -"`_" -" tutorial`. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"The ``certificates/ca.crt`` file is a certificate that is used to verify " +"the identity of the" msgstr "" -"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX 在 scikit-learn " -"数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到联邦式 " -"`_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " -"`_" -" 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/example-jax-from-centralized-to-federated.rst:10 -#: ../../source/tutorial-quickstart-jax.rst:16 +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "SuperLink." 
+msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst msgid "" -"Before we start building our JAX example, we need install the packages " -":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperLink's" msgstr "" -"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " -"和 :code:`flwr`:" -#: ../../source/example-jax-from-centralized-to-federated.rst:18 -#: ../../source/tutorial-quickstart-jax.rst:24 -msgid "Linear Regression with JAX" -msgstr "使用 JAX 进行线性回归" +#: ../../source/docker/enable-tls.rst +msgid "TLS certificate file inside the container." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:20 -#: ../../source/tutorial-quickstart-jax.rst:26 +#: ../../source/docker/enable-tls.rst msgid "" -"We begin with a brief description of the centralized training code based " -"on a :code:`Linear Regression` model. If you want a more in-depth " -"explanation of what's going on then have a look at the official `JAX " -"documentation `_." +"The ``certificates/server.pem`` file is used to identify the SuperLink " +"and to encrypt the" msgstr "" -"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " -"`JAX 文档 `_。" -#: ../../source/example-jax-from-centralized-to-federated.rst:23 -#: ../../source/tutorial-quickstart-jax.rst:29 -msgid "" -"Let's create a new file called :code:`jax_training.py` with all the " -"components required for a traditional (centralized) linear regression " -"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to " -"be imported. In addition, we need to import :code:`sklearn` since we use " -":code:`make_regression` for the dataset and :code:`train_test_split` to " -"split the dataset into a training and test set. You can see that we do " -"not yet import the :code:`flwr` package for federated learning. This will" -" be done later." 
+#: ../../source/docker/enable-tls.rst +msgid "data that is transmitted over the network." msgstr "" -"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " -"JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " -":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " -"将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" -#: ../../source/example-jax-from-centralized-to-federated.rst:37 -#: ../../source/tutorial-quickstart-jax.rst:43 +#: ../../source/docker/enable-tls.rst msgid "" -"The :code:`load_data()` function loads the mentioned training and test " -"sets." -msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperLink's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:47 -#: ../../source/tutorial-quickstart-jax.rst:53 -msgid "" -"The model architecture (a very simple :code:`Linear Regression` model) is" -" defined in :code:`load_model()`." -msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" +#: ../../source/docker/enable-tls.rst +msgid "TLS private key file inside the container." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:59 -#: ../../source/tutorial-quickstart-jax.rst:65 +#: ../../source/docker/enable-tls.rst msgid "" -"We now need to define the training (function :code:`train()`), which " -"loops over the training set and measures the loss (function " -":code:`loss_fn()`) for each batch of training examples. The loss function" -" is separate since JAX takes derivatives with a :code:`grad()` function " -"(defined in the :code:`main()` function and called in :code:`train()`)." 
+"The ``certificates/server.key`` file is used to decrypt the data that is " +"transmitted over" msgstr "" -"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " -":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " -"函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" -#: ../../source/example-jax-from-centralized-to-federated.rst:77 -#: ../../source/tutorial-quickstart-jax.rst:83 +#: ../../source/docker/enable-tls.rst +msgid "the network." +msgstr "" + +#: ../../source/docker/enable-tls.rst:72 +#, fuzzy +msgid "SuperNode" +msgstr "flower-superlink" + +#: ../../source/docker/enable-tls.rst:74 +#, fuzzy msgid "" -"The evaluation of the model is defined in the function " -":code:`evaluation()`. The function takes all test examples and measures " -"the loss of the linear regression model." -msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" +"Assuming that the ``ca.crt`` certificate already exists locally, we can " +"use the flag ``--volume`` to mount the local certificate into the " +"container's ``/app/`` directory." +msgstr "" +"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " +"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" -#: ../../source/example-jax-from-centralized-to-federated.rst:88 -#: ../../source/tutorial-quickstart-jax.rst:94 +#: ../../source/docker/enable-tls.rst:79 msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our model using JAX. " -"As already mentioned, the :code:`jax.grad()` function is defined in " -":code:`main()` and passed to :code:`train()`." +"If you're generating self-signed certificates and the ``ca.crt`` " +"certificate doesn't exist on the SuperNode, you can copy it over after " +"the generation step." 
msgstr "" -"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " -"训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " -":code:`train()`。" -#: ../../source/example-jax-from-centralized-to-federated.rst:105 -#: ../../source/tutorial-quickstart-jax.rst:111 -msgid "You can now run your (centralized) JAX linear regression workload:" -msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" +#: ../../source/docker/enable-tls.rst +msgid "``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:111 -#: ../../source/tutorial-quickstart-jax.rst:117 +#: ../../source/docker/enable-tls.rst msgid "" -"So far this should all look fairly familiar if you've used JAX before. " -"Let's take the next step and use what we've built to create a simple " -"federated learning system consisting of one server and two clients." -msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" +"current working directory of the host machine as a read-only volume at " +"the ``/app/ca.crt``" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:115 -#: ../../source/tutorial-quickstart-jax.rst:121 -msgid "JAX meets Flower" -msgstr "JAX 结合 Flower" +#: ../../source/docker/enable-tls.rst +#, fuzzy +msgid "directory inside the container." +msgstr "使用 VSCode Dev Containers 进行开发" -#: ../../source/example-jax-from-centralized-to-federated.rst:117 -#: ../../source/tutorial-quickstart-jax.rst:123 +#: ../../source/docker/enable-tls.rst msgid "" -"The concept of federating an existing workload is always the same and " -"easy to understand. We have to start a *server* and then use the code in " -":code:`jax_training.py` for the *clients* that are connected to the " -"*server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server*, which averages all received " -"parameter updates. 
This describes one round of the federated learning " -"process, and we repeat this for multiple rounds." +":substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of " +"the image to be run and the specific" msgstr "" -"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " -":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" -#: ../../source/example-jax-from-centralized-to-federated.rst:123 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 -#: ../../source/tutorial-quickstart-jax.rst:129 +#: ../../source/docker/enable-tls.rst msgid "" -"Our example consists of one *server* and two *clients*. Let's set up " -":code:`server.py` first. The *server* needs to import the Flower package " -":code:`flwr`. Next, we use the :code:`start_server` function to start a " -"server and tell it to perform three rounds of federated learning." +"``--root-certificates ca.crt``: This specifies the location of the CA " +"certificate file" msgstr "" -"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " -":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" -#: ../../source/example-jax-from-centralized-to-federated.rst:133 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 -#: ../../source/tutorial-quickstart-jax.rst:139 -msgid "We can already start the *server*:" -msgstr "我们已经可以启动*服务器*了:" +#: ../../source/docker/enable-tls.rst +msgid "The ``ca.crt`` file is used to verify the identity of the SuperLink." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:139 -#: ../../source/tutorial-quickstart-jax.rst:145 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined JAX training in :code:`jax_training.py`. 
Our" -" *client* needs to import :code:`flwr`, but also :code:`jax` and " -":code:`jaxlib` to update the parameters on our JAX model:" +#: ../../source/docker/enable-tls.rst:105 +msgid "SuperExec" msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " -":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " -":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" -#: ../../source/example-jax-from-centralized-to-federated.rst:154 -#: ../../source/tutorial-quickstart-jax.rst:160 +#: ../../source/docker/enable-tls.rst:107 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " -"easier to implement than :code:`Client` if you use a framework with good " -"NumPy interoperability (like JAX) because it avoids some of the " -"boilerplate that would otherwise be necessary. 
:code:`FlowerClient` needs" -" to implement four methods, two methods for getting/setting model " -"parameters, one method for training the model, and one method for testing" -" the model:" +"Assuming all files we need are in the local ``certificates`` directory " +"where the SuperExec will be executed from, we can use the flag " +"``--volume`` to mount the local directory into the ``/app/certificates/``" +" directory of the container:" msgstr "" -"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " -"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 " -":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" - -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid ":code:`set_parameters (optional)`" -msgstr ":code:`set_parameters (可选)`" -#: ../../source/example-jax-from-centralized-to-federated.rst:160 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -#: ../../source/tutorial-quickstart-jax.rst:166 +#: ../../source/docker/enable-tls.rst msgid "" -"set the model parameters on the local model that are received from the " -"server" -msgstr "在本地模型上设置从服务器接收的模型参数" +":substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of " +"the image to be run and the specific" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:161 -#: ../../source/tutorial-quickstart-jax.rst:167 -msgid "transform parameters to NumPy :code:`ndarray`'s" -msgstr "将参数转换为 NumPy :code:`ndarray`格式" +#: ../../source/docker/enable-tls.rst +msgid "SuperExec." 
+msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:162 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 -#: ../../source/tutorial-quickstart-jax.rst:168 +#: ../../source/docker/enable-tls.rst msgid "" -"loop over the list of model parameters received as NumPy " -":code:`ndarray`'s (think list of neural network layers)" -msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" - -#: ../../source/example-jax-from-centralized-to-federated.rst:163 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 -#: ../../source/tutorial-quickstart-jax.rst:169 -#: ../../source/tutorial-quickstart-pytorch.rst:155 -#: ../../source/tutorial-quickstart-scikitlearn.rst:118 -msgid ":code:`get_parameters`" -msgstr ":code:`get_parameters`" +"``--ssl-certfile certificates/server.pem``: Specify the location of the " +"SuperExec's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:164 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 -#: ../../source/tutorial-quickstart-jax.rst:170 +#: ../../source/docker/enable-tls.rst msgid "" -"get the model parameters and return them as a list of NumPy " -":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +"The ``certificates/server.pem`` file is used to identify the SuperExec " +"and to encrypt the" msgstr "" -"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " -":code:`flwr.client.NumPyClient`所匹配的格式)" -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid ":code:`fit`" -msgstr ":code:`fit`" - -#: ../../source/example-jax-from-centralized-to-federated.rst:166 -#: ../../source/example-jax-from-centralized-to-federated.rst:170 -#: 
../../source/example-pytorch-from-centralized-to-federated.rst:224 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 -#: ../../source/tutorial-quickstart-jax.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:176 +#: ../../source/docker/enable-tls.rst msgid "" -"update the parameters of the local model with the parameters received " -"from the server" -msgstr "用从服务器接收到的参数更新本地模型的参数" - -#: ../../source/example-jax-from-centralized-to-federated.rst:167 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 -#: ../../source/tutorial-quickstart-jax.rst:173 -msgid "train the model on the local training set" -msgstr "在本地训练集上训练模型" +"``--ssl-keyfile certificates/server.key``: Specify the location of the " +"SuperExec's" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:168 -#: ../../source/tutorial-quickstart-jax.rst:174 -msgid "get the updated local model parameters and return them to the server" -msgstr "获取更新后的本地模型参数并返回服务器" +#: ../../source/docker/enable-tls.rst +msgid "" +"``--executor-config root-" +"certificates=\\\"certificates/superlink_ca.crt\\\"``: Specify the" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -#: ../../source/tutorial-quickstart-jax.rst:178 -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid ":code:`evaluate`" -msgstr ":code:`evaluate`" +#: ../../source/docker/enable-tls.rst +msgid "" +"location of the CA certificate file inside the container that the " +"SuperExec executor" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:171 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 -#: ../../source/tutorial-quickstart-jax.rst:177 -msgid "evaluate the updated model on the local test set" -msgstr "在本地测试集上评估更新后的模型" +#: ../../source/docker/enable-tls.rst +msgid "should use to verify the 
SuperLink's identity." +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:172 -#: ../../source/tutorial-quickstart-jax.rst:178 -msgid "return the local loss to the server" -msgstr "向服务器返回本地损失值" +#: ../../source/docker/index.rst:2 +#, fuzzy +msgid "Run Flower using Docker" +msgstr "使用 Docker 运行 Flower" -#: ../../source/example-jax-from-centralized-to-federated.rst:174 -#: ../../source/tutorial-quickstart-jax.rst:180 +#: ../../source/docker/index.rst:4 msgid "" -"The challenging part is to transform the JAX model parameters from " -":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" -" `NumPyClient`." +"Start your Flower journey with our pre-made Docker images on Docker Hub, " +"supporting ``amd64`` and ``arm64v8`` architectures." msgstr "" -"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " -"`NumPyClient` 兼容。" -#: ../../source/example-jax-from-centralized-to-federated.rst:176 -#: ../../source/tutorial-quickstart-jax.rst:182 +#: ../../source/docker/index.rst:7 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`evaluate()` previously " -"defined in :code:`jax_training.py`. So what we really do here is we tell " -"Flower through our :code:`NumPyClient` subclass which of our already " -"defined functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Our Quickstart guide walks you through containerizing a Flower project " +"and running it end to end using Docker." 
msgstr "" -"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " -":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" -#: ../../source/example-jax-from-centralized-to-federated.rst:245 -#: ../../source/tutorial-quickstart-jax.rst:251 -msgid "Having defined the federation process, we can run it." -msgstr "定义了联邦进程后,我们就可以运行它了。" +#: ../../source/docker/index.rst:11 +#, fuzzy +msgid "Getting Started" +msgstr "开始" -#: ../../source/example-jax-from-centralized-to-federated.rst:268 -#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 -#: ../../source/tutorial-quickstart-jax.rst:274 -msgid "And that's it. You can now open two additional terminal windows and run" -msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" +#: ../../source/docker/index.rst:19 +msgid "Running in Production" +msgstr "" -#: ../../source/example-jax-from-centralized-to-federated.rst:274 -#: ../../source/tutorial-quickstart-jax.rst:280 -msgid "" -"in each window (make sure that the server is still running before you do " -"so) and see your JAX project run federated learning across two clients. " -"Congratulations!" -msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" +#: ../../source/docker/index.rst:28 +#, fuzzy +msgid "Advanced Options" +msgstr "高级安装选项" -#: ../../source/example-jax-from-centralized-to-federated.rst:279 -#: ../../source/tutorial-quickstart-jax.rst:285 -msgid "" -"The source code of this example was improved over time and can be found " -"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " -"clients load the same dataset." 
+#: ../../source/docker/index.rst:40 +#, fuzzy +msgid "Run Flower using Docker Compose" +msgstr "使用 Docker 运行 Flower" + +#: ../../source/docker/persist-superlink-state.rst:2 +msgid "Persist the State of the SuperLink" msgstr "" -"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " -"`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" -#: ../../source/example-jax-from-centralized-to-federated.rst:282 -#: ../../source/tutorial-quickstart-jax.rst:288 +#: ../../source/docker/persist-superlink-state.rst:4 +#, fuzzy msgid "" -"You're now prepared to explore this topic further. How about using a more" -" sophisticated model or using a different dataset? How about adding more " -"clients?" -msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" - -#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 -msgid "Example: PyTorch - From Centralized To Federated" -msgstr "实例: PyTorch - 从集中式到联邦式" +"By default, the Flower SuperLink keeps its state in-memory. When using " +"the Docker flag ``--rm``, the state is not persisted between container " +"starts." +msgstr "" +"默认情况下,Flower 服务器会将状态保存在内存中。使用 Docker 标志 ``--rm`` " +"时,状态不会在容器启动之间持久化。下面我们将展示如何将状态保存到主机系统上的文件中。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +#: ../../source/docker/persist-superlink-state.rst:7 msgid "" -"This tutorial will show you how to use Flower to build a federated " -"version of an existing machine learning workload. We are using PyTorch to" -" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " -"introduce this machine learning task with a centralized training approach" -" based on the `Deep Learning with PyTorch " -"`_ " -"tutorial. Then, we build upon the centralized training code to run the " -"training in a federated fashion." +"If you want to persist the state of the SuperLink on your host system, " +"all you need to do is specify a directory where you want to save the file" +" on your host system and a name for the database file." 
msgstr "" -"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch 在 CIFAR-10 " -"数据集上训练一个卷积神经网络。首先,我们基于 \"Deep Learning with PyTorch " -"`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/docker/persist-superlink-state.rst:11 msgid "" -"We begin with a brief description of the centralized CNN training code. " -"If you want a more in-depth explanation of what's going on then have a " -"look at the official `PyTorch tutorial " -"`_." +"By default, the SuperLink container runs with a non-root user called " +"``app`` with the user ID ``49999``. It is recommended to create a new " +"directory and change the user ID of the directory to ``49999`` to ensure " +"the mounted directory has the proper permissions." msgstr "" -"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " -"`_。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +#: ../../source/docker/persist-superlink-state.rst:21 +#, fuzzy msgid "" -"Let's create a new file called :code:`cifar.py` with all the components " -"required for a traditional (centralized) training on CIFAR-10. First, all" -" required packages (such as :code:`torch` and :code:`torchvision`) need " -"to be imported. You can see that we do not import any package for " -"federated learning. You can keep all these imports as they are even when " -"we add the federated learning components at a later point." +"In the example below, we create a new directory called ``state``, change " +"the user ID and tell Docker via the flag ``--volume`` to mount the local " +"``state`` directory into the ``/app/state`` directory of the container. " +"Lastly, we use the flag ``--database`` to specify the name of the " +"database file." 
msgstr "" -"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " -"传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " -":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" +"如果想在主机系统上持久保存服务器的状态,只需在主机系统上指定保存文件的路径和数据库文件的名称即可。在下面的示例中,我们通过标志 ``-v`` 告诉" +" Docker 将用户的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外,我们使用标志 ``--database``" +" 来指定数据库文件的名称。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +#: ../../source/docker/persist-superlink-state.rst:36 +#, fuzzy msgid "" -"As already mentioned we will use the CIFAR-10 dataset for this machine " -"learning workload. The model architecture (a very simple Convolutional " -"Neural Network) is defined in :code:`class Net()`." +"As soon as the SuperLink starts, the file ``state.db`` is created in the " +"``state`` directory on your host system. If the file already exists, the " +"SuperLink tries to restore the state from the file. To start the " +"SuperLink with an empty database, ensure that there is no database called" +" ``state.db`` in the ``state`` directory (``rm state.db``) before you " +"execute the ``docker run`` command above." msgstr "" -"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " -"中定义。" +"服务器一启动,就会在主机系统的用户主目录下创建文件 " +"``state.db``。如果该文件已经存在,服务器会尝试从该文件恢复状态。要以空数据库启动服务器,只需删除 ``state.db`` 文件即可。" + +#: ../../source/docker/pin-version.rst:2 +#, fuzzy +msgid "Pin a Docker Image to a Specific Version" +msgstr "将 Docker 映像固定到特定版本" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +#: ../../source/docker/pin-version.rst:4 +#, fuzzy msgid "" -"The :code:`load_data()` function loads the CIFAR-10 training and test " -"sets. The :code:`transform` normalized the data after loading." +"It may happen that we update the images behind the tags. Such updates " +"usually include security updates of system dependencies that should not " +"change the functionality of Flower. 
However, if you want to ensure that " +"you use a fixed version of the Docker image in your deployments, you can " +"`specify the digest " +"`_ of the image instead of the tag." msgstr "" -":code:`load_data()` 函数加载 CIFAR-10 " -"训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" +"我们可能会更新标签后面的图像。此类更新通常包括系统依赖项的安全更新,不会改变 Flower " +"的功能。不过,如果您想确保始终使用同一张图片,可以指定图片的哈希值而不是标签。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +#: ../../source/docker/pin-version.rst:14 +#, fuzzy msgid "" -"We now need to define the training (function :code:`train()`) which loops" -" over the training set, measures the loss, backpropagates it, and then " -"takes one optimizer step for each batch of training examples." -msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" +"The following command returns the current image digest referenced by the " +":substitution-code:`superlink:|stable_flwr_version|` tag:" +msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` 标记引用的当前图像哈希值:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 -msgid "" -"The evaluation of the model is defined in the function :code:`test()`. " -"The function loops over all test samples and measures the loss of the " -"model based on the test dataset." -msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" +#: ../../source/docker/pin-version.rst:23 +msgid "This will output" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 -msgid "" -"Having defined the data loading, model architecture, training, and " -"evaluation we can put everything together and train our CNN on CIFAR-10." 
-msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" +#: ../../source/docker/pin-version.rst:30 +#, fuzzy +msgid "Next, we can pin the digest when running a new SuperLink container:" +msgstr "接下来,我们可以在运行新服务器容器时将哈希值固定下来:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 -msgid "" -"So far, this should all look fairly familiar if you've used PyTorch " -"before. Let's take the next step and use what we've built to create a " -"simple federated learning system consisting of one server and two " -"clients." +#: ../../source/docker/run-as-root-user.rst:2 +msgid "Run with Root User Privileges" msgstr "" -"到目前为止,如果你以前用过 " -"PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +#: ../../source/docker/run-as-root-user.rst:4 msgid "" -"The simple machine learning project discussed in the previous section " -"trains the model on a single dataset (CIFAR-10), we call this centralized" -" learning. This concept of centralized learning, as shown in the previous" -" section, is probably known to most of you, and many of you have used it " -"previously. Normally, if you'd want to run machine learning workloads in " -"a federated fashion, then you'd have to change most of your code and set " -"everything up from scratch. This can be a considerable effort." -msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" +"Flower Docker images, by default, run with a non-root user " +"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is " +"**not recommended** unless it is necessary for specific tasks during the " +"build process." 
+msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +#: ../../source/docker/run-as-root-user.rst:8 msgid "" -"However, with Flower you can evolve your pre-existing code into a " -"federated learning setup without the need for a major rewrite." -msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" +"Always make sure to run the container as a non-root user in production to" +" maintain security best practices." +msgstr "" + +#: ../../source/docker/run-as-root-user.rst:12 +msgid "Run a Container with Root User Privileges" +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +#: ../../source/docker/run-as-root-user.rst:14 msgid "" -"The concept is easy to understand. We have to start a *server* and then " -"use the code in :code:`cifar.py` for the *clients* that are connected to " -"the *server*. The *server* sends model parameters to the clients. The " -"*clients* run the training and update the parameters. The updated " -"parameters are sent back to the *server* which averages all received " -"parameter updates. This describes one round of the federated learning " -"process and we repeat this for multiple rounds." +"Run the Docker image with the ``-u`` flag and specify ``root`` as the " +"username:" msgstr "" -"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 -msgid "" -"Finally, we will define our *client* logic in :code:`client.py` and build" -" upon the previously defined centralized training in :code:`cifar.py`. " -"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " -"update the parameters on our PyTorch model:" +#: ../../source/docker/run-as-root-user.rst:21 +msgid "This command will run the Docker container with root user privileges." 
+msgstr "" + +#: ../../source/docker/run-as-root-user.rst:24 +msgid "Run the Build Process with Root User Privileges" msgstr "" -"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " -"中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " -"PyTorch 模型的参数:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +#: ../../source/docker/run-as-root-user.rst:26 msgid "" -"Implementing a Flower *client* basically means implementing a subclass of" -" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " -"Our implementation will be based on :code:`flwr.client.NumPyClient` and " -"we'll call it :code:`CifarClient`. :code:`NumPyClient` is slightly easier" -" to implement than :code:`Client` if you use a framework with good NumPy " -"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " -"some of the boilerplate that would otherwise be necessary. " -":code:`CifarClient` needs to implement four methods, two methods for " -"getting/setting model parameters, one method for training the model, and " -"one method for testing the model:" +"If you want to switch to the root user during the build process of the " +"Docker image to install missing system dependencies, you can use the " +"``USER root`` directive within your Dockerfile." 
msgstr "" -"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " -":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -":code:`flwr.client.NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy" -" 互操作性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " -":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " -"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 -msgid ":code:`set_parameters`" -msgstr ":code:`set_parameters`" +#: ../../source/docker/run-as-root-user.rst:30 +#, fuzzy +msgid "SuperNode Dockerfile" +msgstr "创建超级节点 Dockerfile" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 -msgid "get the updated local model weights and return them to the server" -msgstr "获取更新后的本地模型参数并发送回服务器" +#: ../../source/docker/run-as-subprocess.rst:2 +#, fuzzy +msgid "Run ClientApp as a Subprocess" +msgstr "运行分类器和测试" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 -msgid "return the local loss and accuracy to the server" -msgstr "向服务器返回本地损失值和精确度" +#: ../../source/docker/run-as-subprocess.rst:4 +msgid "" +"In this mode, the ClientApp is executed as a subprocess within the " +"SuperNode Docker container, rather than running in a separate container. " +"This approach reduces the number of running containers, which can be " +"beneficial for environments with limited resources. However, it also " +"means that the ClientApp is no longer isolated from the SuperNode, which " +"may introduce additional security concerns." +msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +#: ../../source/docker/run-as-subprocess.rst:13 msgid "" -"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" -" use of the functions :code:`train()` and :code:`test()` previously " -"defined in :code:`cifar.py`. 
So what we really do here is we tell Flower " -"through our :code:`NumPyClient` subclass which of our already defined " -"functions to call for training and evaluation. We included type " -"annotations to give you a better understanding of the data types that get" -" passed around." +"Before running the ClientApp as a subprocess, ensure that the FAB " +"dependencies have been installed in the SuperNode images. This can be " +"done by extending the SuperNode image:" msgstr "" -"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " -":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " -":code:`NumPyClient` 子类告知 Flower " -"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +#: ../../source/docker/run-as-subprocess.rst:17 +#, fuzzy +msgid "Dockerfile.supernode" +msgstr "Flower 服务器" + +#: ../../source/docker/run-as-subprocess.rst:31 +#, fuzzy msgid "" -"All that's left to do it to define a function that loads both model and " -"data, creates a :code:`CifarClient`, and starts this client. You load " -"your data and model by using :code:`cifar.py`. 
Start :code:`CifarClient` " -"with the function :code:`fl.client.start_client()` by pointing it at the " -"same IP address we used in :code:`server.py`:" -msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" +"Next, build the SuperNode Docker image by running the following command " +"in the directory where Dockerfile is located:" +msgstr "接下来,我们在 Dockerfile 和 ClientApp 代码所在的目录下运行以下命令,构建 SuperNode Docker 映像。" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +#: ../../source/docker/run-as-subprocess.rst:39 +msgid "Run the ClientApp as a Subprocess" +msgstr "" + +#: ../../source/docker/run-as-subprocess.rst:41 msgid "" -"in each window (make sure that the server is running before you do so) " -"and see your (previously centralized) PyTorch project run federated " -"learning across two clients. Congratulations!" -msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" +"Start the SuperNode with the flag ``--isolation subprocess``, which tells" +" the SuperNode to execute the ClientApp as a subprocess:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "快速入门 iOS" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 msgid "" -"The full source code for this example: `PyTorch: From Centralized To " -"Federated (Code) `_. Our example is, of course, " -"somewhat over-simplified because both clients load the exact same " -"dataset, which isn't realistic. You're now prepared to explore this topic" -" further. How about using different subsets of CIFAR-10 on each client? " -"How about adding more clients?" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. 
These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." msgstr "" -"本示例的完整源代码为:`PyTorch: 从集中式到联合式 " -"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" -" CIFAR-10 子集会如何?增加更多客户端会如何?" -#: ../../source/explanation-differential-privacy.rst:2 -#: ../../source/explanation-differential-privacy.rst:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 -msgid "Differential Privacy" -msgstr "差分隐私" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:12 +msgid "" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see Limitations_." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:3 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:15 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 #, fuzzy -msgid "" -"The information in datasets like healthcare, financial transactions, user" -" preferences, etc., is valuable and has the potential for scientific " -"breakthroughs and provides important business insights. However, such " -"data is also sensitive and there is a risk of compromising individual " -"privacy." 
-msgstr "医疗保健、金融交易、用户偏好等数据集中的信息非常宝贵,有可能带来科学突破并提供重要的商业见解。然而,这些数据也是敏感数据,存在泄露个人隐私的风险。" +msgid "Before you start, make sure that:" +msgstr "开始之前,请确保 Docker 守护进程正在运行:" -#: ../../source/explanation-differential-privacy.rst:6 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 #, fuzzy -msgid "" -"Traditional methods like anonymization alone would not work because of " -"attacks like Re-identification and Data Linkage. That's where " -"differential privacy comes in. It provides the possibility of analyzing " -"data while ensuring the privacy of individuals." -msgstr "单靠匿名等传统方法是行不通的,因为会受到重新识别和数据链接等攻击。这就是差异化隐私的用武之地。它提供了在分析数据的同时确保个人隐私的可能性。" +msgid "The Docker daemon is running." +msgstr "验证 Docker 守护进程是否正在运行。" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:19 +msgid "Docker Compose is `installed `_." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:12 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:25 #, fuzzy +msgid "Run the Quickstart Example" +msgstr "示例请求" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:27 msgid "" -"Imagine two datasets that are identical except for a single record (for " -"instance, Alice's data). Differential Privacy (DP) guarantees that any " -"analysis (M), like calculating the average income, will produce nearly " -"identical results for both datasets (O and O' would be similar). 
This " -"preserves group patterns while obscuring individual details, ensuring the" -" individual's information remains hidden in the crowd." +"Clone the quickstart example you like to run. For example, ``quickstart-" +"pytorch``:" msgstr "" -"试想一下,两个数据集除了一条记录(例如 Alice " -"的数据)之外完全相同。差分隐私(DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的结果(O 和 O' " -"将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的信息隐藏在人群中。" -#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:35 +msgid "" +"Download the `compose.yml " +"`_" +" file into the example directory:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:44 #, fuzzy -msgid "DP Intro" -msgstr "DP 介绍" +msgid "Build and start the services using the following command:" +msgstr "运行以下命令激活 virtualenv:" -#: ../../source/explanation-differential-privacy.rst:22 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:50 #, fuzzy msgid "" -"One of the most commonly used mechanisms to achieve DP is adding enough " -"noise to the output of the analysis to mask the contribution of each " -"individual in the data while preserving the overall accuracy of the " -"analysis." 
-msgstr "实现 DP 的最常用机制之一是在分析输出中加入足够的噪音,以掩盖数据中每个个体的贡献,同时保持分析的整体准确性。" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/explanation-differential-privacy.rst:25 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:52 +#: ../../source/docker/tutorial-quickstart-docker.rst:324 #, fuzzy -msgid "Formal Definition" -msgstr "编译 ProtoBuf 定义" +msgid "pyproject.toml" +msgstr "或 ``pyproject.toml```:" -#: ../../source/explanation-differential-privacy.rst:26 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 msgid "" -"Differential Privacy (DP) provides statistical guarantees against the " -"information an adversary can infer through the output of a randomized " -"algorithm. It provides an unconditional upper bound on the influence of a" -" single individual on the output of the algorithm by adding noise [1]. A " -"randomized mechanism M provides (:math:`\\epsilon`, " -":math:`\\delta`)-differential privacy if for any two neighboring " -"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " -"record, and for all possible outputs S ⊆ Range(A):" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." msgstr "" -"差分隐私(Differential " -"Privacy,DP)针对对手通过随机算法的输出所能推断出的信息提供统计保证。它为单个个体通过添加噪声对算法输出的影响提供了一个无条件的上限[1]。如果任意两个相邻的数据库D" -" :sub:`1`和D :sub:`2`只有一条记录不同,并且对于所有可能的输出S ⊆ " -"Range(A),随机化机制M提供(:math:`epsilon`,:math:`\\delta`)差异隐私:" -#: ../../source/explanation-differential-privacy.rst:32 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:64 msgid "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" +"In this example, ``local-deployment`` has been used. 
Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." msgstr "" -"\\small\n" -"P[M(D_{1} \\in A)] \\leq e^{\\delta} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/explanation-differential-privacy.rst:38 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:68 #, fuzzy +msgid "Run the example:" +msgstr "将示例联邦化" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:74 +msgid "Follow the logs of the SuperExec service:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 msgid "" -"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " -"metric of privacy loss. It also controls the privacy-utility trade-off; " -"lower :math:`\\epsilon` values indicate higher levels of privacy but are " -"likely to reduce utility as well. The :math:`\\delta` parameter accounts " -"for a small probability on which the upper bound :math:`\\epsilon` does " -"not hold. The amount of noise needed to achieve differential privacy is " -"proportional to the sensitivity of the output, which measures the maximum" -" change in the output due to the inclusion or removal of a single record." +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." 
msgstr "" -":math:`\\epsilon`参数也称为隐私预算,是衡量隐私损失的指标。较低的 :math:`\\epsilon` " -"值表示较高的隐私级别,但也可能降低效用。:math:`\\delta`参数考虑了:math:`\\epsilon`上限不成立的小概率。实现差异化隐私所需的噪声量与输出的灵敏度成正比,而输出的灵敏度是指由于包含或删除一条记录而导致的输出的最大变化。" -#: ../../source/explanation-differential-privacy.rst:45 -#, fuzzy -msgid "Differential Privacy in Machine Learning" -msgstr "差分隐私" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:84 +msgid "Run a Different Quickstart Example" +msgstr "" -#: ../../source/explanation-differential-privacy.rst:46 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:86 msgid "" -"DP can be utilized in machine learning to preserve the privacy of the " -"training data. Differentially private machine learning algorithms are " -"designed in a way to prevent the algorithm to learn any specific " -"information about any individual data points and subsequently prevent the" -" model from revealing sensitive information. Depending on the stage at " -"which noise is introduced, various methods exist for applying DP to " -"machine learning algorithms. One approach involves adding noise to the " -"training data (either to the features or labels), while another method " -"entails injecting noise into the gradients of the loss function during " -"model training. Additionally, such noise can be incorporated into the " -"model's output." +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:93 +msgid "After that, you can repeat the steps above." 
msgstr "" -"机器学习中可以利用 DP " -"来保护训练数据的隐私。差分保密机器学习算法的设计方式是防止算法学习到任何单个数据点的任何特定信息,从而防止模型泄露敏感信息。根据引入噪声的阶段,有多种方法可将" -" DP " -"应用于机器学习算法。一种方法是在训练数据(特征或标签)中添加噪声,另一种方法是在模型训练过程中向损失函数的梯度注入噪声。此外,这种噪声还可以被纳入模型的输出中。" -#: ../../source/explanation-differential-privacy.rst:53 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:96 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 #, fuzzy -msgid "Differential Privacy in Federated Learning" -msgstr "扩大联邦学习的规模" +msgid "Limitations" +msgstr "运行模拟" -#: ../../source/explanation-differential-privacy.rst:54 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 #, fuzzy -msgid "" -"Federated learning is a data minimization approach that allows multiple " -"parties to collaboratively train a model without sharing their raw data. " -"However, federated learning also introduces new privacy challenges. The " -"model updates between parties and the central server can leak information" -" about the local data. These leaks can be exploited by attacks such as " -"membership inference and property inference attacks, or model inversion " -"attacks." -msgstr "联合学习是一种数据最小化方法,允许多方在不共享原始数据的情况下合作训练一个模型。然而,联合学习也带来了新的隐私挑战。各方与中央服务器之间的模型更新可能会泄露本地数据信息。这些泄漏信息可能会被攻击利用,如成员推断攻击、属性推断攻击或模型反转攻击。" +msgid "Quickstart Example" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:58 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 #, fuzzy -msgid "" -"DP can play a crucial role in federated learning to provide privacy for " -"the clients' data." 
-msgstr "DP 可以在联合学习中发挥重要作用,为客户数据提供隐私保护。" +msgid "quickstart-fastai" +msgstr "快速入门 fastai" -#: ../../source/explanation-differential-privacy.rst:60 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:123 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "无" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 #, fuzzy -msgid "" -"Depending on the granularity of privacy provision or the location of " -"noise addition, different forms of DP exist in federated learning. In " -"this explainer, we focus on two approaches of DP utilization in federated" -" learning based on where the noise is added: at the server (also known as" -" the center) or at the client (also known as the local)." -msgstr "" -"根据提供隐私的粒度或添加噪声的位置,联合学习中存在不同形式的 DP。在本说明中,我们将根据添加噪声的位置,重点介绍联合学习中利用 DP " -"的两种方法:在服务器(也称为中心)或客户端(也称为本地)。" +msgid "quickstart-huggingface" +msgstr "快速入门教程" -#: ../../source/explanation-differential-privacy.rst:63 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 #, fuzzy -msgid "" -"**Central Differential Privacy**: DP is applied by the server and the " -"goal is to prevent the aggregated model from leaking information about " -"each client's data." 
-msgstr "**中央差分隐私**: DP 由服务器应用,目标是防止聚合模型泄露每个客户的数据信息。" +msgid "quickstart-jax" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:65 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:125 #, fuzzy msgid "" -"**Local Differential Privacy**: DP is applied on the client side before " -"sending any information to the server and the goal is to prevent the " -"updates that are sent to the server from leaking any information about " -"the client's data." -msgstr "**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 DP,目的是防止向服务器发送的更新泄露任何有关客户端数据的信息。" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." +msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/how-to-use-differential-privacy.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 #, fuzzy -msgid "Central Differential Privacy" -msgstr "差分隐私" +msgid "quickstart-mlcube" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:69 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 #, fuzzy -msgid "" -"In this approach, which is also known as user-level DP, the central " -"server is responsible for adding noise to the globally aggregated " -"parameters. It should be noted that trust in the server is required." -msgstr "在这种方法(也称为用户级 DP)中,中央服务器负责在全局汇总参数中添加噪声。需要注意的是,这需要对服务器的信任。" +msgid "quickstart-mlx" +msgstr "快速入门 JAX" -#: ../../source/explanation-differential-privacy.rst:76 -#, fuzzy +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 msgid "" -"While there are various ways to implement central DP in federated " -"learning, we concentrate on the algorithms proposed by [2] and [3]. 
The " -"overall approach is to clip the model updates sent by the clients and add" -" some amount of noise to the aggregated model. In each iteration, a " -"random set of clients is chosen with a specific probability for training." -" Each client performs local training on its own data. The update of each " -"client is then clipped by some value `S` (sensitivity `S`). This would " -"limit the impact of any individual client which is crucial for privacy " -"and often beneficial for robustness. A common approach to achieve this is" -" by restricting the `L2` norm of the clients' model updates, ensuring " -"that larger updates are scaled down to fit within the norm `S`." +"`Requires to run on macOS with Apple Silicon `_." msgstr "" -"虽然在联合学习中实现中央数据处理的方法有很多种,但我们将重点放在[2]和[3]提出的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪声。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数据进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限制任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。实现这一点的常用方法是限制客户机模型更新的" -" `L2` 准则,确保较大的更新被缩减以适应 `S` 准则。" -#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 #, fuzzy -msgid "clipping" -msgstr "剪贴" +msgid "quickstart-monai" +msgstr "快速入门 JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#, fuzzy +msgid "quickstart-pandas" +msgstr "快速入门Pandas" -#: ../../source/explanation-differential-privacy.rst:89 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 #, fuzzy +msgid "quickstart-pytorch-lightning" +msgstr "快速入门 PyTorch Lightning" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 msgid "" -"Afterwards, the Gaussian mechanism is used to add noise in order to " -"distort the sum of all clients' updates. The amount of noise is scaled to" -" the sensitivity value to obtain a privacy guarantee. The Gaussian " -"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " -"noise_scale * S ) / (number of sampled clients)`." 
+"Requires an older pip version that is not supported by the Flower Docker "
+"images."
 msgstr ""
-"然后,使用高斯机制添加噪声,以扭曲所有客户端的更新总和。噪声量与灵敏度值成正比,以获得隐私保证。高斯机制的噪声采样范围为 `N (0, σ²)` "
-",其中 σ = ( 噪声规模 * S ) / (采样客户数)`。"

-#: ../../source/explanation-differential-privacy.rst:94
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120
 #, fuzzy
-msgid "Clipping"
-msgstr "剪贴"
+msgid "quickstart-pytorch"
+msgstr "PyTorch快速入门"

-#: ../../source/explanation-differential-privacy.rst:96
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122
 #, fuzzy
-msgid ""
-"There are two forms of clipping commonly used in Central DP: Fixed "
-"Clipping and Adaptive Clipping."
-msgstr "中央处理器常用的剪切有两种形式:固定剪切和自适应剪切。"
+msgid "quickstart-sklearn-tabular"
+msgstr "scikit-learn快速入门"

-#: ../../source/explanation-differential-privacy.rst:98
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:124
 #, fuzzy
-msgid ""
-"**Fixed Clipping** : A predefined fix threshold is set for the magnitude "
-"of clients' updates. Any update exceeding this threshold is clipped back "
-"to the threshold value."
-msgstr "** 固定削波** : 为客户端更新的大小设置了一个预定义的固定阈值。任何超过该阈值的更新都会被剪切回阈值。"
+msgid "quickstart-tabnet"
+msgstr "快速入门 TabNet"

-#: ../../source/explanation-differential-privacy.rst:100
+#: ../../source/docker/run-quickstart-examples-docker-compose.rst:126
 #, fuzzy
-msgid ""
-"**Adaptive Clipping** : The clipping threshold dynamically adjusts based "
-"on the observed update distribution [4]. It means that the clipping value"
-" is tuned during the rounds with respect to the quantile of the update "
-"norm distribution."
-msgstr "** 自适应削波** : 削波阈值根据观察到的更新分布动态调整[4]。这意味着,在各轮中,会根据更新规范分布的量化值调整削波值。"
+msgid "quickstart-tensorflow"
+msgstr "快速入门 TensorFlow"

-#: ../../source/explanation-differential-privacy.rst:102
-#, fuzzy
-msgid ""
-"The choice between fixed and adaptive clipping depends on various factors"
-" such as privacy requirements, data distribution, model complexity, and "
-"others."
-msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型复杂性等。" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:127 +msgid "Only runs on AMD64." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:-1 -#: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/how-to-use-differential-privacy.rst:96 +#: ../../source/docker/set-environment-variables.rst:2 #, fuzzy -msgid "Local Differential Privacy" -msgstr "差分隐私" +msgid "Set Environment Variables" +msgstr "设置编码环境" -#: ../../source/explanation-differential-privacy.rst:107 +#: ../../source/docker/set-environment-variables.rst:4 #, fuzzy msgid "" -"In this approach, each client is responsible for performing DP. Local DP " -"avoids the need for a fully trusted aggregator, but it should be noted " -"that local DP leads to a decrease in accuracy but better privacy in " -"comparison to central DP." -msgstr "" -"在这种方法中,每个客户端都负责执行 DP。本地 DP 避免了对完全可信的聚合器的需求,但需要注意的是,与中央 DP 相比,本地 DP " -"会降低准确性,但却能更好地保护隐私。" +"To set a variable inside a Docker container, you can use the ``-e " +"=`` flag. Multiple ``-e`` flags can be used to set multiple " +"environment variables for a container." +msgstr "要在 Docker 容器内设置变量,可以使用 ``-e =`` 标志。" -#: ../../source/explanation-differential-privacy.rst:116 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:2 #, fuzzy -msgid "In this explainer, we focus on two forms of achieving Local DP:" -msgstr "在本说明中,我们将重点介绍实现本地 DP 的两种形式:" +msgid "Deploy Flower on Multiple Machines with Docker Compose" +msgstr "快速入门 iOS" -#: ../../source/explanation-differential-privacy.rst:118 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:4 msgid "" -"Each client adds noise to the local updates before sending them to the " -"server. 
To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " -"the sensitivity of the local model to be ∆, Gaussian noise is applied " -"with a noise scale of σ where:" +"This guide will help you set up a Flower project on multiple machines " +"using Docker Compose." msgstr "" -"每个客户端在向服务器发送本地更新之前,都会在本地更新中加入噪声。为了实现(:math:`\\epsilon`, " -":math:`\\delta`)-DP,考虑到本地模型的灵敏度为 ∆,应用了高斯噪声,噪声尺度为 σ,其中:" -#: ../../source/explanation-differential-privacy.rst:120 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:7 msgid "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" +"You will learn how to run the Flower client and server components on two " +"separate machines, with Flower configured to use TLS encryption and " +"persist SuperLink state across restarts. A server consists of a SuperLink" +" and ``SuperExec``. For more details about the Flower architecture, refer" +" to the :doc:`../explanation-flower-architecture` explainer page." msgstr "" -"\\small\n" -"\\frac{∆ \\times \\sqrt{2 \\times " -"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" -"\n" -#: ../../source/explanation-differential-privacy.rst:125 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:13 msgid "" -"Each client adds noise to the gradients of the model during the local " -"training (DP-SGD). More specifically, in this approach, gradients are " -"clipped and an amount of calibrated noise is injected into the gradients." -msgstr "在局部训练过程中,每个客户端都会向模型的梯度添加噪声(DP-SGD)。更具体地说,在这种方法中,梯度会被剪切,并在梯度中注入一定量的校准噪声。" +"This guide assumes you have completed the :doc:`tutorial-quickstart-" +"docker-compose` tutorial. It is highly recommended that you follow and " +"understand the contents of that tutorial before proceeding with this " +"guide." 
+msgstr "" -#: ../../source/explanation-differential-privacy.rst:128 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:20 +msgid "Before you begin, make sure you have the following prerequisites:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:23 +msgid "The Docker daemon is running on your local machine and the remote machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:24 msgid "" -"Please note that these two approaches are providing privacy at different " -"levels." -msgstr "请注意,这两种方法提供了不同层次的隐私。" +"Docker Compose V2 is installed on both your local machine and the remote " +"machine." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:131 -#, fuzzy -msgid "**References:**" -msgstr "参考资料" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:25 +msgid "You can connect to the remote machine from your local machine." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:133 -#, fuzzy -msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." -msgstr "[1] Dwork 等:《差分隐私的算法基础》。" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:26 +msgid "Ports ``9091`` and ``9093`` are accessible on the remote machine." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:135 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:30 msgid "" -"[2] McMahan et al. Learning Differentially Private Recurrent Language " -"Models." +"The guide uses the |quickstart_sklearn_tabular|_ example as an example " +"project." msgstr "" -"McMahan, H. Brendan等. \"Learning differentially private recurrent " -"language models.\" arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/explanation-differential-privacy.rst:137 -#, fuzzy +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:32 msgid "" -"[3] Geyer et al. Differentially Private Federated Learning: A Client " -"Level Perspective." 
-msgstr "[3] Geyer 等人。差异化化私人联合学习:客户层面的视角。" +"If your project has a different name or location, please remember to " +"adjust the commands/paths accordingly." +msgstr "" -#: ../../source/explanation-differential-privacy.rst:139 -#, fuzzy -msgid "[4] Galen et al. Differentially Private Learning with Adaptive Clipping." +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:36 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:22 +#: ../../source/docker/tutorial-quickstart-docker.rst:19 +msgid "Step 1: Set Up" msgstr "" -"Andrew, Galen等. \"Differentially private learning with adaptive " -"clipping.\" Advances in Neural Information Processing Systems 34 (2021): " -"17455-17466." -#: ../../source/explanation-federated-evaluation.rst:2 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 -msgid "Federated evaluation" -msgstr "联邦学习评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:38 +msgid "Clone the Flower repository and change to the ``distributed`` directory:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:4 -msgid "" -"There are two main approaches to evaluating models in federated learning " -"systems: centralized (or server-side) evaluation and federated (or " -"client-side) evaluation." -msgstr "评估联合学习系统中的模型主要有两种方法:集中(或服务器端)评估和联邦(或客户端)评估。" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:45 +msgid "Get the IP address from the remote machine and save it for later." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:8 -msgid "Centralized Evaluation" -msgstr "集中评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:46 +msgid "" +"Use the ``certs.yml`` Compose file to generate your own self-signed " +"certificates. If you have certificates, you can continue with Step 2." 
+msgstr "" -#: ../../source/explanation-federated-evaluation.rst:11 -msgid "Built-In Strategies" -msgstr "内置策略" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:51 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:221 +msgid "These certificates should be used only for development purposes." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:13 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:53 msgid "" -"All built-in strategies support centralized evaluation by providing an " -"evaluation function during initialization. An evaluation function is any " -"function that can take the current global model parameters as input and " -"return evaluation results:" -msgstr "所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任何可以将当前全局模型参数作为输入并返回评估结果的函数:" - -#: ../../source/explanation-federated-evaluation.rst:58 -msgid "Custom Strategies" -msgstr "定制策略" +"For production environments, you may have to use dedicated services to " +"obtain your certificates." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:60 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:56 msgid "" -"The :code:`Strategy` abstraction provides a method called " -":code:`evaluate` that can directly be used to evaluate the current global" -" model parameters. The current server implementation calls " -":code:`evaluate` after parameter aggregation and before federated " -"evaluation (see next paragraph)." +"First, set the environment variables ``SUPERLINK_IP`` and " +"``SUPEREXEC_IP`` with the IP address from the remote machine. 
For " +"example, if the IP is ``192.168.2.33``, execute:" msgstr "" -":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` " -"的方法,可直接用于评估当前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下段)。" -#: ../../source/explanation-federated-evaluation.rst:65 -msgid "Federated Evaluation" -msgstr "联邦评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:65 +msgid "Next, generate the self-signed certificates:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:68 -msgid "Implementing Federated Evaluation" -msgstr "实现联邦评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:72 +msgid "Step 2: Copy the Server Compose Files" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:70 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:74 msgid "" -"Client-side evaluation happens in the :code:`Client.evaluate` method and " -"can be configured from the server side." -msgstr "客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" +"Use the method that works best for you to copy the ``server`` directory, " +"the certificates, and your Flower project to the remote machine." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:101 -msgid "Configuring Federated Evaluation" -msgstr "配置联邦评估" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:77 +msgid "For example, you can use ``scp`` to copy the directories:" +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:87 +#, fuzzy +msgid "Step 3: Start the Flower Server Components" +msgstr "然后,我们启动服务器:" -#: ../../source/explanation-federated-evaluation.rst:103 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:89 msgid "" -"Federated evaluation can be configured from the server side. 
Built-in " -"strategies support the following arguments:" -msgstr "联邦评估可从服务器端进行配置。内置策略支持以下参数:" +"Log into the remote machine using ``ssh`` and run the following command " +"to start the SuperLink and SuperExec services:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:105 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:102 msgid "" -":code:`fraction_evaluate`: a :code:`float` defining the fraction of " -"clients that will be selected for evaluation. If " -":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " -"are connected to the server, then :code:`10` will be randomly selected " -"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " -"federated evaluation will be disabled." +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``server`` Docker Compose files." msgstr "" -":code:`fraction_evaluate`: :code:`float`,定义了被选中进行评估的客户端的比例。如果 " -":code:`fraction_evaluate` 设置为 :code:`0.1`,并且 :code:`100` 个客户端连接到服务器,那么 " -":code:`10` 个客户端将被随机选中进行评估。如果 :code:`fraction_evaluate` 设置为 " -":code:`0.0`,联邦评估将被禁用。" -#: ../../source/explanation-federated-evaluation.rst:106 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:105 +msgid "Go back to your terminal on your local machine." +msgstr "" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:108 +#, fuzzy +msgid "Step 4: Start the Flower Client Components" +msgstr "然后,我们启动服务器:" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:110 msgid "" -":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " -"clients to be selected for evaluation. If :code:`fraction_evaluate` is " -"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " -":code:`100` clients are connected to the server, then :code:`20` clients " -"will be selected for evaluation." 
+"On your local machine, run the following command to start the client " +"components:" msgstr "" -":code:`min_evaluate_clients`:一个 :code:`int`,需要评估的客户的最小数量。如果 " -":code:`fraction_evaluate` 设置为 :code:`0.1`,:code:`min_evaluate_clients` " -"设置为 20,并且有 :code:`100` 个客户端已连接到服务器,那么 :code:`20` 个客户端将被选中进行评估。" -#: ../../source/explanation-federated-evaluation.rst:107 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:120 msgid "" -":code:`min_available_clients`: an :code:`int` that defines the minimum " -"number of clients which need to be connected to the server before a round" -" of federated evaluation can start. If fewer than " -":code:`min_available_clients` are connected to the server, the server " -"will wait until more clients are connected before it continues to sample " -"clients for evaluation." +"The Path of the ``PROJECT_DIR`` should be relative to the location of the" +" ``client`` Docker Compose files." msgstr "" -":code:`min_available_clients`: " -":code:`int`,定义了在一轮联邦评估开始之前,需要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 " -":code:`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评估。" -#: ../../source/explanation-federated-evaluation.rst:108 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:124 +#, fuzzy +msgid "Step 5: Run Your Flower Project" +msgstr "Flower 服务器。" + +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:126 msgid "" -":code:`on_evaluate_config_fn`: a function that returns a configuration " -"dictionary which will be sent to the selected clients. The function will " -"be called during each round and provides a convenient way to customize " -"client-side evaluation from the server side, for example, to configure " -"the number of validation steps performed." 
-msgstr "code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端评估,例如,配置执行的验证步骤数。" +"Specify the remote SuperExec IP addresses and the path to the root " +"certificate in the ``[tool.flwr.federations.remote-superexec]`` table in " +"the ``pyproject.toml`` file. Here, we have named our remote federation " +"``remote-superexec``:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:135 -msgid "Evaluating Local Model Updates During Training" -msgstr "评估训练期间的本地模型更新" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:130 +#, fuzzy +msgid "examples/quickstart-sklearn-tabular/pyproject.toml" +msgstr "scikit-learn快速入门" -#: ../../source/explanation-federated-evaluation.rst:137 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:139 msgid "" -"Model parameters can also be evaluated during training. " -":code:`Client.fit` can return arbitrary evaluation results as a " -"dictionary:" -msgstr "模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估结果:" +"The Path of the ``root-certificates`` should be relative to the location " +"of the ``pyproject.toml`` file." +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:177 -msgid "Full Code Example" -msgstr "完整代码示例" +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:142 +msgid "To run the project, execute:" +msgstr "" -#: ../../source/explanation-federated-evaluation.rst:179 +#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:148 msgid "" -"For a full code example that uses both centralized and federated " -"evaluation, see the *Advanced TensorFlow Example* (the same approach can " -"be applied to workloads implemented in any other framework): " -"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +"That's it! With these steps, you've set up Flower on two separate " +"machines and are ready to start using it." 
msgstr ""
-"有关同时使用集中评估和联邦评估的完整代码示例,请参阅 *Advanced TensorFlow "
-"Example*(同样的方法也可应用于任何其他框架中): "
-"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow"

-#: ../../source/fed/0000-20200102-fed-template.md:10
-msgid "FED Template"
-msgstr "FED 模板"
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:152
+msgid "Step 6: Clean Up"
+msgstr ""

-#: ../../source/fed/0000-20200102-fed-template.md:12
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12
-msgid "Table of Contents"
-msgstr "目录"
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:154
+#, fuzzy
+msgid "Shut down the Flower client components:"
+msgstr "关闭 Flower 客户端组件:"

-#: ../../source/fed/0000-20200102-fed-template.md:14
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14
-msgid "[Table of Contents](#table-of-contents)"
-msgstr "[目录](#table-of-contents)"
+#: ../../source/docker/tutorial-deploy-on-multiple-machines.rst:161
+msgid "Shut down the Flower server components and delete the SuperLink state:"
+msgstr ""

-#: ../../source/fed/0000-20200102-fed-template.md:15
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15
-msgid "[Summary](#summary)"
-msgstr "[总结](#summary)"
+#: ../../source/docker/tutorial-quickstart-docker.rst:2
+#, fuzzy
+msgid "Quickstart with Docker"
+msgstr "使用 Docker 快速入门"

-#: ../../source/fed/0000-20200102-fed-template.md:16
-#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16
-msgid "[Motivation](#motivation)"
-msgstr "[动机](#motivation)"
+#: ../../source/docker/tutorial-quickstart-docker.rst:4
+msgid ""
+"This quickstart aims to guide you through the process of containerizing a"
+" Flower project and running it end to end using Docker on your local "
+"machine."
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:17 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 -msgid "[Goals](#goals)" -msgstr "[目标](#goals)" +#: ../../source/docker/tutorial-quickstart-docker.rst:7 +msgid "" +"This tutorial does not use production-ready settings, so you can focus on" +" understanding the basic workflow that uses the minimum configurations." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:18 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 -msgid "[Non-Goals](#non-goals)" -msgstr "[非目标](#non-goals)" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:32 +#: ../../source/docker/tutorial-quickstart-docker.rst:21 +msgid "Create a new Flower project (PyTorch):" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:19 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 -msgid "[Proposal](#proposal)" -msgstr "[计划](#proposal)" +#: ../../source/docker/tutorial-quickstart-docker.rst:39 +msgid "Create a new Docker bridge network called ``flwr-network``:" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:20 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 -msgid "[Drawbacks](#drawbacks)" -msgstr "[缺点](#drawbacks)" +#: ../../source/docker/tutorial-quickstart-docker.rst:45 +msgid "" +"User-defined networks, such as ``flwr-network``, enable IP resolution of " +"container names, a feature absent in the default bridge network. This " +"simplifies quickstart example by avoiding the need to determine host IP " +"first." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:21 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 -msgid "[Alternatives Considered](#alternatives-considered)" -msgstr "[备选方案](#alternatives-considered)" +#: ../../source/docker/tutorial-quickstart-docker.rst:50 +#, fuzzy +msgid "Step 2: Start the SuperLink" +msgstr "然后,我们启动服务器:" -#: ../../source/fed/0000-20200102-fed-template.md:22 -msgid "[Appendix](#appendix)" -msgstr "[附录](#appendix)" - -#: ../../source/fed/0000-20200102-fed-template.md:24 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 -msgid "Summary" -msgstr "总结" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:62 +#: ../../source/docker/tutorial-quickstart-docker.rst:52 +#, fuzzy +msgid "Open your terminal and run:" +msgstr "打开另一台终端,启动第二个客户端:" -#: ../../source/fed/0000-20200102-fed-template.md:26 -msgid "\\[TODO - sentence 1: summary of the problem\\]" -msgstr "\\[TODO - 句子 1: 问题概括\\]" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "Understand the command" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:28 -msgid "\\[TODO - sentence 2: summary of the solution\\]" -msgstr "\\[TODO - 句子 2: 解决方案概括\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the " +"container to the same port of" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:30 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 -msgid "Motivation" -msgstr "动机" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the Driver API on" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:32 -#: ../../source/fed/0000-20200102-fed-template.md:36 -#: 
../../source/fed/0000-20200102-fed-template.md:40 -#: ../../source/fed/0000-20200102-fed-template.md:44 -#: ../../source/fed/0000-20200102-fed-template.md:48 -#: ../../source/fed/0000-20200102-fed-template.md:54 -#: ../../source/fed/0000-20200102-fed-template.md:58 -msgid "\\[TODO\\]" -msgstr "\\[TODO\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:34 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 -msgid "Goals" -msgstr "目标" +#: ../../source/docker/tutorial-quickstart-docker.rst:71 +#: ../../source/docker/tutorial-quickstart-docker.rst:108 +#: ../../source/docker/tutorial-quickstart-docker.rst:219 +#: ../../source/docker/tutorial-quickstart-docker.rst:309 +msgid "" +"``--network flwr-network``: Make the container join the network named " +"``flwr-network``." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:38 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 -msgid "Non-Goals" -msgstr "非目标" +#: ../../source/docker/tutorial-quickstart-docker.rst:72 +msgid "``--name superlink``: Assign the name ``superlink`` to the container." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:42 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 -msgid "Proposal" -msgstr "提案" +#: ../../source/docker/tutorial-quickstart-docker.rst:73 +#: ../../source/docker/tutorial-quickstart-docker.rst:110 +#: ../../source/docker/tutorial-quickstart-docker.rst:220 +#: ../../source/docker/tutorial-quickstart-docker.rst:311 +msgid "" +"``--detach``: Run the container in the background, freeing up the " +"terminal." 
+msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:46 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 -msgid "Drawbacks" -msgstr "缺点" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"tag of the image. The tag :substitution-code:`|stable_flwr_version|` " +"represents a :doc:`specific version ` of the image." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:50 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 -msgid "Alternatives Considered" -msgstr "备选方案" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--insecure``: This flag tells the container to operate in an insecure " +"mode, allowing" +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:52 -msgid "\\[Alternative 1\\]" -msgstr "\\[备选 1\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "unencrypted communication." +msgstr "" -#: ../../source/fed/0000-20200102-fed-template.md:56 -msgid "\\[Alternative 2\\]" -msgstr "\\[备选 2\\]" +#: ../../source/docker/tutorial-quickstart-docker.rst:80 +#, fuzzy +msgid "Step 3: Start the SuperNode" +msgstr "然后,我们启动服务器:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 -msgid "Flower Enhancement Doc" -msgstr "Flower 改善文档" +#: ../../source/docker/tutorial-quickstart-docker.rst:82 +msgid "Start two SuperNode containers." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 -msgid "[Enhancement Doc Template](#enhancement-doc-template)" -msgstr "[增强文档模版](#enhancement-doc-template)" +#: ../../source/docker/tutorial-quickstart-docker.rst:84 +msgid "Start the first container:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 -msgid "[Metadata](#metadata)" -msgstr "[描述数据](#metadata)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9094:9094``: Map port ``9094`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 -msgid "[Workflow](#workflow)" -msgstr "[工作流程](#workflow)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the host machine, allowing other services to access the SuperNode API on" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 -msgid "[GitHub Issues](#github-issues)" -msgstr "[GitHub 问题](#github-issues)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``http://localhost:9094``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 -msgid "[Google Docs](#google-docs)" -msgstr "[谷歌文档](#google-docs)" +#: ../../source/docker/tutorial-quickstart-docker.rst:109 +msgid "``--name supernode-1``: Assign the name ``supernode-1`` to the container." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 -msgid "A Flower Enhancement is a standardized development process to" -msgstr "改善 Flower 功能是一个标准化的开发流程,目的是" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``flwr/supernode:|stable_flwr_version|``: This is the name of the image " +"to be run and the specific tag" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 -msgid "provide a common structure for proposing larger changes" -msgstr "为提出更大规模的改动提供一个共同的结构" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "of the image." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 -msgid "ensure that the motivation for a change is clear" -msgstr "确保改动的动机明确" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at " +"the address" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 -msgid "persist project information in a version control system" -msgstr "将项目信息保存在版本控制系统中" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``superlink:9092``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 -msgid "document the motivation for impactful user-facing changes" -msgstr "记录面向用户的具有影响力的改动的动机" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--node-config \"partition-id=0 num-partitions=2\"``: Set the partition " +"ID to ``0`` and the" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 -msgid "reserve GitHub issues for tracking work in flight" -msgstr "保留 GitHub 问题,用于跟踪进行中的工作" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "number of partitions to ``2`` for the SuperNode configuration." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"ensure community participants can successfully drive changes to " -"completion across one or more releases while stakeholders are adequately " -"represented throughout the process" -msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" +"``--supernode-address 0.0.0.0:9094``: Set the address and port number " +"that the SuperNode" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 -msgid "Hence, an Enhancement Doc combines aspects of" -msgstr "因此,\"增强文件\"将以下方面结合起来" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "is listening on." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 -msgid "a feature, and effort-tracking document" -msgstr "一个功能和效力跟踪文档" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--isolation process``: Tells the SuperNode that the ClientApp is " +"created by separate" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 -msgid "a product requirements document" -msgstr "一个产品需要文档" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "independent process. The SuperNode does not attempt to create it." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 -msgid "a design document" -msgstr "一个设计文档" +#: ../../source/docker/tutorial-quickstart-docker.rst:124 +#, fuzzy +msgid "Start the second container:" +msgstr "启动服务器" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 -msgid "" -"into one file, which is created incrementally in collaboration with the " -"community." -msgstr "该文件是与社区合作逐步创建的。" +#: ../../source/docker/tutorial-quickstart-docker.rst:142 +msgid "Step 4: Start the ClientApp" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:144 msgid "" -"For far-fetching changes or features proposed to Flower, an abstraction " -"beyond a single GitHub issue or pull request is required to understand " -"and communicate upcoming changes to the project." +"The ClientApp Docker image comes with a pre-installed version of Flower " +"and serves as a base for building your own ClientApp image. In order to " +"install the FAB dependencies, you will need to create a Dockerfile that " +"extends the ClientApp image and installs the required dependencies." 
msgstr "" -"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求(pull " -"request)的抽象概念,以了解和沟通项目即将发生的变更。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 +#: ../../source/docker/tutorial-quickstart-docker.rst:149 msgid "" -"The purpose of this process is to reduce the amount of \"tribal " -"knowledge\" in our community. By moving decisions from Slack threads, " -"video calls, and hallway conversations into a well-tracked artifact, this" -" process aims to enhance communication and discoverability." +"Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste " +"the following code into it:" msgstr "" -"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " -"线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 +#: ../../source/docker/tutorial-quickstart-docker.rst:152 +#, fuzzy +msgid "Dockerfile.clientapp" +msgstr "Flower 客户端。" + +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "Understand the Dockerfile" +msgstr "创建超级节点 Dockerfile" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Roughly any larger, user-facing enhancement should follow the Enhancement" -" process. If an enhancement would be described in either written or " -"verbal communication to anyone besides the author or developer, then " -"consider creating an Enhancement Doc." -msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" +":substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line" +" specifies that the Docker image" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Similarly, any technical effort (refactoring, major architectural change)" -" that will impact a large section of the development community should " -"also be communicated widely. The Enhancement process is suited for this " -"even if it will have zero impact on the typical user or operator." 
-msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" +"to be built from is the ``flwr/clientapp image``, version :substitution-" +"code:`|stable_flwr_version|`." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``WORKDIR /app``: Set the working directory for the container to ``/app``." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"For small changes and additions, going through the Enhancement process " -"would be time-consuming and unnecessary. This includes, for example, " -"adding new Federated Learning algorithms, as these only add features " -"without changing how Flower works or is used." +"Any subsequent commands that reference a directory will be relative to " +"this directory." msgstr "" -"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变 \"Flower " -"\"的工作或使用方式。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Enhancements are different from feature requests, as they are already " -"providing a laid-out path for implementation and are championed by " -"members of the community." -msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" +"from the current working directory into the container's ``/app`` " +"directory." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"An Enhancement is captured in a Markdown file that follows a defined " -"template and a workflow to review and store enhancement docs for " -"reference — the Enhancement Doc." 
-msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" +"``RUN sed -i 's/.*flwr\\[simulation\\].*//' pyproject.toml``: Remove the " +"``flwr`` dependency" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 -msgid "Enhancement Doc Template" -msgstr "增强文档模板" +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy +msgid "from the ``pyproject.toml``." +msgstr "或 ``pyproject.toml```:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Each enhancement doc is provided as a Markdown file having the following " -"structure" -msgstr "每个增强文档都以 Markdown 文件的形式提供,其结构如下" +"``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install " +"command to" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 -msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" -msgstr "描述数据([如下所述](#metadata) 以 YAML 前言的形式出现)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "install the dependencies defined in the ``pyproject.toml`` file" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 -msgid "Title (same as in metadata)" -msgstr "标题(与描述数据中的标题相同)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"The ``-U`` flag indicates that any existing packages should be upgraded, " +"and" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 -msgid "Table of Contents (if needed)" -msgstr "目录(如有需要)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--no-cache-dir`` prevents pip from using the cache to speed up the " +"installation." 
+msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 -msgid "Notes/Constraints/Caveats (optional)" -msgstr "注意事项/限制/警告(可选)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``ENTRYPOINT [\"flwr-clientapp\"]``: Set the command ``flwr-clientapp`` " +"to be" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 -msgid "Design Details (optional)" -msgstr "设计细节(可选)" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "the default command run when the container is started." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 -msgid "Graduation Criteria" -msgstr "毕业标准" +#: ../../source/docker/tutorial-quickstart-docker.rst:186 +msgid "" +"Note that `flwr `__ is already installed " +"in the ``flwr/clientapp`` base image, so only other package dependencies " +"such as ``flwr-datasets``, ``torch``, etc., need to be installed. As a " +"result, the ``flwr`` dependency is removed from the ``pyproject.toml`` " +"after it has been copied into the Docker image (see line 5)." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 -msgid "Upgrade/Downgrade Strategy (if applicable)" -msgstr "升级/降级策略(如适用)" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 -msgid "As a reference, this document follows the above structure." -msgstr "作为参考,本文件采用上述结构。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 -#: ../../source/ref-api/flwr.common.Metadata.rst:2 -msgid "Metadata" -msgstr "描述数据" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 -msgid "" -"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" -" Doc + 1. With this number, it becomes easy to reference other proposals." -msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 -msgid "**title** (Required) The title of the proposal in plain language." 
-msgstr "**标题** (必填)用简明语言写出提案的标题。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +#: ../../source/docker/tutorial-quickstart-docker.rst:192 +#, fuzzy msgid "" -"**status** (Required) The current status of the proposal. See " -"[workflow](#workflow) for the possible states." -msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" +"Next, build the ClientApp Docker image by running the following command " +"in the directory where the Dockerfile is located:" +msgstr "接下来,我们在 Dockerfile 和 ServerApp 代码所在的目录下运行以下命令,构建 ServerApp Docker 镜像。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +#: ../../source/docker/tutorial-quickstart-docker.rst:201 +#, fuzzy msgid "" -"**authors** (Required) A list of authors of the proposal. This is simply " -"the GitHub ID." -msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" +"The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. " +"Remember that these values are merely examples, and you can customize " +"them according to your requirements." +msgstr "我们给图片命名为 ``flwr_serverapp``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 -msgid "" -"**creation-date** (Required) The date that the proposal was first " -"submitted in a PR." -msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" +#: ../../source/docker/tutorial-quickstart-docker.rst:205 +#, fuzzy +msgid "Start the first ClientApp container:" +msgstr "使用虚拟客户端引擎" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy msgid "" -"**last-updated** (Optional) The date that the proposal was last changed " -"significantly." 
-msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" +"``flwr_clientapp:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "flwr_serverapp:0.0.1``: 要使用的 Docker 映像的名称和标记。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"**see-also** (Optional) A list of other proposals that are relevant to " -"this one." -msgstr "**另见** (可选)与本提案相关的其他提案清单。" +"``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at" +" the address" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 -msgid "**replaces** (Optional) A list of proposals that this one replaces." -msgstr "**取代**(可选) 这份提案所取代的提案列表。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``supernode-1:9094``." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 -msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." -msgstr "**被取代者** (可选) 此提案取代的提案列表。" +#: ../../source/docker/tutorial-quickstart-docker.rst:226 +msgid "Start the second ClientApp container:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 -msgid "Workflow" -msgstr "工作流程" +#: ../../source/docker/tutorial-quickstart-docker.rst:237 +#, fuzzy +msgid "Step 5: Start the SuperExec" +msgstr "然后,我们启动服务器:" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +#: ../../source/docker/tutorial-quickstart-docker.rst:239 +#, fuzzy msgid "" -"The idea forming the enhancement should already have been discussed or " -"pitched in the community. As such, it needs a champion, usually the " -"author, who shepherds the enhancement. This person also has to find " -"committers to Flower willing to review the proposal." -msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" +"The procedure for building and running a SuperExec image is almost " +"identical to the ClientApp image." 
+msgstr "构建和运行 ServerApp 映像的程序与 SuperNode 映像几乎完全相同。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 +#: ../../source/docker/tutorial-quickstart-docker.rst:242 msgid "" -"New enhancements are checked in with a file name in the form of `NNNN-" -"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " -"Doc number, to `enhancements`. All enhancements start in `provisional` " -"state as part of a pull request. Discussions are done as part of the pull" -" request review." +"Similar to the ClientApp image, you will need to create a Dockerfile that" +" extends the SuperExec image and installs the required FAB dependencies." msgstr "" -"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 `NNNN` " -"是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " -"`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +#: ../../source/docker/tutorial-quickstart-docker.rst:245 msgid "" -"Once an enhancement has been reviewed and approved, its status is changed" -" to `implementable`. The actual implementation is then done in separate " -"pull requests. These pull requests should mention the respective " -"enhancement as part of their description. After the implementation is " -"done, the proposal status is changed to `implemented`." +"Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste " +"the following code in:" msgstr "" -"一旦增强功能通过审核和批准,其状态就会变为 " -"`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 `已实施`。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 -msgid "" -"Under certain conditions, other states are possible. 
An Enhancement has " -"the following states:" -msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" +#: ../../source/docker/tutorial-quickstart-docker.rst:248 +msgid "Dockerfile.superexec" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`provisional`: The enhancement has been proposed and is actively being " -"defined. This is the starting state while the proposal is being fleshed " -"out and actively defined and discussed." -msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 -msgid "`implementable`: The enhancement has been reviewed and approved." -msgstr "`可实施`: 增强功能已审核通过。" +":substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line" +" specifies that the Docker image" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`implemented`: The enhancement has been implemented and is no longer " -"actively changed." -msgstr "`已实施`: 增强功能已实施,不再主动更改。" - -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 -msgid "`deferred`: The enhancement is proposed but not actively being worked on." -msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" +"to be built from is the ``flwr/superexec image``, version :substitution-" +"code:`|stable_flwr_version|`." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"`rejected`: The authors and reviewers have decided that this enhancement " -"is not moving forward." -msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" +"``ENTRYPOINT [\"flower-superexec\"``: Set the command ``flower-" +"superexec`` to be" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 -msgid "`withdrawn`: The authors have withdrawn the enhancement." 
-msgstr "`撤回`: 作者已撤回增强功能。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``\"--executor\", \"flwr.superexec.deployment:executor\"]`` Use the" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 -msgid "`replaced`: The enhancement has been replaced by a new enhancement." -msgstr "`已替换`: 增强功能已被新的增强功能取代。" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``flwr.superexec.deployment:executor`` executor to run the ServerApps." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 +#: ../../source/docker/tutorial-quickstart-docker.rst:283 msgid "" -"Adding an additional process to the ones already provided by GitHub " -"(Issues and Pull Requests) adds more complexity and can be a barrier for " -"potential first-time contributors." -msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" +"Afterward, in the directory that holds the Dockerfile, execute this " +"Docker command to build the SuperExec image:" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 -msgid "" -"Expanding the proposal template beyond the single-sentence description " -"currently required in the features issue template may be a heavy burden " -"for non-native English speakers." -msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" +#: ../../source/docker/tutorial-quickstart-docker.rst:290 +#, fuzzy +msgid "Start the SuperExec container:" +msgstr "启动服务器" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 -msgid "GitHub Issues" -msgstr "GitHub 问题" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "``-p 9093:9093``: Map port ``9093`` of the container to the same port of" +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +#: ../../source/docker/tutorial-quickstart-docker.rst msgid "" -"Using GitHub Issues for these kinds of enhancements is doable. One could " -"use, for example, tags, to differentiate and filter them from other " -"issues. 
The main issue is in discussing and reviewing an enhancement: " -"GitHub issues only have a single thread for comments. Enhancements " -"usually have multiple threads of discussion at the same time for various " -"parts of the doc. Managing these multiple discussions can be confusing " -"when using GitHub Issues." +"the host machine, allowing you to access the SuperExec API on " +"``http://localhost:9093``." msgstr "" -"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: " -"GitHub 问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " -"问题时,管理这些多重讨论会很混乱。" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 -msgid "Google Docs" -msgstr "谷歌文档" +#: ../../source/docker/tutorial-quickstart-docker.rst:310 +msgid "``--name superexec``: Assign the name ``superexec`` to the container." +msgstr "" -#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +#: ../../source/docker/tutorial-quickstart-docker.rst +#, fuzzy msgid "" -"Google Docs allow for multiple threads of discussions. But as Google Docs" -" are hosted outside the project, their discoverability by the community " -"needs to be taken care of. A list of links to all proposals has to be " -"managed and made available for the community. Compared to shipping " -"proposals as part of Flower's repository, the potential for missing links" -" is much higher." 
-msgstr "" -"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" -" Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" +"``flwr_superexec:0.0.1``: This is the name of the image to be run and the" +" specific tag" +msgstr "flwr_supernode:0.0.1``: 要使用的 Docker 映像的名称和标记。" -#: ../../source/fed/index.md:1 -msgid "FED - Flower Enhancement Doc" -msgstr "FED - Flower 增强文件" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "" +"``--executor-config superlink=\\\"superlink:9091\\\"``: Configure the " +"SuperExec executor to" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:2 -msgid "Aggregate evaluation results" -msgstr "整合评估结果" +#: ../../source/docker/tutorial-quickstart-docker.rst +msgid "connect to the SuperLink running on port ``9091``." +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:4 -msgid "" -"The Flower server does not prescribe a way to aggregate evaluation " -"results, but it enables the user to fully customize result aggregation." -msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" +#: ../../source/docker/tutorial-quickstart-docker.rst:320 +msgid "Step 6: Run the Quickstart Project" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:8 -msgid "Aggregate Custom Evaluation Results" -msgstr "自定义整合评估结果" +#: ../../source/docker/tutorial-quickstart-docker.rst:322 +#, fuzzy +msgid "Add the following lines to the ``pyproject.toml``:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/how-to-aggregate-evaluation-results.rst:10 -msgid "" -"The same :code:`Strategy`-customization approach can be used to aggregate" -" custom evaluation results coming from individual clients. 
Clients can " -"return custom metrics to the server by returning a dictionary:" -msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" +#: ../../source/docker/tutorial-quickstart-docker.rst:331 +msgid "Run the ``quickstart-docker`` project by executing the command:" +msgstr "" -#: ../../source/how-to-aggregate-evaluation-results.rst:36 -msgid "" -"The server can then use a customized strategy to aggregate the metrics " -"provided in these dictionaries:" -msgstr "然后,服务器可以使用定制的策略来汇总这些字典中提供的指标:" +#: ../../source/docker/tutorial-quickstart-docker.rst:337 +msgid "Follow the SuperExec logs to track the execution of the run:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:2 +#: ../../source/docker/tutorial-quickstart-docker.rst:344 #, fuzzy -msgid "Authenticate SuperNodes" -msgstr "验证超级节点" +msgid "Step 7: Update the Application" +msgstr "步骤 3:自定义序列化" -#: ../../source/how-to-authenticate-supernodes.rst:4 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker.rst:346 msgid "" -"Flower has built-in support for authenticated SuperNodes that you can use" -" to verify the identities of each SuperNode connecting to a SuperLink. " -"Flower node authentication works similar to how GitHub SSH authentication" -" works:" +"Change the application code. 
For example, change the ``seed`` in " +"``quickstart_docker/task.py`` to ``43`` and save it:" msgstr "" -"Flower 内置了对经过身份验证的超级节点的支持,您可以用它来验证连接到超级链接的每个超级节点的身份。Flower 节点身份验证的工作方式与 " -"GitHub SSH 身份验证的工作方式类似:" -#: ../../source/how-to-authenticate-supernodes.rst:7 +#: ../../source/docker/tutorial-quickstart-docker.rst:349 #, fuzzy -msgid "SuperLink (server) stores a list of known (client) node public keys" -msgstr "超级链接(服务器)存储已知(客户端)节点公钥列表" +msgid "quickstart_docker/task.py" +msgstr "快速入门Pandas" -#: ../../source/how-to-authenticate-supernodes.rst:8 +#: ../../source/docker/tutorial-quickstart-docker.rst:356 #, fuzzy -msgid "" -"Using ECDH, both SuperNode and SuperLink independently derive a shared " -"secret" -msgstr "使用 ECDH,超级节点和超级链路可独立生成共享秘密" +msgid "Stop the current ClientApp containers:" +msgstr "当前客户端属性。" -#: ../../source/how-to-authenticate-supernodes.rst:9 +#: ../../source/docker/tutorial-quickstart-docker.rst:362 #, fuzzy -msgid "" -"Shared secret is used to compute the HMAC value of the message sent from " -"SuperNode to SuperLink as a token" -msgstr "共享秘密用于计算作为令牌从超级节点发送到超级链接的信息的 HMAC 值" +msgid "Rebuild the FAB and ClientApp image:" +msgstr "加载数据" -#: ../../source/how-to-authenticate-supernodes.rst:10 -#, fuzzy -msgid "SuperLink verifies the token" -msgstr "超级链接验证令牌" +#: ../../source/docker/tutorial-quickstart-docker.rst:368 +msgid "Launch two new ClientApp containers based on the newly built image:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:12 -#, fuzzy -msgid "" -"We recommend you to check out the complete `code example " -"`_ demonstrating federated learning with Flower in an " -"authenticated setting." +#: ../../source/docker/tutorial-quickstart-docker.rst:383 +msgid "Run the updated project:" msgstr "" -"请参阅`完整代码示例 " -"`_了解更多信息。" -#: ../../source/how-to-authenticate-supernodes.rst:15 -#, fuzzy -msgid "" -"This guide covers a preview feature that might change in future versions " -"of Flower." 
-msgstr "本指南涵盖的预览功能可能会在 Flower 的未来版本中有所改变。" +#: ../../source/docker/tutorial-quickstart-docker.rst:390 +msgid "Step 8: Clean Up" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:18 -#, fuzzy -msgid "" -"For increased security, node authentication can only be used when " -"encrypted connections (SSL/TLS) are enabled." -msgstr "为提高安全性,只有启用加密连接(SSL/TLS)时才能使用节点验证。" +#: ../../source/docker/tutorial-quickstart-docker.rst:392 +msgid "Remove the containers and the bridge network:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:408 +#: ../../source/docker/tutorial-quickstart-docker.rst:404 #, fuzzy -msgid "Enable node authentication in :code:`SuperLink`" -msgstr "在 :code:`SuperLink` 中启用节点验证" +msgid "Where to Go Next" +msgstr "从哪里开始" -#: ../../source/how-to-authenticate-supernodes.rst:23 -#, fuzzy -msgid "" -"To enable node authentication, first you need to configure SSL/TLS " -"connections to secure the SuperLink<>SuperNode communication. You can " -"find the complete guide `here `_. After configuring secure connections, you" -" can enable client authentication in a long-running Flower " -":code:`SuperLink`. Use the following terminal command to start a Flower " -":code:`SuperNode` that has both secure connections and node " -"authentication enabled:" +#: ../../source/docker/tutorial-quickstart-docker.rst:406 +msgid ":doc:`enable-tls`" msgstr "" -"要启用节点验证,首先需要配置 SSL/TLS 连接,以确保 SuperLink<>SuperNode 通信的安全。您可以在 " -"`_ " -"找到完整的指南。配置安全连接后,您就可以在长期运行的 Flower " -":code:`SuperLink`中启用客户端身份验证。使用以下终端命令启动一个同时启用了安全连接和节点验证的 Flower " -":code:`SuperNode`:" - -#: ../../source/how-to-authenticate-supernodes.rst:38 -#, fuzzy -msgid "Let's break down the authentication flags:" -msgstr "让我们来分析一下身份验证标志:" -#: ../../source/how-to-authenticate-supernodes.rst:40 -#, fuzzy -msgid "" -"The first flag :code:`--auth-list-public-keys` expects a path to a CSV " -"file storing all known node public keys. 
You need to store all known node" -" public keys that are allowed to participate in a federation in one CSV " -"file (:code:`.csv`)." +#: ../../source/docker/tutorial-quickstart-docker.rst:407 +msgid ":doc:`persist-superlink-state`" msgstr "" -"第一个标志 :code:`--auth-list-public-keys`(密码:`--auth-list-public-keys`)需要一个 " -"CSV 文件路径,该文件存储了所有已知节点的公钥。您需要在一个 CSV 文件(:code:`.csv`)中存储所有允许参与联盟的已知节点公钥。" -#: ../../source/how-to-authenticate-supernodes.rst:42 -#, fuzzy -msgid "" -"A valid CSV file storing known node public keys should list the keys in " -"OpenSSH format, separated by commas and without any comments. For an " -"example, refer to our code sample, which contains a CSV file with two " -"known node public keys." +#: ../../source/docker/tutorial-quickstart-docker.rst:408 +msgid ":doc:`tutorial-quickstart-docker-compose`" msgstr "" -"存储已知节点公开密钥的有效 CSV 文件应以 OpenSSH " -"格式列出密钥,以逗号分隔,不含任何注释。有关示例,请参阅我们的代码示例,其中包含一个包含两个已知节点公钥的 CSV 文件。" -#: ../../source/how-to-authenticate-supernodes.rst:44 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:2 #, fuzzy +msgid "Quickstart with Docker Compose" +msgstr "快速入门 iOS" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:4 msgid "" -"The second and third flags :code:`--auth-superlink-private-key` and :code" -":`--auth-superlink-public-key` expect paths to the server's private and " -"public keys. For development purposes, you can generate a private and " -"public key pair using :code:`ssh-keygen -t ecdsa -b 384`." +"This quickstart shows you how to set up Flower using Docker Compose in a " +"single command, allowing you to focus on developing your application " +"without worrying about the underlying infrastructure." 
msgstr "" -"第二和第三个标记 :code:`--auth-superlink-private-key` 和 :code:`--auth-superlink-" -"public-key` 希望指向服务器私钥和公钥的路径。出于开发目的,您可以使用 :code:`ssh-keygen -t ecdsa -b " -"384` 生成一对私钥和公钥。" -#: ../../source/how-to-authenticate-supernodes.rst:47 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:8 msgid "" -"In Flower 1.9, there is no support for dynamically removing, editing, or " -"adding known node public keys to the SuperLink. To change the set of " -"known nodes, you need to shut the server down, edit the CSV file, and " -"start the server again. Support for dynamically changing the set of known" -" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." +"You will also learn how to easily enable TLS encryption and persist " +"application state locally, giving you the freedom to choose the " +"configuration that best suits your project's needs." msgstr "" -"在 Flower 1.9 中,超级链接不支持动态删除、编辑或添加已知节点公钥。要更改已知节点集,您需要关闭服务器,编辑 CSV " -"文件,然后重新启动服务器。动态更改已知节点集的支持已列入 Flower 1.10(预计发布时间:6 月)的路线图。" -#: ../../source/how-to-authenticate-supernodes.rst:53 -#, fuzzy -msgid "Enable node authentication in :code:`SuperNode`" -msgstr "在 :code:`SuperNode` 中启用节点验证" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:24 +msgid "Clone the Docker Compose ``complete`` directory:" +msgstr "" -#: ../../source/how-to-authenticate-supernodes.rst:55 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:38 msgid "" -"Similar to the long-running Flower server (:code:`SuperLink`), you can " -"easily enable node authentication in the long-running Flower client " -"(:code:`SuperNode`). Use the following terminal command to start an " -"authenticated :code:`SuperNode`:" +"Export the path of the newly created project. 
The path should be relative" +" to the location of the Docker Compose files:" msgstr "" -"与长期运行的 Flower 服务器(:code:`SuperLink`)类似,您也可以在长期运行的 Flower " -"客户端(:code:`SuperNode`)中轻松启用节点身份验证。使用以下终端命令启动已验证的 :code:`SuperNode`:" -#: ../../source/how-to-authenticate-supernodes.rst:66 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:45 msgid "" -"The :code:`--auth-supernode-private-key` flag expects a path to the " -"node's private key file and the :code:`--auth-supernode-public-key` flag " -"expects a path to the node's public key file. For development purposes, " -"you can generate a private and public key pair using :code:`ssh-keygen -t" -" ecdsa -b 384`." +"Setting the ``PROJECT_DIR`` helps Docker Compose locate the " +"``pyproject.toml`` file, allowing it to install dependencies in the " +"SuperExec and SuperNode images correctly." msgstr "" -":code:`--auth-supernode-private-key`标志需要节点私钥文件的路径,:code:`-auth-supernode-" -"public-key`标志需要节点公钥文件的路径。出于开发目的,可以使用 :code:`ssh-keygen -t ecdsa -b 384` " -"生成一对私钥和公钥。" -#: ../../source/how-to-authenticate-supernodes.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:49 #, fuzzy -msgid "Security notice" -msgstr "安全通知" +msgid "Step 2: Run Flower in Insecure Mode" +msgstr "Flower 服务器。" -#: ../../source/how-to-authenticate-supernodes.rst:72 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:51 msgid "" -"The system's security relies on the credentials of the SuperLink and each" -" SuperNode. Therefore, it is imperative to safeguard and safely store the" -" credentials to avoid security risks such as Public Key Infrastructure " -"(PKI) impersonation attacks. The node authentication mechanism also " -"involves human interaction, so please ensure that all of the " -"communication is done in a secure manner, using trusted communication " -"methods." +"To begin, start Flower with the most basic configuration. 
In this setup, " +"Flower will run without TLS and without persisting the state." msgstr "" -"系统的安全性依赖于超级链接和每个超级节点的凭证。因此,必须保护和安全存储凭证,以避免公钥基础设施 (PKI) " -"假冒攻击等安全风险。节点验证机制还涉及人机交互,因此请确保使用可信的通信方法,以安全的方式进行所有通信。" - -#: ../../source/how-to-authenticate-supernodes.rst:77 -#: ../../source/how-to-enable-ssl-connections.rst:68 -#: ../../source/how-to-use-built-in-mods.rst:85 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 -msgid "Conclusion" -msgstr "总结" -#: ../../source/how-to-authenticate-supernodes.rst:79 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:56 msgid "" -"You should now have learned how to start a long-running Flower server " -"(:code:`SuperLink`) and client (:code:`SuperNode`) with node " -"authentication enabled. You should also know the significance of the " -"private key and store it safely to minimize security risks." +"Without TLS, the data sent between the services remains **unencrypted**. " +"Use it only for development purposes." msgstr "" -"现在,您应该已经学会了如何启动长期运行的 Flower " -"服务器(:code:`SuperLink`)和客户端(:code:`SuperNode`)并启用节点身份验证。您还应该知道私钥的重要性,并将其安全存储,以尽量减少安全风险。" - -#: ../../source/how-to-configure-clients.rst:2 -msgid "Configure clients" -msgstr "配置客户端" -#: ../../source/how-to-configure-clients.rst:4 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:59 msgid "" -"Along with model parameters, Flower can send configuration values to " -"clients. Configuration values can be used for various purposes. They are," -" for example, a popular way to control client-side hyperparameters from " -"the server." -msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" +"For production-oriented use cases, :ref:`enable TLS` for secure data" +" transmission." 
+msgstr "" -#: ../../source/how-to-configure-clients.rst:7 -msgid "Configuration values" -msgstr "配置值" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:70 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:184 +#, fuzzy +msgid "``docker compose``: The Docker command to run the Docker Compose tool." +msgstr "`docker run``: 这是运行新 Docker 容器的命令。" -#: ../../source/how-to-configure-clients.rst:9 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:71 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:185 msgid "" -"Configuration values are represented as a dictionary with ``str`` keys " -"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " -"float), ``int``, or ``str`` (or equivalent types in different languages)." -" Here is an example of a configuration dictionary in Python:" +"``-f compose.yml``: Specify the YAML file that contains the basic Flower " +"service definitions." msgstr "" -"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " -"位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" -#: ../../source/how-to-configure-clients.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:72 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:190 msgid "" -"Flower serializes these configuration dictionaries (or *config dict* for " -"short) to their ProtoBuf representation, transports them to the client " -"using gRPC, and then deserializes them back to Python dictionaries." +"``--build``: Rebuild the images for each service if they don't already " +"exist." 
msgstr "" -"Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " -"将其传输到客户端,然后再反序列化为 Python 字典。" -#: ../../source/how-to-configure-clients.rst:24 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:73 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:191 msgid "" -"Currently, there is no support for directly sending collection types " -"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " -"dictionaries. There are several workarounds to send collections as values" -" by converting them to one of the supported value types (and converting " -"them back on the client-side)." +"``-d``: Detach the containers from the terminal and run them in the " +"background." msgstr "" -"目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " -"`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" - -#: ../../source/how-to-configure-clients.rst:26 -msgid "" -"One can, for example, convert a list of floating-point numbers to a JSON " -"string, then send the JSON string using the configuration dictionary, and" -" then convert the JSON string back to a list of floating-point numbers on" -" the client." -msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" -#: ../../source/how-to-configure-clients.rst:30 -msgid "Configuration through built-in strategies" -msgstr "通过内置策略进行配置" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:76 +msgid "Step 3: Run the Quickstart Project" +msgstr "" -#: ../../source/how-to-configure-clients.rst:32 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:78 msgid "" -"The easiest way to send configuration values to clients is to use a " -"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" -"called configuration functions. A configuration function is a function " -"that the built-in strategy calls to get the configuration dictionary for " -"the current round. It then forwards the configuration dictionary to all " -"the clients selected during that round." 
+"Now that the Flower services have been started via Docker Compose, it is " +"time to run the quickstart example." msgstr "" -"向客户端发送配置值的最简单方法是使用内置策略,如 " -":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" -#: ../../source/how-to-configure-clients.rst:34 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:81 msgid "" -"Let's start with a simple example. Imagine we want to send (a) the batch " -"size that the client should use, (b) the current global round of " -"federated learning, and (c) the number of epochs to train on the client-" -"side. Our configuration function could look like this:" -msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" +"To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify" +" the SuperExec addresses in the ``pyproject.toml`` file." +msgstr "" -#: ../../source/how-to-configure-clients.rst:47 -msgid "" -"To make the built-in strategies use this function, we can pass it to " -"``FedAvg`` during initialization using the parameter " -":code:`on_fit_config_fn`:" -msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:84 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:232 +msgid "Add the following lines to the ``quickstart-compose/pyproject.toml``:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:56 -msgid "One the client side, we receive the configuration dictionary in ``fit``:" -msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:86 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:234 +msgid "quickstart-compose/pyproject.toml" +msgstr "" -#: ../../source/how-to-configure-clients.rst:67 -msgid "" -"There is also an `on_evaluate_config_fn` to configure evaluation, which " -"works the same way. 
They are separate functions because one might want to" -" send different configuration values to `evaluate` (for example, to use a" -" different batch size)." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:93 +msgid "Execute the command to run the quickstart example:" msgstr "" -"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " -"发送不同的配置值(例如,使用不同的批量大小)。" -#: ../../source/how-to-configure-clients.rst:69 -msgid "" -"The built-in strategies call this function every round (that is, every " -"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). " -"Calling `on_evaluate_config_fn` every round allows us to vary/change the " -"config dict over consecutive rounds. If we wanted to implement a " -"hyperparameter schedule, for example, to increase the number of local " -"epochs during later rounds, we could do the following:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:99 +msgid "Monitor the SuperExec logs and wait for the summary to appear:" msgstr "" -"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " -"`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " -"允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" -#: ../../source/how-to-configure-clients.rst:82 -msgid "The :code:`FedAvg` strategy will call this function *every round*." -msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:106 +#, fuzzy +msgid "Step 4: Update the Application" +msgstr "步骤 3:自定义序列化" -#: ../../source/how-to-configure-clients.rst:85 -msgid "Configuring individual clients" -msgstr "配置个别客户端" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:108 +msgid "In the next step, change the application code." +msgstr "" -#: ../../source/how-to-configure-clients.rst:87 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:110 msgid "" -"In some cases, it is necessary to send different configuration values to " -"different clients." 
-msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" +"For example, go to the ``task.py`` file in the ``quickstart-" +"compose/quickstart_compose/`` directory and add a ``print`` call in the " +"``get_weights`` function:" +msgstr "" -#: ../../source/how-to-configure-clients.rst:89 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:114 +msgid "quickstart-compose/quickstart_compose/task.py" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:125 #, fuzzy +msgid "Rebuild and restart the services." +msgstr "我们已经可以启动*服务器*了:" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:129 msgid "" -"This can be achieved by customizing an existing strategy or by " -":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg`" -" by adding a custom ``\"hello\": \"world\"`` configuration key/value pair" -" to the config dict of a *single client* (only the first client in the " -"list, the other clients in this round to not receive this \"special\" " -"config value):" +"If you have modified the dependencies listed in your ``pyproject.toml`` " +"file, it is essential to rebuild images." msgstr "" -"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " -"dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " -"中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" -#: ../../source/how-to-configure-logging.rst:2 -msgid "Configure logging" -msgstr "配置日志记录" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:132 +msgid "If you haven't made any changes, you can skip this step." +msgstr "" -#: ../../source/how-to-configure-logging.rst:4 -msgid "" -"The Flower logger keeps track of all core events that take place in " -"federated learning workloads. 
It presents information by default " -"following a standard message format:" -msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:134 +msgid "Run the following command to rebuild and restart the services:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:13 -msgid "" -"containing relevant information including: log message level (e.g. " -":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging " -"took place from, as well as the log message itself. In this way, the " -"logger would typically display information on your terminal as follows:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:140 +msgid "Run the updated quickstart example:" msgstr "" -"相关信息包括:日志信息级别(例如 " -":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" -#: ../../source/how-to-configure-logging.rst:34 -msgid "Saving log to file" -msgstr "将日志保存到文件" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:147 +msgid "In the SuperExec logs, you should find the ``Get weights`` line:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:36 -msgid "" -"By default, the Flower log is outputted to the terminal where you launch " -"your Federated Learning workload from. This applies for both gRPC-based " -"federation (i.e. when you do :code:`fl.server.start_server`) and when " -"using the :code:`VirtualClientEngine` (i.e. when you do " -":code:`fl.simulation.start_simulation`). In some situations you might " -"want to save this log to disk. You can do so by calling the " -"`fl.common.logger.configure() " -"`_" -" function. 
For example:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:164 +msgid "Step 5: Persisting the SuperLink State" msgstr "" -"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC 的联邦学习(即执行 " -":code:`fl.server.start_server` 时),也适用于使用 :code:`VirtualClientEngine` " -"时(即执行 :code:`fl.simulation.start_simulation` " -"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure() " -"`_" -" 函数。例如:" -#: ../../source/how-to-configure-logging.rst:53 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:166 msgid "" -"With the above, Flower will record the log you see on your terminal to " -":code:`log.txt`. This file will be created in the same directory as were " -"you are running the code from. If we inspect we see the log above is also" -" recorded but prefixing with :code:`identifier` each line:" +"In this step, Flower services are configured to persist the state of the " +"SuperLink service, ensuring that it maintains its state even after a " +"restart." msgstr "" -"通过上述操作,Flower 会将您在终端上看到的日志记录到 " -":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " -":code:`identifier` 作为前缀:" -#: ../../source/how-to-configure-logging.rst:74 -msgid "Log your own messages" -msgstr "记录自己的信息" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:171 +msgid "" +"When working with Docker Compose on Linux, you may need to create the " +"``state`` directory first and change its ownership to ensure proper " +"access and permissions." +msgstr "" -#: ../../source/how-to-configure-logging.rst:76 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:174 msgid "" -"You might expand the information shown by default with the Flower logger " -"by adding more messages relevant to your application. You can achieve " -"this easily as follows." -msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" +"For more information, consult the following page: :doc:`persist-" +"superlink-state`." 
+msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:176 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:226 +msgid "Run the command:" +msgstr "" -#: ../../source/how-to-configure-logging.rst:102 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"In this way your logger will show, in addition to the default messages, " -"the ones introduced by the clients as specified above." -msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" +"``-f with-state.yml``: Specifies the path to an additional Docker Compose" +" file that" +msgstr "" -#: ../../source/how-to-configure-logging.rst:128 -msgid "Log to a remote service" -msgstr "登录远程服务" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst +msgid "contains the configuration for persisting the SuperLink state." +msgstr "" -#: ../../source/how-to-configure-logging.rst:130 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst msgid "" -"The :code:`fl.common.logger.configure` function, also allows specifying a" -" host to which logs can be pushed (via :code:`POST`) through a native " -"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful" -" feature in :code:`gRPC`-based Federated Learning workloads where " -"otherwise gathering logs from all entities (i.e. the server and the " -"clients) might be cumbersome. Note that in Flower simulation, the server " -"automatically displays all logs. You can still specify a " -":code:`HTTPHandler` should you wish to backup or analyze the logs " -"somewhere else." +"Docker merges Compose files according to `merging rules " +"`_." 
msgstr "" -"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " -":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " -":code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 " -"Flower 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" -#: ../../source/how-to-enable-ssl-connections.rst:2 -msgid "Enable SSL connections" -msgstr "启用 SSL 连接" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:193 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:247 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:375 +msgid "Rerun the ``quickstart-compose`` project:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:4 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:199 +msgid "Check the content of the ``state`` directory:" +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:206 msgid "" -"This guide describes how to a SSL-enabled secure Flower server " -"(:code:`SuperLink`) can be started and how a Flower client " -"(:code:`SuperNode`) can establish a secure connections to it." -msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" +"You should see a ``state.db`` file in the ``state`` directory. If you " +"restart the service, the state file will be used to restore the state " +"from the previously saved data. This ensures that the data persists even " +"if the containers are stopped and started again." +msgstr "" + +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:214 +msgid "Step 6: Run Flower with TLS" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:7 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:216 msgid "" -"A complete code example demonstrating a secure connection can be found " -"`here `_." +"To demonstrate how to enable TLS, generate self-signed certificates using" +" the ``certs.yml`` Compose file." 
msgstr "" -"有关安全连接的完整代码示例,请参见 `_ 。" -#: ../../source/how-to-enable-ssl-connections.rst:10 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:223 msgid "" -"The code example comes with a :code:`README.md` file which explains how " -"to start it. Although it is already SSL-enabled, it might be less " -"descriptive on how it does so. Stick to this guide for a deeper " -"introduction to the topic." -msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" +"For production environments, use a service like `Let's Encrypt " +"`_ to obtain your certificates." +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:16 -msgid "Certificates" -msgstr "证书" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 +msgid "Restart the services with TLS enabled:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:18 -#, fuzzy -msgid "" -"Using SSL-enabled connections requires certificates to be passed to the " -"server and client. For the purpose of this guide we are going to generate" -" self-signed certificates. As this can become quite complex we are going " -"to ask you to run the script in :code:`examples/advanced-" -"tensorflow/certificates/generate.sh` with the following command sequence:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:255 +msgid "Step 7: Add another SuperNode" msgstr "" -"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " -":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" -#: ../../source/how-to-enable-ssl-connections.rst:29 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:257 msgid "" -"This will generate the certificates in :code:`examples/advanced-" -"tensorflow/.cache/certificates`." -msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" +"You can add more SuperNodes and ClientApps by duplicating their " +"definitions in the ``compose.yml`` file." 
+msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:31 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:260 msgid "" -"The approach for generating SSL certificates in the context of this " -"example can serve as an inspiration and starting point, but it should not" -" be used as a reference for production environments. Please refer to " -"other sources regarding the issue of correctly generating certificates " -"for production environments. For non-critical prototyping or research " -"projects, it might be sufficient to use the self-signed certificates " -"generated using the scripts mentioned in this guide." -msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" +"Just give each new SuperNode and ClientApp service a unique service name " +"like ``supernode-3``, ``clientapp-3``, etc." +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:39 -#, fuzzy -msgid "Server (SuperLink)" -msgstr "flower-superlink" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:263 +msgid "In ``compose.yml``, add the following:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:41 -#, fuzzy -msgid "" -"Use the following terminal command to start a sever (SuperLink) that uses" -" the previously generated certificates:" -msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:265 +msgid "compose.yml" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:50 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:316 msgid "" -"When providing certificates, the server expects a tuple of three " -"certificates paths: CA certificate, server certificate and server private" -" key." 
-msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" - -#: ../../source/how-to-enable-ssl-connections.rst:54 -#, fuzzy -msgid "Client (SuperNode)" -msgstr "客户端状态代码。" +"If you also want to enable TLS for the new SuperNodes, duplicate the " +"SuperNode definition for each new SuperNode service in the ``with-" +"tls.yml`` file." +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:56 -#, fuzzy +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:319 msgid "" -"Use the following terminal command to start a client (SuperNode) that " -"uses the previously generated certificates:" -msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" +"Make sure that the names of the services match with the one in the " +"``compose.yml`` file." +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:64 -#, fuzzy -msgid "" -"When setting :code:`root_certificates`, the client expects a file path to" -" PEM-encoded root certificates." +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:321 +msgid "In ``with-tls.yml``, add the following:" msgstr "" -"当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 " -":code:`Path` 来简化以字节字符串形式读取证书的过程。" -#: ../../source/how-to-enable-ssl-connections.rst:70 -#, fuzzy -msgid "" -"You should now have learned how to generate self-signed certificates " -"using the given script, start an SSL-enabled server and have a client " -"establish a secure connection to it." 
-msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:323 +msgid "with-tls.yml" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:75 -msgid "Additional resources" -msgstr "补充资源" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:345 +msgid "Step 8: Persisting the SuperLink State and Enabling TLS" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:77 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:347 msgid "" -"These additional sources might be relevant if you would like to dive " -"deeper into the topic of certificates:" -msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:" +"To run Flower with persisted SuperLink state and enabled TLS, a slight " +"change in the ``with-state.yml`` file is required:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:79 -msgid "`Let's Encrypt `_" -msgstr "`让我们加密 `_" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:350 +msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" +msgstr "" -#: ../../source/how-to-enable-ssl-connections.rst:80 -msgid "`certbot `_" -msgstr "`certbot `_" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:352 +msgid "with-state.yml" +msgstr "" -#: ../../source/how-to-implement-strategies.rst:2 -msgid "Implement strategies" -msgstr "实施策略" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 +#, fuzzy +msgid "Restart the services:" +msgstr "启动服务器" -#: ../../source/how-to-implement-strategies.rst:4 -msgid "" -"The strategy abstraction enables implementation of fully custom " -"strategies. A strategy is basically the federated learning algorithm that" -" runs on the server. Strategies decide how to sample clients, how to " -"configure clients for training, how to aggregate updates, and how to " -"evaluate models. Flower provides a few built-in strategies which are " -"based on the same API described below." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:383 +msgid "Step 9: Merge Multiple Compose Files" msgstr "" -"策略抽象类可以实现完全定制的策略。策略基本上就是在服务器上运行的联邦学习算法。策略决定如何对客户端进行采样、如何配置客户端进行训练、如何聚合参数更新以及如何评估模型。Flower" -" 提供了一些内置策略,这些策略基于下文所述的相同 API。" - -#: ../../source/how-to-implement-strategies.rst:11 -msgid "The :code:`Strategy` abstraction" -msgstr ":code:`策略 ` 抽象类" -#: ../../source/how-to-implement-strategies.rst:13 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:385 msgid "" -"All strategy implementation are derived from the abstract base class " -":code:`flwr.server.strategy.Strategy`, both built-in implementations and " -"third party implementations. This means that custom strategy " -"implementations have the exact same capabilities at their disposal as " -"built-in ones." +"You can merge multiple Compose files into a single file. For instance, if" +" you wish to combine the basic configuration with the TLS configuration, " +"execute the following command:" msgstr "" -"所有策略实现均源自抽象基类 " -":code:`flwr.server.strategy.Strategy`,包括内置实现和第三方实现。这意味着自定义策略实现与内置实现具有完全相同的功能。" -#: ../../source/how-to-implement-strategies.rst:18 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:394 msgid "" -"The strategy abstraction defines a few abstract methods that need to be " -"implemented:" -msgstr "策略抽象定义了一些需要实现的抽象方法:" +"This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" +" a new file called ``my_compose.yml``." 
+msgstr "" -#: ../../source/how-to-implement-strategies.rst:75 -msgid "" -"Creating a new strategy means implementing a new :code:`class` (derived " -"from the abstract base class :code:`Strategy`) that implements for the " -"previously shown abstract methods:" -msgstr "创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:`Strategy` 派生),该类要实现前面显示的抽象方法:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:398 +msgid "Step 10: Clean Up" +msgstr "" -#: ../../source/how-to-implement-strategies.rst:100 -msgid "The Flower server calls these methods in the following order:" -msgstr "Flower 服务器按以下顺序调用这些方法:" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:400 +#, fuzzy +msgid "Remove all services and volumes:" +msgstr "从 R 中删除所有项目。" -#: ../../source/how-to-implement-strategies.rst:177 -msgid "The following sections describe each of those methods in more detail." -msgstr "下文将详细介绍每种方法。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:410 +#, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "快速入门 iOS" -#: ../../source/how-to-implement-strategies.rst:180 -msgid "The :code:`initialize_parameters` method" -msgstr ":code:`初始化参数` 方法" +#: ../../source/docker/use-a-different-version.rst:2 +#, fuzzy +msgid "Use a Different Flower Version" +msgstr "使用不同的 Flower 或 Python 版本" -#: ../../source/how-to-implement-strategies.rst:182 +#: ../../source/docker/use-a-different-version.rst:4 +#, fuzzy msgid "" -":code:`initialize_parameters` is called only once, at the very beginning " -"of an execution. It is responsible for providing the initial global model" -" parameters in a serialized form (i.e., as a :code:`Parameters` object)." +"If you want to use a different version of Flower, for example Flower " +"nightly, you can do so by changing the tag. All available versions are on" +" `Docker Hub `__." 
msgstr "" -":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式(即 " -":code:`Parameters` 对象)提供初始全局模型参数。" - -#: ../../source/how-to-implement-strategies.rst:184 -msgid "" -"Built-in strategies return user-provided initial parameters. The " -"following example shows how initial parameters can be passed to " -":code:`FedAvg`:" -msgstr "内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:`FedAvg`:" +"如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。我们提供的所有版本都可以在 `Docker Hub " +"`_ 上找到。" -#: ../../source/how-to-implement-strategies.rst:209 +#: ../../source/docker/use-a-different-version.rst:10 +#, fuzzy msgid "" -"The Flower server will call :code:`initialize_parameters`, which either " -"returns the parameters that were passed to :code:`initial_parameters`, or" -" :code:`None`. If no parameters are returned from " -":code:`initialize_parameters` (i.e., :code:`None`), the server will " -"randomly select one client and ask it to provide its parameters. This is " -"a convenience feature and not recommended in practice, but it can be " -"useful for prototyping. In practice, it is recommended to always use " -"server-side parameter initialization." +"When using Flower nightly, the SuperLink nightly image must be paired " +"with the corresponding SuperNode and ServerApp nightly images released on" +" the same day. To ensure the versions are in sync, using the concrete " +"tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is recommended." 
msgstr "" -"Flower 服务器将调用 :code:`initialize_parameters`,返回传给 " -":code:`initial_parameters` 的参数或 :code:`None`。如果 " -":code:`initialize_parameters` 没有返回任何参数(即 " -":code:`None`),服务器将随机选择一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用,但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" +"超级节点 Docker 映像目前仅适用于 1.9.0-nightly 版本。稳定版将在 Flower " +"1.9.0(稳定版)发布时推出(预计发布时间:5 " +"月)。超级节点夜间镜像必须与同一天发布的相应超级链接和服务器应用程序夜间镜像配对。为确保版本同步,建议使用具体标签,例如``1.9.0.dev20240501``,而不是``nightly``。" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "示例: PyTorch 中的 FedBN - 从集中式到联邦式" -#: ../../source/how-to-implement-strategies.rst:213 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +#, fuzzy msgid "" -"Server-side parameter initialization is a powerful mechanism. It can be " -"used, for example, to resume training from a previously saved checkpoint." -" It is also the fundamental capability needed to implement hybrid " -"approaches, for example, to fine-tune a pre-trained model using federated" -" learning." -msgstr "服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进行微调。" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to :doc:`Example: " +"PyTorch - From Centralized To Federated `." 
+msgstr "" +"本教程将向您展示如何使用 Flower 为现有的机器学习框架构建一个联邦学习的版本,并使用 \"FedBN `_\"(一种针对非 iid 数据设计的联邦训练策略)。我们使用 PyTorch 在 CIFAR-10 " +"数据集上训练一个卷积神经网络(带有Batch Normalization层)。在应用 FedBN 时,只需对 `示例: PyTorch - " +"从集中式到联邦式 `_ 做少量改动。" -#: ../../source/how-to-implement-strategies.rst:216 -msgid "The :code:`configure_fit` method" -msgstr ":code:`configure_fit`方法" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:12 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "Centralized Training" +msgstr "集中式训练" -#: ../../source/how-to-implement-strategies.rst:218 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:14 +#, fuzzy msgid "" -":code:`configure_fit` is responsible for configuring the upcoming round " -"of training. What does *configure* mean in this context? Configuring a " -"round means selecting clients and deciding what instructions to send to " -"these clients. The signature of :code:`configure_fit` makes this clear:" +"All files are revised based on :doc:`Example: PyTorch - From Centralized " +"To Federated `. The only " +"thing to do is modifying the file called ``cifar.py``, revised part is " +"shown below:" msgstr "" -":code:`configure_fit` " -"负责配置即将开始的一轮训练。*配置*在这里是什么意思?配置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit`" -" 说明了这一点:" +"所有文件均根据 `示例: PyTorch -从集中式到联邦式 `_。唯一要做的就是修改名为 :code:`cifar.py` " +"的文件,修改部分如下所示:" -#: ../../source/how-to-implement-strategies.rst:231 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:18 msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_fit`:" -msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_fit` 中执行以下步骤:" +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." 
+msgstr "类 Net() 中定义的模型架构会相应添加Batch Normalization层。" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:171 +msgid "You can now run your machine learning workload:" +msgstr "现在,您可以运行您的机器学习工作了:" -#: ../../source/how-to-implement-strategies.rst:233 -#: ../../source/how-to-implement-strategies.rst:280 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +#, fuzzy msgid "" -"Use the :code:`client_manager` to randomly sample all (or a subset of) " -"available clients (each represented as a :code:`ClientProxy` object)" +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the system consists of one server" +" and two clients." msgstr "" -"使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表示为 :code:`ClientProxy` " -"对象)" +"到目前为止,如果您以前使用过 PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,使用我们所构建的内容在 FedBN " +"中创建一个联邦学习系统,该系统由一个服务器和两个客户端组成。" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:182 +msgid "Federated Training" +msgstr "联邦培训" -#: ../../source/how-to-implement-strategies.rst:234 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:60 +#, fuzzy msgid "" -"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " -"current global model :code:`parameters` and :code:`config` dict" +"If you have read :doc:`Example: PyTorch - From Centralized To Federated " +"`, the following parts are" +" easy to follow, only ``get_parameters`` and ``set_parameters`` function " +"in ``client.py`` needed to revise. If not, please read the :doc:`Example:" +" PyTorch - From Centralized To Federated `. first." 
msgstr "" -"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " -"dict 的 :code:`FitIns` 配对" +"如果你读过 `示例: PyTorch - 从集中式到联邦式 `_,下面的部分就很容易理解了,只需要修改 " +":code:`get_parameters` 和 :code:`set_parameters` 中的 :code:`client.py` " +"函数。如果没有,请阅读 `示例: PyTorch - 从集中式到联邦式 `_。" -#: ../../source/how-to-implement-strategies.rst:236 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:66 #, fuzzy msgid "" -"More sophisticated implementations can use :code:`configure_fit` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_fit`." -msgstr "" -"更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有当相应的 " -":code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客户端才会参与进来。" +"Our example consists of one *server* and two *clients*. In FedBN, " +"``server.py`` keeps unchanged, we can start the server directly." +msgstr "我们的示例包括一个*服务器*和两个*客户端*。在 FedBN 中,:code:`server.py` 保持不变,我们可以直接启动服务器。" -#: ../../source/how-to-implement-strategies.rst:240 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:73 +#, fuzzy msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to train, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"Finally, we will revise our *client* logic by changing ``get_parameters``" +" and ``set_parameters`` in ``client.py``, we will exclude batch " +"normalization parameters from model parameter list when sending to or " +"receiving from the server." 
msgstr "" -"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练不同的模型,或在不同的客户端上使用不同的超参数(通过" -" :code:`config` dict)。" +"最后,我们将修改 *client* 的逻辑,修改 :code:`client.py` 中的 :code:`get_parameters` 和 " +":code:`set_parameters`,在向服务器发送或从服务器接收时,我们将从模型参数列表中排除batch " +"normalization层的参数。" -#: ../../source/how-to-implement-strategies.rst:243 -msgid "The :code:`aggregate_fit` method" -msgstr ":code:`aggregate_fit` 方法" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:102 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "现在,您可以打开另外两个终端窗口并运行程序" -#: ../../source/how-to-implement-strategies.rst:245 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:108 msgid "" -":code:`aggregate_fit` is responsible for aggregating the results returned" -" by the clients that were selected and asked to train in " -":code:`configure_fit`." -msgstr ":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户端所返回的结果。" +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" +msgstr "确保服务器仍在运行后,然后您就能看到您的 PyTorch 项目(之前是集中式的)通过 FedBN 策略在两个客户端上运行联合学习。祝贺!" -#: ../../source/how-to-implement-strategies.rst:258 -msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list " -"of :code:`results`, but also a list of :code:`failures`." 
-msgstr "" -"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:`configure_fit`)的所有客户端获得结果。因此 " -":code:`aggregate_fit` 会收到 :code:`results` 的列表,但也会收到 :code:`failures` 的列表。" +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:113 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:349 +#: ../../source/tutorial-quickstart-jax.rst:319 +msgid "Next Steps" +msgstr "下一步工作" -#: ../../source/how-to-implement-strategies.rst:260 +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:115 msgid "" -":code:`aggregate_fit` returns an optional :code:`Parameters` object and a" -" dictionary of aggregated metrics. The :code:`Parameters` return value is" -" optional because :code:`aggregate_fit` might decide that the results " -"provided are not sufficient for aggregation (e.g., too many failures)." -msgstr "" -":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` " -"对象和一个聚合度量的字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` " -"可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" - -#: ../../source/how-to-implement-strategies.rst:263 -msgid "The :code:`configure_evaluate` method" -msgstr ":code:`configure_evaluate`方法" - -#: ../../source/how-to-implement-strategies.rst:265 -msgid "" -":code:`configure_evaluate` is responsible for configuring the upcoming " -"round of evaluation. What does *configure* mean in this context? " -"Configuring a round means selecting clients and deciding what " -"instructions to send to these clients. The signature of " -":code:`configure_evaluate` makes this clear:" +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" 
msgstr "" -":code:`configure_evaluate` " -"负责配置下一轮评估。*配置*在这里是什么意思?配置一轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:`configure_evaluate`" -" 说明了这一点:" +"本示例的完整源代码可在 `_ " +"找到。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。让我们准备好进一步探讨这一主题。如在每个客户端使用不同的 " +"CIFAR-10 子集,或者增加客户端的数量。" -#: ../../source/how-to-implement-strategies.rst:278 -msgid "" -"The return value is a list of tuples, each representing the instructions " -"that will be sent to a particular client. Strategy implementations " -"usually perform the following steps in :code:`configure_evaluate`:" -msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_evaluate` 中执行以下步骤:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "实例: PyTorch - 从集中式到联邦式" -#: ../../source/how-to-implement-strategies.rst:281 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 msgid "" -"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding " -"the current global model :code:`parameters` and :code:`config` dict" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." 
msgstr "" -"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " -"dict 的 :code:`EvaluateIns` 配对" +"本教程将向您展示如何使用 Flower 构建现有机器学习工作的联邦版本。我们使用 PyTorch 在 CIFAR-10 " +"数据集上训练一个卷积神经网络。首先,我们基于 \"Deep Learning with PyTorch " +"`_\"教程,采用集中式训练方法介绍了这项机器学习任务。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" -#: ../../source/how-to-implement-strategies.rst:283 -#, fuzzy +#: ../../source/example-pytorch-from-centralized-to-federated.rst:14 msgid "" -"More sophisticated implementations can use :code:`configure_evaluate` to " -"implement custom client selection logic. A client will only participate " -"in a round if the corresponding :code:`ClientProxy` is included in the " -"list returned from :code:`configure_evaluate`." +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." msgstr "" -"更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻辑。只有当相应的 " -":code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的列表中时,客户端才会参与进来。" +"我们首先简要介绍一下集中式 CNN 训练代码。如果您想获得更深入的解释,请参阅 PyTorch 官方教程`PyTorch tutorial " +"`_。" -#: ../../source/how-to-implement-strategies.rst:287 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:18 +#, fuzzy msgid "" -"The structure of this return value provides a lot of flexibility to the " -"user. Since instructions are defined on a per-client basis, different " -"instructions can be sent to each client. This enables custom strategies " -"to evaluate, for example, different models on different clients, or use " -"different hyperparameters on different clients (via the :code:`config` " -"dict)." +"Let's create a new file called ``cifar.py`` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as ``torch`` and ``torchvision``) need to be " +"imported. You can see that we do not import any package for federated " +"learning. 
You can keep all these imports as they are even when we add the" +" federated learning components at a later point." msgstr "" -"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型,或在不同客户端上使用不同的超参数(通过" -" :code:`config` dict)。" - -#: ../../source/how-to-implement-strategies.rst:291 -msgid "The :code:`aggregate_evaluate` method" -msgstr ":code:`aggregate_evaluate` 方法" +"让我们创建一个名为 :code:`cifar.py` 的新文件,其中包含 CIFAR-10 " +"传统(集中)培训所需的所有组件。首先,需要导入所有必需的软件包(如 :code:`torch` 和 " +":code:`torchvision`)。您可以看到,我们没有导入任何用于联邦学习的软件包。即使在以后添加联邦学习组件时,也可以保留所有这些导入。" -#: ../../source/how-to-implement-strategies.rst:293 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:36 +#, fuzzy msgid "" -":code:`aggregate_evaluate` is responsible for aggregating the results " -"returned by the clients that were selected and asked to evaluate in " -":code:`configure_evaluate`." +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in ``class Net()``." msgstr "" -":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` " -"中选择并要求评估的客户端返回的结果。" +"如前所述,我们将使用 CIFAR-10 数据集进行机器学习。模型架构(一个非常简单的卷积神经网络)在 :code:`class Net()` " +"中定义。" -#: ../../source/how-to-implement-strategies.rst:306 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:62 +#, fuzzy msgid "" -"Of course, failures can happen, so there is no guarantee that the server " -"will get results from all the clients it sent instructions to (via " -":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " -"receives a list of :code:`results`, but also a list of :code:`failures`." +"The ``load_data()`` function loads the CIFAR-10 training and test sets. " +"The ``transform`` normalized the data after loading." 
msgstr "" -"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 " -":code:`configure_evaluate`)的所有客户端获得结果。因此, :code:`aggregate_evaluate` 会接收 " -":code:`results` 的列表,但也会接收 :code:`failures` 的列表。" +":code:`load_data()` 函数加载 CIFAR-10 " +"训练集和测试集。加载数据后,:code:`transform`函数对数据进行了归一化处理。" -#: ../../source/how-to-implement-strategies.rst:308 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:84 +#, fuzzy msgid "" -":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" -" dictionary of aggregated metrics. The :code:`float` return value is " -"optional because :code:`aggregate_evaluate` might decide that the results" -" provided are not sufficient for aggregation (e.g., too many failures)." -msgstr "" -":code:`aggregate_evaluate` 返回一个可选的 " -":code:`float`(损失值)和一个聚合指标字典。:code:`float` 返回值是可选的,因为 " -":code:`aggregate_evaluate` 可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" +"We now need to define the training (function ``train()``) which loops " +"over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." +msgstr "现在,我们需要定义训练函数(:code:`train()`),该函数在训练集上循环训练,计算损失值并反向传播,然后为每批训练数据在优化器上执行一个优化步骤。" -#: ../../source/how-to-implement-strategies.rst:311 -msgid "The :code:`evaluate` method" -msgstr ":code:`evaluate`方法" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:88 +#, fuzzy +msgid "" +"The evaluation of the model is defined in the function ``test()``. The " +"function loops over all test samples and measures the loss of the model " +"based on the test dataset." +msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并计算测试数据集的模型损失值。" -#: ../../source/how-to-implement-strategies.rst:313 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:149 msgid "" -":code:`evaluate` is responsible for evaluating model parameters on the " -"server-side. 
Having :code:`evaluate` in addition to " -":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " -"to perform both servers-side and client-side (federated) evaluation." -msgstr "" -":code:`evaluate` 负责在服务器端评估模型参数。除了 " -":code:`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate`" -" 可以使策略同时执行服务器端和客户端(联邦)评估。" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." +msgstr "在确定了数据加载、模型架构、训练和评估之后,我们就可以将所有整合在一起,在 CIFAR-10 上训练我们的 CNN。" -#: ../../source/how-to-implement-strategies.rst:323 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:177 msgid "" -"The return value is again optional because the strategy might not need to" -" implement server-side evaluation or because the user-defined " -":code:`evaluate` method might not complete successfully (e.g., it might " -"fail to load the server-side evaluation data)." +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." 
msgstr "" -"返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :code:`evaluate` " -"方法可能无法成功完成(例如,它可能无法加载服务器端评估数据)。" - -#: ../../source/how-to-install-flower.rst:2 -msgid "Install Flower" -msgstr "安装Flower" - -#: ../../source/how-to-install-flower.rst:6 -msgid "Python version" -msgstr "Python 版本" - -#: ../../source/how-to-install-flower.rst:12 -msgid "Install stable release" -msgstr "安装稳定版" - -#: ../../source/how-to-install-flower.rst:15 -#: ../../source/how-to-upgrade-to-flower-next.rst:46 -#, fuzzy -msgid "Using pip" -msgstr "使用 pip" +"到目前为止,如果你以前用过 " +"PyTorch,这一切看起来应该相当熟悉。让我们进行下一步,利用我们所构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" -#: ../../source/how-to-install-flower.rst:17 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:184 msgid "" -"Stable releases are available on `PyPI " -"`_::" -msgstr "稳定版本可在 `PyPI `_::" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." +msgstr "上一节讨论的简单机器学习项目在单一数据集(CIFAR-10)上训练模型,我们称之为集中学习。如上一节所示,集中学习的概念可能为大多数人所熟知,而且很多人以前都使用过。通常情况下,如果要以联邦方式运行机器学习工作,就必须更改大部分代码,并从头开始设置一切。这可能是一个相当大的工作量。" -#: ../../source/how-to-install-flower.rst:21 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` should be " -"installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr`` 应与`simulation`` 一起安装:" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." 
+msgstr "不过,有了 Flower,您可以轻松地将已有的代码转变成联邦学习的模式,无需进行大量重写。" -#: ../../source/how-to-install-flower.rst:27 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:194 #, fuzzy -msgid "Using conda (or mamba)" -msgstr "使用 conda(或 mamba)" +msgid "" +"The concept is easy to understand. We have to start a *server* and then " +"use the code in ``cifar.py`` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." +msgstr "" +"这个概念很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +":code:`cifar.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" -#: ../../source/how-to-install-flower.rst:29 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:201 +#: ../../source/tutorial-quickstart-jax.rst:147 #, fuzzy -msgid "Flower can also be installed from the ``conda-forge`` channel." -msgstr "Flower 也可以从 ``conda-forge`` 频道安装。" +msgid "" +"Our example consists of one *server* and two *clients*. Let's set up " +"``server.py`` first. The *server* needs to import the Flower package " +"``flwr``. Next, we use the ``start_server`` function to start a server " +"and tell it to perform three rounds of federated learning." 
+msgstr "" +"我们的示例包括一个*服务器*和两个*客户端*。让我们先设置 :code:`server.py`。*服务器*需要导入 Flower 软件包 " +":code:`flwr`。接下来,我们使用 :code:`start_server` 函数启动服务器,并让它执行三轮联邦学习。" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:215 +#: ../../source/tutorial-quickstart-jax.rst:161 +msgid "We can already start the *server*:" +msgstr "我们已经可以启动*服务器*了:" -#: ../../source/how-to-install-flower.rst:31 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 #, fuzzy msgid "" -"If you have not added ``conda-forge`` to your channels, you will first " -"need to run the following::" -msgstr "如果您尚未在频道中添加 ``conda-forge``,则首先需要运行以下程序::" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined centralized training in ``cifar.py``. Our " +"*client* needs to import ``flwr``, but also ``torch`` to update the " +"parameters on our PyTorch model:" +msgstr "" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 :code:`cifar.py` " +"中定义的集中式训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 :code:`torch`,以更新 " +"PyTorch 模型的参数:" -#: ../../source/how-to-install-flower.rst:36 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:238 #, fuzzy msgid "" -"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " -"installed with ``conda``::" -msgstr "一旦启用了 ``conda-forge`` 频道,就可以使用 ``conda``: 安装 ``flwr``:" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``CifarClient``. ``NumPyClient`` is slightly easier to implement " +"than ``Client`` if you use a framework with good NumPy interoperability " +"(like PyTorch or TensorFlow/Keras) because it avoids some of the " +"boilerplate that would otherwise be necessary. 
``CifarClient`` needs to " +"implement four methods, two methods for getting/setting model parameters," +" one method for training the model, and one method for testing the model:" +msgstr "" +"实现 Flower *client*基本上意味着实现 :code:`flwr.client.Client` 或 " +":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +":code:`flwr.client.NumPyClient`,并将其命名为 :code:`CifarClient`。如果使用具有良好 NumPy" +" 互操作性的框架(如 PyTorch 或 TensorFlow/Keras),:code:`NumPyClient`的实现比 " +":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/how-to-install-flower.rst:40 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 #, fuzzy -msgid "or with ``mamba``::" -msgstr "或用 ``mamba`` ::" +msgid "``set_parameters``" +msgstr ":code:`set_parameters`" -#: ../../source/how-to-install-flower.rst:46 -msgid "Verify installation" -msgstr "验证安装" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:248 +#: ../../source/tutorial-quickstart-jax.rst:192 +msgid "" +"set the model parameters on the local model that are received from the " +"server" +msgstr "在本地模型上设置从服务器接收的模型参数" -#: ../../source/how-to-install-flower.rst:48 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:249 +#: ../../source/tutorial-quickstart-jax.rst:194 #, fuzzy msgid "" -"The following command can be used to verify if Flower was successfully " -"installed. 
If everything worked, it should print the version of Flower to" -" the command line::" -msgstr "可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 Flower 的版本::" - -#: ../../source/how-to-install-flower.rst:55 -msgid "Advanced installation options" -msgstr "高级安装选项" +"loop over the list of model parameters received as NumPy ``ndarray``'s " +"(think list of neural network layers)" +msgstr "循环遍历以 NumPy :code:`ndarray` 形式接收的模型参数列表(可以看作神经网络的列表)" -#: ../../source/how-to-install-flower.rst:58 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 +#: ../../source/tutorial-quickstart-scikitlearn.rst:129 #, fuzzy -msgid "Install via Docker" -msgstr "安装Flower" +msgid "``get_parameters``" +msgstr ":code:`get_parameters`" -#: ../../source/how-to-install-flower.rst:60 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:252 +#: ../../source/tutorial-quickstart-jax.rst:197 #, fuzzy -msgid ":doc:`How to run Flower using Docker `" +msgid "" +"get the model parameters and return them as a list of NumPy ``ndarray``'s" +" (which is what ``flwr.client.NumPyClient`` expects)" msgstr "" -"`TensorFlow快速入门 (教程) `_" +"获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " +":code:`flwr.client.NumPyClient`所匹配的格式)" -#: ../../source/how-to-install-flower.rst:63 -msgid "Install pre-release" -msgstr "安装预发布版本" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "``fit``" +msgstr "" -#: ../../source/how-to-install-flower.rst:65 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:255 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:260 +#: ../../source/tutorial-quickstart-jax.rst:200 +#: ../../source/tutorial-quickstart-jax.rst:205 msgid "" -"New (possibly unstable) versions of Flower are sometimes available as " -"pre-release versions (alpha, beta, release candidate) before 
the stable " -"release happens::" -msgstr "在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本(alpha、beta、候选发布版本)提供::" +"update the parameters of the local model with the parameters received " +"from the server" +msgstr "用从服务器接收到的参数更新本地模型的参数" -#: ../../source/how-to-install-flower.rst:69 -msgid "" -"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" -" should be installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr``预发行版应与`simulation``一起安装:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:257 +#: ../../source/tutorial-quickstart-jax.rst:202 +msgid "train the model on the local training set" +msgstr "在本地训练集上训练模型" -#: ../../source/how-to-install-flower.rst:74 -msgid "Install nightly release" -msgstr "安装隔夜版本" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:258 +msgid "get the updated local model weights and return them to the server" +msgstr "获取更新后的本地模型参数并发送回服务器" -#: ../../source/how-to-install-flower.rst:76 -msgid "" -"The latest (potentially unstable) changes in Flower are available as " -"nightly releases::" -msgstr "Flower 中最新(可能不稳定)的更改以隔夜发布的形式提供::" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +#: ../../source/tutorial-quickstart-jax.rst:208 +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +#, fuzzy +msgid "``evaluate``" +msgstr ":code:`evaluate`" -#: ../../source/how-to-install-flower.rst:80 -msgid "" -"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " -"should be installed with the ``simulation`` extra::" -msgstr "对于使用虚拟客户端引擎的模拟,`flwr-nightly`应与`simulation`一起安装:" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:262 +#: ../../source/tutorial-quickstart-jax.rst:207 +msgid "evaluate the updated model on the local test set" +msgstr "在本地测试集上评估更新后的模型" -#: ../../source/how-to-monitor-simulation.rst:2 -msgid "Monitor simulation" -msgstr "监控模拟" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:263 +msgid "return 
the local loss and accuracy to the server" +msgstr "向服务器返回本地损失值和精确度" -#: ../../source/how-to-monitor-simulation.rst:4 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:265 +#, fuzzy msgid "" -"Flower allows you to monitor system resources while running your " -"simulation. Moreover, the Flower simulation engine is powerful and " -"enables you to decide how to allocate resources per client manner and " -"constrain the total usage. Insights from resource consumption can help " -"you make smarter decisions and speed up the execution time." +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``test()`` previously defined in ``cifar.py``. " +"So what we really do here is we tell Flower through our ``NumPyClient`` " +"subclass which of our already defined functions to call for training and " +"evaluation. We included type annotations to give you a better " +"understanding of the data types that get passed around." msgstr "" -"Flower 允许您在运行模拟时监控系统资源。此外,Flower " -"仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" +"这两个 :code:`NumPyClient` 中的方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " +":code:`cifar.py` 中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们在这里要做的就是通过 " +":code:`NumPyClient` 子类告知 Flower " +"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让你更好地理解传递的数据类型。" -#: ../../source/how-to-monitor-simulation.rst:6 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:315 +#, fuzzy msgid "" -"The specific instructions assume you are using macOS and have the " -"`Homebrew `_ package manager installed." -msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" +"All that's left to do it to define a function that loads both model and " +"data, creates a ``CifarClient``, and starts this client. You load your " +"data and model by using ``cifar.py``. 
Start ``CifarClient`` with the " +"function ``fl.client.start_client()`` by pointing it at the same IP " +"address we used in ``server.py``:" +msgstr "剩下的就是定义模型和数据加载函数了。创建一个:code:`CifarClient`类,并运行这个客服端。您将通过:code:`cifar.py`加载数据和模型。另外,通过:code:`fl.client.start_client()`函数来运行客户端:code:`CifarClient`,需要保证IP地址和:code:`server.py`中所使用的一致:" -#: ../../source/how-to-monitor-simulation.rst:10 -msgid "Downloads" -msgstr "下载" +#: ../../source/example-pytorch-from-centralized-to-federated.rst:338 +#: ../../source/tutorial-quickstart-jax.rst:309 +msgid "And that's it. You can now open two additional terminal windows and run" +msgstr "就是这样,现在你可以打开另外两个终端窗口,然后运行" -#: ../../source/how-to-monitor-simulation.rst:16 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:344 msgid "" -"`Prometheus `_ is used for data collection, while" -" `Grafana `_ will enable you to visualize the " -"collected data. They are both well integrated with `Ray " -"`_ which Flower uses under the hood." -msgstr "" -"`Prometheus `_ 用于收集数据,而 `Grafana " -"`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " -"`_ 紧密集成。" +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" +msgstr "确保服务器正在运行后,您就能看到您的 PyTorch 项目(之前是集中式的)在两个客户端上运行联邦学习了。祝贺!" -#: ../../source/how-to-monitor-simulation.rst:18 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:351 msgid "" -"Overwrite the configuration files (depending on your device, it might be " -"installed on a different path)." -msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" - -#: ../../source/how-to-monitor-simulation.rst:20 -msgid "If you are on an M1 Mac, it should be:" -msgstr "如果你使用的是 M1 Mac,应该是这样:" +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. 
You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" +msgstr "" +"本示例的完整源代码为:`PyTorch: 从集中式到联合式 " +"`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在,您已经准备好进一步探讨这一主题了。比如在每个客户端使用不同的" +" CIFAR-10 子集会如何?增加更多客户端会如何?" -#: ../../source/how-to-monitor-simulation.rst:27 -msgid "On the previous generation Intel Mac devices, it should be:" -msgstr "在上一代英特尔 Mac 设备上,应该是这样:" +#: ../../source/explanation-differential-privacy.rst:2 +#: ../../source/explanation-differential-privacy.rst:14 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-monitor-simulation.rst:34 +#: ../../source/explanation-differential-privacy.rst:4 +#, fuzzy msgid "" -"Open the respective configuration files and change them. Depending on " -"your device, use one of the two following commands:" -msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" +"The information in datasets like healthcare, financial transactions, user" +" preferences, etc., is valuable and has the potential for scientific " +"breakthroughs and provides important business insights. However, such " +"data is also sensitive and there is a risk of compromising individual " +"privacy." +msgstr "医疗保健、金融交易、用户偏好等数据集中的信息非常宝贵,有可能带来科学突破并提供重要的商业见解。然而,这些数据也是敏感数据,存在泄露个人隐私的风险。" -#: ../../source/how-to-monitor-simulation.rst:44 +#: ../../source/explanation-differential-privacy.rst:9 +#, fuzzy msgid "" -"and then delete all the text in the file and paste a new Prometheus " -"config you see below. You may adjust the time intervals to your " -"requirements:" -msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" +"Traditional methods like anonymization alone would not work because of " +"attacks like Re-identification and Data Linkage. That's where " +"differential privacy comes in. It provides the possibility of analyzing " +"data while ensuring the privacy of individuals." 
+msgstr "单靠匿名等传统方法是行不通的,因为会受到重新识别和数据链接等攻击。这就是差异化隐私的用武之地。它提供了在分析数据的同时确保个人隐私的可能性。" -#: ../../source/how-to-monitor-simulation.rst:59 +#: ../../source/explanation-differential-privacy.rst:16 +#, fuzzy msgid "" -"Now after you have edited the Prometheus configuration, do the same with " -"the Grafana configuration files. Open those using one of the following " -"commands as before:" -msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" +"Imagine two datasets that are identical except for a single record (for " +"instance, Alice's data). Differential Privacy (DP) guarantees that any " +"analysis (M), like calculating the average income, will produce nearly " +"identical results for both datasets (O and O' would be similar). This " +"preserves group patterns while obscuring individual details, ensuring the" +" individual's information remains hidden in the crowd." +msgstr "" +"试想一下,两个数据集除了一条记录(例如 Alice " +"的数据)之外完全相同。差分隐私(DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的结果(O 和 O' " +"将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的信息隐藏在人群中。" -#: ../../source/how-to-monitor-simulation.rst:69 -msgid "" -"Your terminal editor should open and allow you to apply the following " -"configuration as before." -msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" +#: ../../source/explanation-differential-privacy.rst:-1 +#, fuzzy +msgid "DP Intro" +msgstr "DP 介绍" -#: ../../source/how-to-monitor-simulation.rst:84 +#: ../../source/explanation-differential-privacy.rst:27 +#, fuzzy msgid "" -"Congratulations, you just downloaded all the necessary software needed " -"for metrics tracking. Now, let’s start it." -msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" +"One of the most commonly used mechanisms to achieve DP is adding enough " +"noise to the output of the analysis to mask the contribution of each " +"individual in the data while preserving the overall accuracy of the " +"analysis." 
+msgstr "实现 DP 的最常用机制之一是在分析输出中加入足够的噪音,以掩盖数据中每个个体的贡献,同时保持分析的整体准确性。" -#: ../../source/how-to-monitor-simulation.rst:88 -msgid "Tracking metrics" -msgstr "跟踪指标" +#: ../../source/explanation-differential-privacy.rst:32 +#, fuzzy +msgid "Formal Definition" +msgstr "编译 ProtoBuf 定义" -#: ../../source/how-to-monitor-simulation.rst:90 +#: ../../source/explanation-differential-privacy.rst:34 +#, fuzzy msgid "" -"Before running your Flower simulation, you have to start the monitoring " -"tools you have just installed and configured." -msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" +"Differential Privacy (DP) provides statistical guarantees against the " +"information an adversary can infer through the output of a randomized " +"algorithm. It provides an unconditional upper bound on the influence of a" +" single individual on the output of the algorithm by adding noise [1]. A " +"randomized mechanism M provides (:math:`\\epsilon`, " +":math:`\\delta`)-differential privacy if for any two neighboring " +"databases, D :sub:`1` and D :sub:`2`, that differ in only a single " +"record, and for all possible outputs S ⊆ Range(A):" +msgstr "" +"差分隐私(Differential " +"Privacy,DP)针对对手通过随机算法的输出所能推断出的信息提供统计保证。它为单个个体通过添加噪声对算法输出的影响提供了一个无条件的上限[1]。如果任意两个相邻的数据库D" +" :sub:`1`和D :sub:`2`只有一条记录不同,并且对于所有可能的输出S ⊆ " +"Range(A),随机化机制M提供(:math:`\\epsilon`,:math:`\\delta`)差异隐私:" -#: ../../source/how-to-monitor-simulation.rst:97 +#: ../../source/explanation-differential-privacy.rst:42 +#, fuzzy msgid "" -"Please include the following argument in your Python code when starting a" -" simulation." -msgstr "开始模拟时,请在 Python 代码中加入以下参数。" - -#: ../../source/how-to-monitor-simulation.rst:108 -msgid "Now, you are ready to start your workload." 
-msgstr "现在,您可以开始工作了。" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" +msgstr "" +"\\small\n" +"P[M(D_{1} \\in A)] \\leq e^{\\epsilon} P[M(D_{2} \\in A)] + \\delta" -#: ../../source/how-to-monitor-simulation.rst:110 +#: ../../source/explanation-differential-privacy.rst:47 +#, fuzzy msgid "" -"Shortly after the simulation starts, you should see the following logs in" -" your terminal:" -msgstr "模拟启动后不久,您就会在终端中看到以下日志:" - -#: ../../source/how-to-monitor-simulation.rst:117 -msgid "You can look at everything at ``_ ." -msgstr "您可以在 ``_ 查看所有内容。" +"The :math:`\\epsilon` parameter, also known as the privacy budget, is a " +"metric of privacy loss. It also controls the privacy-utility trade-off; " +"lower :math:`\\epsilon` values indicate higher levels of privacy but are " +"likely to reduce utility as well. The :math:`\\delta` parameter accounts " +"for a small probability on which the upper bound :math:`\\epsilon` does " +"not hold. The amount of noise needed to achieve differential privacy is " +"proportional to the sensitivity of the output, which measures the maximum" +" change in the output due to the inclusion or removal of a single record." +msgstr "" +":math:`\\epsilon`参数也称为隐私预算,是衡量隐私损失的指标。较低的 :math:`\\epsilon` " +"值表示较高的隐私级别,但也可能降低效用。:math:`\\delta`参数考虑了:math:`\\epsilon`上限不成立的小概率。实现差异化隐私所需的噪声量与输出的灵敏度成正比,而输出的灵敏度是指由于包含或删除一条记录而导致的输出的最大变化。" -#: ../../source/how-to-monitor-simulation.rst:119 -msgid "" -"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" -" lowest option)." -msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" +#: ../../source/explanation-differential-privacy.rst:56 +#, fuzzy +msgid "Differential Privacy in Machine Learning" +msgstr "差分隐私" -#: ../../source/how-to-monitor-simulation.rst:121 +#: ../../source/explanation-differential-privacy.rst:58 +#, fuzzy msgid "" -"Or alternatively, you can just see them in Grafana by clicking on the " -"right-up corner, “View in Grafana”. 
Please note that the Ray dashboard is" -" only accessible during the simulation. After the simulation ends, you " -"can only use Grafana to explore the metrics. You can start Grafana by " -"going to ``http://localhost:3000/``." +"DP can be utilized in machine learning to preserve the privacy of the " +"training data. Differentially private machine learning algorithms are " +"designed in a way to prevent the algorithm to learn any specific " +"information about any individual data points and subsequently prevent the" +" model from revealing sensitive information. Depending on the stage at " +"which noise is introduced, various methods exist for applying DP to " +"machine learning algorithms. One approach involves adding noise to the " +"training data (either to the features or labels), while another method " +"entails injecting noise into the gradients of the loss function during " +"model training. Additionally, such noise can be incorporated into the " +"model's output." msgstr "" -"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注意,Ray " -"仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " -"Grafana。" +"机器学习中可以利用 DP " +"来保护训练数据的隐私。差分保密机器学习算法的设计方式是防止算法学习到任何单个数据点的任何特定信息,从而防止模型泄露敏感信息。根据引入噪声的阶段,有多种方法可将" +" DP " +"应用于机器学习算法。一种方法是在训练数据(特征或标签)中添加噪声,另一种方法是在模型训练过程中向损失函数的梯度注入噪声。此外,这种噪声还可以被纳入模型的输出中。" -#: ../../source/how-to-monitor-simulation.rst:123 -msgid "" -"After you finish the visualization, stop Prometheus and Grafana. This is " -"important as they will otherwise block, for example port :code:`3000` on " -"your machine as long as they are running." 
-msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" +#: ../../source/explanation-differential-privacy.rst:69 +#, fuzzy +msgid "Differential Privacy in Federated Learning" +msgstr "扩大联邦学习的规模" -#: ../../source/how-to-monitor-simulation.rst:132 -msgid "Resource allocation" -msgstr "资源分配" +#: ../../source/explanation-differential-privacy.rst:71 +#, fuzzy +msgid "" +"Federated learning is a data minimization approach that allows multiple " +"parties to collaboratively train a model without sharing their raw data. " +"However, federated learning also introduces new privacy challenges. The " +"model updates between parties and the central server can leak information" +" about the local data. These leaks can be exploited by attacks such as " +"membership inference and property inference attacks, or model inversion " +"attacks." +msgstr "联合学习是一种数据最小化方法,允许多方在不共享原始数据的情况下合作训练一个模型。然而,联合学习也带来了新的隐私挑战。各方与中央服务器之间的模型更新可能会泄露本地数据信息。这些泄漏信息可能会被攻击利用,如成员推断攻击、属性推断攻击或模型反转攻击。" -#: ../../source/how-to-monitor-simulation.rst:134 +#: ../../source/explanation-differential-privacy.rst:78 +#, fuzzy msgid "" -"You must understand how the Ray library works to efficiently allocate " -"system resources to simulation clients on your own." -msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" +"DP can play a crucial role in federated learning to provide privacy for " +"the clients' data." +msgstr "DP 可以在联合学习中发挥重要作用,为客户数据提供隐私保护。" -#: ../../source/how-to-monitor-simulation.rst:136 +#: ../../source/explanation-differential-privacy.rst:81 +#, fuzzy msgid "" -"Initially, the simulation (which Ray handles under the hood) starts by " -"default with all the available resources on the system, which it shares " -"among the clients. It doesn't mean it divides it equally among all of " -"them, nor that the model training happens at all of them simultaneously. " -"You will learn more about that in the later part of this blog. 
You can " -"check the system resources by running the following:" +"Depending on the granularity of privacy provision or the location of " +"noise addition, different forms of DP exist in federated learning. In " +"this explainer, we focus on two approaches of DP utilization in federated" +" learning based on where the noise is added: at the server (also known as" +" the center) or at the client (also known as the local)." msgstr "" -"最初,模拟(由 Ray " -"在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" +"根据提供隐私的粒度或添加噪声的位置,联合学习中存在不同形式的 DP。在本说明中,我们将根据添加噪声的位置,重点介绍联合学习中利用 DP " +"的两种方法:在服务器(也称为中心)或客户端(也称为本地)。" -#: ../../source/how-to-monitor-simulation.rst:143 -msgid "In Google Colab, the result you see might be similar to this:" -msgstr "在 Google Colab 中,您看到的结果可能与此类似:" +#: ../../source/explanation-differential-privacy.rst:86 +#, fuzzy +msgid "" +"**Central Differential Privacy**: DP is applied by the server and the " +"goal is to prevent the aggregated model from leaking information about " +"each client's data." +msgstr "**中央差分隐私**: DP 由服务器应用,目标是防止聚合模型泄露每个客户的数据信息。" -#: ../../source/how-to-monitor-simulation.rst:155 +#: ../../source/explanation-differential-privacy.rst:88 +#, fuzzy msgid "" -"However, you can overwrite the defaults. When starting a simulation, do " -"the following (you don't need to overwrite all of them):" -msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" +"**Local Differential Privacy**: DP is applied on the client side before " +"sending any information to the server and the goal is to prevent the " +"updates that are sent to the server from leaking any information about " +"the client's data." +msgstr "**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 DP,目的是防止向服务器发送的更新泄露任何有关客户端数据的信息。" -#: ../../source/how-to-monitor-simulation.rst:175 -msgid "Let’s also specify the resource for a single client." 
-msgstr "我们还可以为单个客户指定资源。" +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:93 +#: ../../source/how-to-use-differential-privacy.rst:15 +#, fuzzy +msgid "Central Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-monitor-simulation.rst:205 +#: ../../source/explanation-differential-privacy.rst:95 +#, fuzzy msgid "" -"Now comes the crucial part. Ray will start a new client only when it has " -"all the required resources (such that they run in parallel) when the " -"resources allow." -msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" +"In this approach, which is also known as user-level DP, the central " +"server is responsible for adding noise to the globally aggregated " +"parameters. It should be noted that trust in the server is required." +msgstr "在这种方法(也称为用户级 DP)中,中央服务器负责在全局汇总参数中添加噪声。需要注意的是,这需要对服务器的信任。" -#: ../../source/how-to-monitor-simulation.rst:207 +#: ../../source/explanation-differential-privacy.rst:104 +#, fuzzy msgid "" -"In the example above, only one client will be run, so your clients won't " -"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " -"running two clients and therefore enable them to run concurrently. Be " -"careful not to require more resources than available. If you specified " -":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " -"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." -msgstr "" -"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " -"将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " -"2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" - -#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 -msgid "FAQ" -msgstr "常见问题" - -#: ../../source/how-to-monitor-simulation.rst:214 -msgid "Q: I don't see any metrics logged." 
-msgstr "问:我没有看到任何指标记录。" +"While there are various ways to implement central DP in federated " +"learning, we concentrate on the algorithms proposed by [2] and [3]. The " +"overall approach is to clip the model updates sent by the clients and add" +" some amount of noise to the aggregated model. In each iteration, a " +"random set of clients is chosen with a specific probability for training." +" Each client performs local training on its own data. The update of each " +"client is then clipped by some value `S` (sensitivity `S`). This would " +"limit the impact of any individual client which is crucial for privacy " +"and often beneficial for robustness. A common approach to achieve this is" +" by restricting the `L2` norm of the clients' model updates, ensuring " +"that larger updates are scaled down to fit within the norm `S`." +msgstr "" +"虽然在联合学习中实现中央数据处理的方法有很多种,但我们将重点放在[2]和[3]提出的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪声。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数据进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限制任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。实现这一点的常用方法是限制客户机模型更新的" +" `L2` 准则,确保较大的更新被缩减以适应 `S` 准则。" -#: ../../source/how-to-monitor-simulation.rst:216 -msgid "" -"A: The timeframe might not be properly set. The setting is in the top " -"right corner (\"Last 30 minutes\" by default). Please change the " -"timeframe to reflect the period when the simulation was running." -msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" +#: ../../source/explanation-differential-privacy.rst:-1 +#, fuzzy +msgid "clipping" +msgstr "剪贴" -#: ../../source/how-to-monitor-simulation.rst:218 +#: ../../source/explanation-differential-privacy.rst:120 +#, fuzzy msgid "" -"Q: I see “Grafana server not detected. Please make sure the Grafana " -"server is running and refresh this page” after going to the Metrics tab " -"in Ray Dashboard." 
-msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" +"Afterwards, the Gaussian mechanism is used to add noise in order to " +"distort the sum of all clients' updates. The amount of noise is scaled to" +" the sensitivity value to obtain a privacy guarantee. The Gaussian " +"mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( " +"noise_scale * S ) / (number of sampled clients)`." +msgstr "" +"然后,使用高斯机制添加噪声,以扭曲所有客户端的更新总和。噪声量与灵敏度值成正比,以获得隐私保证。高斯机制的噪声采样范围为 `N (0, σ²)` " +",其中 σ = ( 噪声规模 * S ) / (采样客户数)`。" -#: ../../source/how-to-monitor-simulation.rst:220 -msgid "" -"A: You probably don't have Grafana running. Please check the running " -"services" -msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" +#: ../../source/explanation-differential-privacy.rst:126 +#, fuzzy +msgid "Clipping" +msgstr "剪贴" -#: ../../source/how-to-monitor-simulation.rst:226 +#: ../../source/explanation-differential-privacy.rst:128 +#, fuzzy msgid "" -"Q: I see \"This site can't be reached\" when going to " -"``_." -msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" +"There are two forms of clipping commonly used in Central DP: Fixed " +"Clipping and Adaptive Clipping." +msgstr "中央处理器常用的剪切有两种形式:固定剪切和自适应剪切。" -#: ../../source/how-to-monitor-simulation.rst:228 +#: ../../source/explanation-differential-privacy.rst:131 +#, fuzzy msgid "" -"A: Either the simulation has already finished, or you still need to start" -" Prometheus." -msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" - -#: ../../source/how-to-monitor-simulation.rst:232 -msgid "Resources" -msgstr "资源" +"**Fixed Clipping** : A predefined fix threshold is set for the magnitude " +"of clients' updates. Any update exceeding this threshold is clipped back " +"to the threshold value." 
+msgstr "** 固定削波** : 为客户端更新的大小设置了一个预定义的固定阈值。任何超过该阈值的更新都会被剪切回阈值。" -#: ../../source/how-to-monitor-simulation.rst:234 +#: ../../source/explanation-differential-privacy.rst:133 #, fuzzy msgid "" -"Ray Dashboard: ``_" -msgstr "Ray 仪表盘: ``_" +"**Adaptive Clipping** : The clipping threshold dynamically adjusts based " +"on the observed update distribution [4]. It means that the clipping value" +" is tuned during the rounds with respect to the quantile of the update " +"norm distribution." +msgstr "** 自适应削波** : 削波阈值根据观察到的更新分布动态调整[4]。这意味着,在各轮中,会根据更新规范分布的量化值调整削波值。" -#: ../../source/how-to-monitor-simulation.rst:236 +#: ../../source/explanation-differential-privacy.rst:137 #, fuzzy -msgid "Ray Metrics: ``_" -msgstr "" -"Ray 指标: ``_" +msgid "" +"The choice between fixed and adaptive clipping depends on various factors" +" such as privacy requirements, data distribution, model complexity, and " +"others." +msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型复杂性等。" -#: ../../source/how-to-run-flower-using-docker.rst:2 +#: ../../source/explanation-differential-privacy.rst:-1 +#: ../../source/explanation-differential-privacy.rst:141 +#: ../../source/how-to-use-differential-privacy.rst:113 #, fuzzy -msgid "Run Flower using Docker" -msgstr "使用 Docker 运行 Flower" +msgid "Local Differential Privacy" +msgstr "差分隐私" -#: ../../source/how-to-run-flower-using-docker.rst:4 +#: ../../source/explanation-differential-privacy.rst:143 #, fuzzy msgid "" -"The simplest way to get started with Flower is by using the pre-made " -"Docker images, which you can find on `Docker Hub " -"`__. Supported architectures include " -"``amd64`` and ``arm64v8``." +"In this approach, each client is responsible for performing DP. Local DP " +"avoids the need for a fully trusted aggregator, but it should be noted " +"that local DP leads to a decrease in accuracy but better privacy in " +"comparison to central DP." 
msgstr "" -"开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 `Docker Hub " -"`_ 上找到这些镜像。" +"在这种方法中,每个客户端都负责执行 DP。本地 DP 避免了对完全可信的聚合器的需求,但需要注意的是,与中央 DP 相比,本地 DP " +"会降低准确性,但却能更好地保护隐私。" -#: ../../source/how-to-run-flower-using-docker.rst:8 +#: ../../source/explanation-differential-privacy.rst:152 #, fuzzy -msgid "Before you start, make sure that the Docker daemon is running:" -msgstr "开始之前,请确保 Docker 守护进程正在运行:" +msgid "In this explainer, we focus on two forms of achieving Local DP:" +msgstr "在本说明中,我们将重点介绍实现本地 DP 的两种形式:" -#: ../../source/how-to-run-flower-using-docker.rst:15 +#: ../../source/explanation-differential-privacy.rst:154 #, fuzzy msgid "" -"If you do not see the version of Docker but instead get an error saying " -"that the command was not found, you will need to install Docker first. " -"You can find installation instruction `here `_." +"Each client adds noise to the local updates before sending them to the " +"server. To achieve (:math:`\\epsilon`, :math:`\\delta`)-DP, considering " +"the sensitivity of the local model to be ∆, Gaussian noise is applied " +"with a noise scale of σ where:" msgstr "" -"如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。你可以在 " -"`_ 找到安装说明。" +"每个客户端在向服务器发送本地更新之前,都会在本地更新中加入噪声。为了实现(:math:`\\epsilon`, " +":math:`\\delta`)-DP,考虑到本地模型的灵敏度为 ∆,应用了高斯噪声,噪声尺度为 σ,其中:" -#: ../../source/how-to-run-flower-using-docker.rst:21 +#: ../../source/explanation-differential-privacy.rst:158 #, fuzzy msgid "" -"On Linux, Docker commands require ``sudo`` privilege. If you want to " -"avoid using ``sudo``, you can follow the `Post-installation steps " -"`_ on the " -"official Docker website." 
+"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}" msgstr "" -"在 Linux 上,Docker 命令需要 ``sudo`` 权限。如果你想避免使用 ``sudo``,可以按照 Docker 官方网站上的 " -"`安装后步骤 `_进行操作。" +"\\small\n" +"\\frac{∆ \\times \\sqrt{2 \\times " +"\\log\\left(\\frac{1.25}{\\delta}\\right)}}{\\epsilon}\n" +"\n" -#: ../../source/how-to-run-flower-using-docker.rst:27 +#: ../../source/explanation-differential-privacy.rst:163 #, fuzzy msgid "" -"To ensure optimal performance and compatibility, the SuperLink, SuperNode" -" and ServerApp image must have the same version when running together. " -"This guarantees seamless integration and avoids potential conflicts or " -"issues that may arise from using different versions." -msgstr "" -"为确保最佳性能和兼容性,SuperLink、SuperNode 和 ServerApp " -"映像在一起运行时必须具有相同的版本。这可确保无缝集成,并避免因使用不同版本而可能产生的潜在冲突或问题。" +"Each client adds noise to the gradients of the model during the local " +"training (DP-SGD). More specifically, in this approach, gradients are " +"clipped and an amount of calibrated noise is injected into the gradients." +msgstr "在局部训练过程中,每个客户端都会向模型的梯度添加噪声(DP-SGD)。更具体地说,在这种方法中,梯度会被剪切,并在梯度中注入一定量的校准噪声。" -#: ../../source/how-to-run-flower-using-docker.rst:32 +#: ../../source/explanation-differential-privacy.rst:167 #, fuzzy -msgid "Flower SuperLink" -msgstr "flower-superlink" +msgid "" +"Please note that these two approaches are providing privacy at different " +"levels." +msgstr "请注意,这两种方法提供了不同层次的隐私。" -#: ../../source/how-to-run-flower-using-docker.rst:35 +#: ../../source/explanation-differential-privacy.rst:169 #, fuzzy -msgid "Quickstart" -msgstr "快速入门 JAX" +msgid "**References:**" +msgstr "参考资料" -#: ../../source/how-to-run-flower-using-docker.rst:37 +#: ../../source/explanation-differential-privacy.rst:171 #, fuzzy -msgid "If you're looking to try out Flower, you can use the following command:" -msgstr "如果您想试用 Flower,可以使用以下命令:" +msgid "[1] Dwork et al. The Algorithmic Foundations of Differential Privacy." 
+msgstr "[1] Dwork 等:《差分隐私的算法基础》。" -#: ../../source/how-to-run-flower-using-docker.rst:43 +#: ../../source/explanation-differential-privacy.rst:173 #, fuzzy msgid "" -"The command pulls the Docker image with the tag ``1.8.0`` from Docker " -"Hub. The tag specifies the Flower version. In this case, Flower 1.8.0. " -"The ``--rm`` flag tells Docker to remove the container after it exits." +"[2] McMahan et al. Learning Differentially Private Recurrent Language " +"Models." msgstr "" -"该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的 Docker 镜像。标签包含使用 " -"Flower、Python 和 Ubuntu 的信息。在本例中,它使用了 Flower 1.7.0、Python 3.11 和 Ubuntu " -"22.04。rm \"标记告诉 Docker 在退出后移除容器。" +"McMahan, H. Brendan等. \"Learning differentially private recurrent " +"language models.\" arXiv preprint arXiv:1710.06963 (2017)." -#: ../../source/how-to-run-flower-using-docker.rst:49 +#: ../../source/explanation-differential-privacy.rst:175 #, fuzzy msgid "" -"By default, the Flower SuperLink keeps state in-memory. When using the " -"Docker flag ``--rm``, the state is not persisted between container " -"starts. We will show below how to save the state in a file on your host " -"system." -msgstr "" -"默认情况下,Flower 服务器会将状态保存在内存中。使用 Docker 标志 ``--rm`` " -"时,状态不会在容器启动之间持久化。下面我们将展示如何将状态保存到主机系统上的文件中。" +"[3] Geyer et al. Differentially Private Federated Learning: A Client " +"Level Perspective." +msgstr "[3] Geyer 等人。差异化化私人联合学习:客户层面的视角。" -#: ../../source/how-to-run-flower-using-docker.rst:53 +#: ../../source/explanation-differential-privacy.rst:177 #, fuzzy -msgid "" -"The ``-p :`` flag tells Docker to map the ports " -"``9091``/``9092`` of the host to ``9091``/``9092`` of the container, " -"allowing you to access the Driver API on ``http://localhost:9091`` and " -"the Fleet API on ``http://localhost:9092``. Lastly, any flag that comes " -"after the tag is passed to the Flower SuperLink. Here, we are passing the" -" flag ``--insecure``." +msgid "[4] Galen et al. 
Differentially Private Learning with Adaptive Clipping." msgstr "" -"``-p :`` 标记会告诉 Docker 将主机的端口 ``9091``/``9092`` 映射到容器的端口 " -"``9091``/`9092``,这样你就可以在 ``http://localhost:9091`` 上访问 Driver API,在 " -"``http://localhost:9092`` 上访问 Fleet API。最后,标签后面的任何标志都会传递给 Flower " -"服务器。在这里,我们传递的标志是 ``--insecure`` 。" +"Andrew, Galen等. \"Differentially private learning with adaptive " +"clipping.\" Advances in Neural Information Processing Systems 34 (2021): " +"17455-17466." -#: ../../source/how-to-run-flower-using-docker.rst:60 -#: ../../source/how-to-run-flower-using-docker.rst:259 -#: ../../source/how-to-run-flower-using-docker.rst:376 -#, fuzzy -msgid "" -"The ``--insecure`` flag enables insecure communication (using HTTP, not " -"HTTPS) and should only be used for testing purposes. We strongly " -"recommend enabling `SSL `__ when " -"deploying to a production environment." -msgstr "" -"不安全 \"标志启用不安全通信(使用 HTTP,而非 HTTPS),只能用于测试目的。我们强烈建议在部署到生产环境时启用 `SSL " -"`_。" +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "联邦学习评估" -#: ../../source/how-to-run-flower-using-docker.rst:65 -#, fuzzy +#: ../../source/explanation-federated-evaluation.rst:4 msgid "" -"You can use ``--help`` to view all available flags that the SuperLink " -"supports:" -msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." 
+msgstr "评估联合学习系统中的模型主要有两种方法:集中(或服务器端)评估和联邦(或客户端)评估。" -#: ../../source/how-to-run-flower-using-docker.rst:72 -#, fuzzy -msgid "Mounting a volume to store the state on the host system" -msgstr "在主机系统上挂载卷以存储状态" +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "集中评估" -#: ../../source/how-to-run-flower-using-docker.rst:74 +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "内置策略" + +#: ../../source/explanation-federated-evaluation.rst:13 msgid "" -"If you want to persist the state of the SuperLink on your host system, " -"all you need to do is specify a directory where you want to save the file" -" on your host system and a name for the database file. By default, the " -"SuperLink container runs with a non-root user called ``app`` with the " -"user ID ``49999``. It is recommended to create new directory and change " -"the user ID of the directory to ``49999`` to ensure the mounted directory" -" has the proper permissions. If you later want to delete the directory, " -"you can change the user ID back to the current user ID by running ``sudo " -"chown -R $USER:$(id -gn) state``." -msgstr "" +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" +msgstr "所有内置策略都通过在初始化过程中提供一个评估函数来支持集中评估。评估函数是任何可以将当前全局模型参数作为输入并返回评估结果的函数:" + +#: ../../source/explanation-federated-evaluation.rst:61 +msgid "Custom Strategies" +msgstr "定制策略" -#: ../../source/how-to-run-flower-using-docker.rst:82 +#: ../../source/explanation-federated-evaluation.rst:63 #, fuzzy msgid "" -"In the example below, we create a new directory, change the user ID and " -"tell Docker via the flag ``--volume`` to mount the local ``state`` " -"directory into the ``/app/state`` directory of the container. 
" -"Furthermore, we use the flag ``--database`` to specify the name of the " -"database file." +"The ``Strategy`` abstraction provides a method called ``evaluate`` that " +"can directly be used to evaluate the current global model parameters. The" +" current server implementation calls ``evaluate`` after parameter " +"aggregation and before federated evaluation (see next paragraph)." msgstr "" -"如果想在主机系统上持久保存服务器的状态,只需在主机系统上指定保存文件的路径和数据库文件的名称即可。在下面的示例中,我们通过标志 ``-v`` 告诉" -" Docker 将用户的主目录(主机上的 ``~/``)挂载到容器的 ``/app/`` 目录中。此外,我们使用标志 ``--database``" -" 来指定数据库文件的名称。" +":code:`Strategy` 抽象提供了一个名为 :code:`evaluate` " +"的方法,可直接用于评估当前的全局模型参数。服务器会在参数聚合后和联邦评估前调用 :code:`evaluate`(见下段)。" + +#: ../../source/explanation-federated-evaluation.rst:69 +msgid "Federated Evaluation" +msgstr "联邦评估" + +#: ../../source/explanation-federated-evaluation.rst:72 +msgid "Implementing Federated Evaluation" +msgstr "实现联邦评估" -#: ../../source/how-to-run-flower-using-docker.rst:95 +#: ../../source/explanation-federated-evaluation.rst:74 #, fuzzy msgid "" -"As soon as the SuperLink starts, the file ``state.db`` is created in the " -"``state`` directory on your host system. If the file already exists, the " -"SuperLink tries to restore the state from the file. To start the " -"SuperLink with an empty database, simply remove the ``state.db`` file." -msgstr "" -"服务器一启动,就会在主机系统的用户主目录下创建文件 " -"``state.db``。如果该文件已经存在,服务器会尝试从该文件恢复状态。要以空数据库启动服务器,只需删除 ``state.db`` 文件即可。" +"Client-side evaluation happens in the ``Client.evaluate`` method and can " +"be configured from the server side." 
+msgstr "客户端评估在 :code:`Client.evaluate` 方法中进行,并可从服务器端进行配置。" -#: ../../source/how-to-run-flower-using-docker.rst:100 -#: ../../source/how-to-run-flower-using-docker.rst:281 -#: ../../source/how-to-run-flower-using-docker.rst:397 -#, fuzzy -msgid "Enabling SSL for secure connections" -msgstr "启用 SSL 连接" +#: ../../source/explanation-federated-evaluation.rst:108 +msgid "Configuring Federated Evaluation" +msgstr "配置联邦评估" -#: ../../source/how-to-run-flower-using-docker.rst:102 -#, fuzzy +#: ../../source/explanation-federated-evaluation.rst:110 msgid "" -"To enable SSL, you will need a PEM-encoded root certificate, a PEM-" -"encoded private key and a PEM-encoded certificate chain." -msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" +msgstr "联邦评估可从服务器端进行配置。内置策略支持以下参数:" -#: ../../source/how-to-run-flower-using-docker.rst:106 +#: ../../source/explanation-federated-evaluation.rst:113 #, fuzzy msgid "" -"For testing purposes, you can generate your own self-signed certificates." -" The `Enable SSL connections `__ page contains a section that" -" will guide you through the process." +"``fraction_evaluate``: a ``float`` defining the fraction of clients that " +"will be selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.1`` and ``100`` clients are connected to the server, then ``10`` will" +" be randomly selected for evaluation. If ``fraction_evaluate`` is set to " +"``0.0``, federated evaluation will be disabled." 
msgstr "" -"出于测试目的,你可以生成自己的自签名证书。启用 SSL 连接 `_ 页面中有一个部分将指导你完成这一过程。" +":code:`fraction_evaluate`: :code:`float`,定义了被选中进行评估的客户端的比例。如果 " +":code:`fraction_evaluate` 设置为 :code:`0.1`,并且 :code:`100` 个客户端连接到服务器,那么 " +":code:`10` 个客户端将被随机选中进行评估。如果 :code:`fraction_evaluate` 设置为 " +":code:`0.0`,联邦评估将被禁用。" -#: ../../source/how-to-run-flower-using-docker.rst:110 +#: ../../source/explanation-federated-evaluation.rst:118 #, fuzzy msgid "" -"Assuming all files we need are in the local ``certificates`` directory, " -"we can use the flag ``--volume`` to mount the local directory into the " -"``/app/certificates/`` directory of the container. This allows the " -"SuperLink to access the files within the container. The ``ro`` stands for" -" ``read-only``. Docker volumes default to ``read-write``; that option " -"tells Docker to make the volume ``read-only`` instead. Finally, we pass " -"the names of the certificates and key file to the SuperLink with the " -"``--ssl-ca-certfile``, ``--ssl-certfile`` and ``--ssl-keyfile`` flag." +"``min_evaluate_clients``: an ``int``: the minimum number of clients to be" +" selected for evaluation. If ``fraction_evaluate`` is set to ``0.1``, " +"``min_evaluate_clients`` is set to 20, and ``100`` clients are connected " +"to the server, then ``20`` clients will be selected for evaluation." msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" +":code:`min_evaluate_clients`:一个 :code:`int`,需要评估的客户的最小数量。如果 " +":code:`fraction_evaluate` 设置为 :code:`0.1`,:code:`min_evaluate_clients` " +"设置为 20,并且有 :code:`100` 个客户端已连接到服务器,那么 :code:`20` 个客户端将被选中进行评估。" -#: ../../source/how-to-run-flower-using-docker.rst:128 +#: ../../source/explanation-federated-evaluation.rst:122 +#, fuzzy msgid "" -"Because Flower containers, by default, run with a non-root user ``app``, " -"the mounted files and directories must have the proper permissions for " -"the user ID ``49999``. 
For example, to change the user ID of all files in" -" the ``certificates/`` directory, you can run ``sudo chown -R 49999:49999" -" certificates/*``." +"``min_available_clients``: an ``int`` that defines the minimum number of " +"clients which need to be connected to the server before a round of " +"federated evaluation can start. If fewer than ``min_available_clients`` " +"are connected to the server, the server will wait until more clients are " +"connected before it continues to sample clients for evaluation." msgstr "" +":code:`min_available_clients`: " +":code:`int`,定义了在一轮联邦评估开始之前,需要连接到服务器的最小客户端数量。如果连接到服务器的客户端数量少于 " +":code:`min_available_clients`,服务器将等待更多客户端连接后,才继续采样客户端进行评估。" -#: ../../source/how-to-run-flower-using-docker.rst:134 +#: ../../source/explanation-federated-evaluation.rst:127 #, fuzzy -msgid "Flower SuperNode" -msgstr "Flower 服务器" +msgid "" +"``on_evaluate_config_fn``: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." +msgstr "code:`on_evaluate_config_fn`:返回配置字典的函数,该字典将发送给选定的客户端。该函数将在每一轮中被调用,并提供了一种方便的方法来从服务器端自定义客户端评估,例如,配置执行的验证步骤数。" -#: ../../source/how-to-run-flower-using-docker.rst:136 +#: ../../source/explanation-federated-evaluation.rst:157 +msgid "Evaluating Local Model Updates During Training" +msgstr "评估训练期间的本地模型更新" + +#: ../../source/explanation-federated-evaluation.rst:159 #, fuzzy msgid "" -"The SuperNode Docker image comes with a pre-installed version of Flower " -"and serves as a base for building your own SuperNode image." -msgstr "超级节点 Docker 镜像预装了 Flower 版本,可作为构建自己的超级节点镜像的基础。" +"Model parameters can also be evaluated during training. 
``Client.fit`` " +"can return arbitrary evaluation results as a dictionary:" +msgstr "模型参数也可在训练过程中进行评估。 :code:`Client.fit`可以字典形式返回任意评估结果:" -#: ../../source/how-to-run-flower-using-docker.rst:141 -#, fuzzy +#: ../../source/explanation-federated-evaluation.rst:201 +msgid "Full Code Example" +msgstr "完整代码示例" + +#: ../../source/explanation-federated-evaluation.rst:203 msgid "" -"The SuperNode Docker image currently works only with the 1.9.0-nightly " -"release. A stable version will be available when Flower 1.9.0 (stable) " -"gets released (ETA: May). A SuperNode nightly image must be paired with " -"the corresponding SuperLink and ServerApp nightly images released on the " -"same day. To ensure the versions are in sync, using the concrete tag, " -"e.g., ``1.9.0.dev20240501`` instead of ``nightly`` is recommended." +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" msgstr "" -"超级节点 Docker 映像目前仅适用于 1.9.0-nightly 版本。稳定版将在 Flower " -"1.9.0(稳定版)发布时推出(预计发布时间:5 " -"月)。超级节点夜间镜像必须与同一天发布的相应超级链接和服务器应用程序夜间镜像配对。为确保版本同步,建议使用具体标签,例如``1.9.0.dev20240501``,而不是``nightly``。" +"有关同时使用集中评估和联邦评估的完整代码示例,请参阅 *Advanced TensorFlow " +"Example*(同样的方法也可应用于任何其他框架中): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" -#: ../../source/how-to-run-flower-using-docker.rst:147 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:-1 msgid "" -"We will use the ``quickstart-pytorch`` example, which you can find in the" -" Flower repository, to illustrate how you can dockerize your ClientApp." 
-msgstr "我们将使用 \"quickstart-pytorch\"(快速启动-pytorch)示例来说明如何对 ClientApp 进行 docker 化。" +"Explore the federated learning architecture of the Flower framework, " +"featuring multi-run, concurrent execution, and scalable, secure machine " +"learning while preserving data privacy." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:155 -#, fuzzy -msgid "" -"Before we can start, we need to meet a few prerequisites in our local " -"development environment. You can skip the first part if you want to run " -"your ClientApp instead of the ``quickstart-pytorch`` example." -msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" - -#: ../../source/how-to-run-flower-using-docker.rst:159 -#, fuzzy -msgid "Clone the Flower repository." -msgstr "**叉花仓库**" - -#: ../../source/how-to-run-flower-using-docker.rst:173 -#, fuzzy -msgid "Creating a SuperNode Dockerfile" -msgstr "创建超级节点 Dockerfile" - -#: ../../source/how-to-run-flower-using-docker.rst:175 -#: ../../source/how-to-run-flower-using-docker.rst:311 -#, fuzzy -msgid "Let's assume the following project layout:" -msgstr "假设项目布局如下" +#: ../../source/explanation-flower-architecture.rst:2 +msgid "Flower Architecture" +msgstr "Flower的架构" -#: ../../source/how-to-run-flower-using-docker.rst:184 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:4 msgid "" -"First, we need to create a ``requirements.txt`` file in the directory " -"where the ``ClientApp`` code is located. In the file, we list all the " -"dependencies that the ClientApp requires." +"This page explains the architecture of deployed Flower federated learning" +" system." 
msgstr "" -"首先,我们需要在 ``ClientApp`` 代码所在的目录中创建一个 ``requirements.txt`` 文件。在该文件中,我们列出了 " -"ClientApp 需要的所有依赖项。" -#: ../../source/how-to-run-flower-using-docker.rst:196 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:6 msgid "" -"Note that `flwr `__ is already installed " -"in the ``flwr/supernode`` base image, so you only need to include other " -"package dependencies in your ``requirements.txt``, such as ``torch``, " -"``tensorflow``, etc." +"In federated learning (FL), there is typically one server and a number of" +" clients that are connected to the server. This is often called a " +"federation." msgstr "" -"请注意,`flwr `__ " -"已经安装在`flwr/supernode``基础镜像中,因此只需在`requirements.txt``中包含其他依赖包,如`torch``、`tensorflow`等。" -#: ../../source/how-to-run-flower-using-docker.rst:200 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:9 msgid "" -"Next, we create a Dockerfile. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.supernode`` in ``examples" -"/quickstart-pytorch``." +"The role of the server is to coordinate the training process. The role of" +" each client is to receive tasks from the server, execute those tasks and" +" return the results back to the server." msgstr "" -"接下来,我们创建一个 Dockerfile。如果使用 ``quickstart-pytorch`` 示例,请在 ``examples" -"/quickstart-pytorch`` 中创建一个名为 ``Dockerfile.supernode`` 的新文件。" - -#: ../../source/how-to-run-flower-using-docker.rst:203 -#, fuzzy -msgid "" -"The ``Dockerfile.supernode`` contains the instructions that assemble the " -"SuperNode image." -msgstr "Dockerfile.supernode \"包含组装超级节点映像的指令。" -#: ../../source/how-to-run-flower-using-docker.rst:217 -#, fuzzy -msgid "" -"In the first two lines, we instruct Docker to use the SuperNode image " -"tagged ``nightly`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. 
Next, we install the ClientApp dependencies by copying the " -"``requirements.txt`` file into the image and run ``pip install``. In the " -"last two lines, we copy the ``client.py`` module into the image and set " -"the entry point to ``flower-client-app`` with the argument " -"``client:app``. The argument is the object reference of the ClientApp " -"(``:``) that will be run inside the ClientApp." +#: ../../source/explanation-flower-architecture.rst:13 +msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -"在前两行中,我们指示 Docker 使用标记为 ``nightly`` 的 SuperNode 镜像作为基础镜像,并将工作目录设置为 " -"``/app``。下面的指令将在 ``/app`` 目录中执行。接下来,我们通过将 ``requirements.txt`` " -"文件复制到映像中并运行 ``pip install`` 来安装 ClientApp 依赖项。最后两行,我们将 ``client.py`` " -"模块复制到映像中,并将入口点设置为 ``flower-client-app``,参数为 ``client:app``。参数是将在 " -"ClientApp 内运行的 ClientApp 的对象引用(``<模块>:<属性>``)。" - -#: ../../source/how-to-run-flower-using-docker.rst:226 -#, fuzzy -msgid "Building the SuperNode Docker image" -msgstr "启动服务器" - -#: ../../source/how-to-run-flower-using-docker.rst:228 -#, fuzzy -msgid "" -"Next, we build the SuperNode Docker image by running the following " -"command in the directory where Dockerfile and ClientApp code are located." -msgstr "接下来,我们在 Dockerfile 和 ClientApp 代码所在的目录下运行以下命令,构建 SuperNode Docker 映像。" - -#: ../../source/how-to-run-flower-using-docker.rst:235 -#, fuzzy -msgid "" -"We gave the image the name ``flwr_supernode``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "我们将图像命名为 ``flwr_supernode``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" - -#: ../../source/how-to-run-flower-using-docker.rst:240 -#, fuzzy -msgid "Running the SuperNode Docker image" -msgstr "启动服务器" - -#: ../../source/how-to-run-flower-using-docker.rst:242 -#, fuzzy -msgid "Now that we have built the SuperNode image, we can finally run it." 
-msgstr "现在,我们已经构建了超级节点镜像,终于可以运行它了。" - -#: ../../source/how-to-run-flower-using-docker.rst:250 -#: ../../source/how-to-run-flower-using-docker.rst:367 -#, fuzzy -msgid "Let's break down each part of this command:" -msgstr "让我们来分析一下这条命令的各个部分:" -#: ../../source/how-to-run-flower-using-docker.rst:252 -#: ../../source/how-to-run-flower-using-docker.rst:369 +#: ../../source/explanation-flower-architecture.rst:21 #, fuzzy -msgid "``docker run``: This is the command to run a new Docker container." -msgstr "`docker run``: 这是运行新 Docker 容器的命令。" +msgid "Hub-and-spoke topology in federated learning" +msgstr "什么是联邦学习?" -#: ../../source/how-to-run-flower-using-docker.rst:253 -#: ../../source/how-to-run-flower-using-docker.rst:370 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:21 msgid "" -"``--rm``: This option specifies that the container should be " -"automatically removed when it stops." -msgstr "`-rm``: 该选项指定容器停止时应自动移除。" - -#: ../../source/how-to-run-flower-using-docker.rst:254 -#, fuzzy -msgid "``flwr_supernode:0.0.1``: The name the tag of the Docker image to use." -msgstr "flwr_supernode:0.0.1``: 要使用的 Docker 映像的名称和标记。" - -#: ../../source/how-to-run-flower-using-docker.rst:255 -#: ../../source/how-to-run-flower-using-docker.rst:372 -#, fuzzy -msgid "``--insecure``: This option enables insecure communication." -msgstr "不安全\": 该选项启用不安全通信。" +"Hub-and-spoke topology in federated learning (one server, multiple " +"clients)." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:23 msgid "" -"``--superlink 192.168.1.100:9092``: This option specifies the address of " -"the SuperLinks Fleet" -msgstr "``--server 192.168.1.100:9092``: 该选项指定超级链接舰队的地址" - -#: ../../source/how-to-run-flower-using-docker.rst -#, fuzzy -msgid "API to connect to. Remember to update it with your SuperLink IP." 
-msgstr "要连接的 API。记住用您的超级链接 IP 更新它。" +"In a real-world deployment, we typically want to run different projects " +"on such a federation. Each project could use different hyperparameters, " +"different model architectures, different aggregation strategies, or even " +"different machine learning frameworks like PyTorch and TensorFlow." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:269 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:28 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your SuperNodes." +"This is why, in Flower, both the server side and the client side are " +"split into two parts. One part is long-lived and responsible for " +"communicating across the network, the other part is short-lived and " +"executes task-specific code." msgstr "" -"要测试在本地运行 Flower,可以创建一个 \"桥接网络 `__\",使用\"--网络 " -"\"参数并传递 Docker 网络的名称,以运行超级节点。" -#: ../../source/how-to-run-flower-using-docker.rst:273 -#, fuzzy -msgid "" -"Any argument that comes after the tag is passed to the Flower SuperNode " -"binary. To see all available flags that the SuperNode supports, run:" -msgstr "标记后的任何参数都将传递给 Flower 超级节点二进制文件。要查看超级节点支持的所有可用标记,请运行" +#: ../../source/explanation-flower-architecture.rst:32 +msgid "A Flower `server` consists of **SuperLink** and ``ServerApp``:" +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:283 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:34 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your SuperNode container." -msgstr "要启用 SSL,我们需要将 PEM 编码的根证书挂载到 SuperNode 容器中。" +"**SuperLink**: a long-running process that forwards task instructions to " +"clients (SuperNodes) and receives task results back." 
+msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:285 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:36 msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the SuperNode to access the certificate " -"within the container. Use the ``--root-certificates`` flag when starting " -"the container." +"``ServerApp``: a short-lived process with project-spcific code that " +"customizes all server-side aspects of federated learning systems (client " +"selection, client configuration, result aggregation). This is what AI " +"researchers and AI engineers write when they build Flower apps." msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" -#: ../../source/how-to-run-flower-using-docker.rst:297 -#, fuzzy -msgid "Flower ServerApp" -msgstr "Flower 服务器。" +#: ../../source/explanation-flower-architecture.rst:41 +msgid "A Flower `client` consists of **SuperNode** and ``ClientApp``:" +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:299 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:43 msgid "" -"The procedure for building and running a ServerApp image is almost " -"identical to the SuperNode image." -msgstr "构建和运行 ServerApp 映像的程序与 SuperNode 映像几乎完全相同。" +"**SuperNode**: a long-running process that connects to the SuperLink, " +"asks for tasks, executes tasks (for example, \"train this model on your " +"local data\") and returns task results back to the SuperLink." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:301 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:46 msgid "" -"Similar to the SuperNode image, the ServerApp Docker image comes with a " -"pre-installed version of Flower and serves as a base for building your " -"own ServerApp image." 
+"``ClientApp``: a short-lived process with project-specific code that " +"customizes all client-side aspects of federated learning systems (local " +"model training and evaluation, pre- and post-processing). This is what AI" +" researchers and AI engineers write when they build Flower apps." msgstr "" -"与 SuperNode 映像类似,ServerApp Docker 映像也预装了 Flower 版本,可作为构建自己的 ServerApp " -"映像的基础。" -#: ../../source/how-to-run-flower-using-docker.rst:304 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:51 msgid "" -"We will use the same ``quickstart-pytorch`` example as we do in the " -"Flower SuperNode section. If you have not already done so, please follow " -"the `SuperNode Prerequisites`_ before proceeding." +"Why SuperNode and SuperLink? Well, in federated learning, the clients are" +" the actual stars of the show. They hold the training data and they run " +"the actual training. This is why Flower decided to name them " +"**SuperNode**. The **SuperLink** is then responsible for acting as the " +"`missing link` between all those SuperNodes." msgstr "" -"我们将使用与 \"Flower SuperNode \"部分相同的 \"quickstart-pytorch " -"\"示例。如果您还没有这样做,请在继续之前遵循 \"SuperNode 先决条件\"。" -#: ../../source/how-to-run-flower-using-docker.rst:309 +#: ../../source/explanation-flower-architecture.rst:62 #, fuzzy -msgid "Creating a ServerApp Dockerfile" -msgstr "创建 ServerApp Dockerfile" +msgid "Basic Flower architecture" +msgstr "Flower的架构" -#: ../../source/how-to-run-flower-using-docker.rst:320 +#: ../../source/explanation-flower-architecture.rst:62 #, fuzzy -msgid "" -"First, we need to create a Dockerfile in the directory where the " -"``ServerApp`` code is located. If you use the ``quickstart-pytorch`` " -"example, create a new file called ``Dockerfile.serverapp`` in ``examples" -"/quickstart-pytorch``." 
-msgstr "" -"首先,我们需要在 ``ServerApp`` 代码所在的目录中创建一个 Dockerfile。如果使用 ``quickstart-" -"pytorch`` 示例,请在 ``examples/quickstart-pytorch`` 中创建一个名为 " -"``Dockerfile.serverapp`` 的新文件。" +msgid "The basic Flower architecture for federated learning." +msgstr "本轮联邦学习。" -#: ../../source/how-to-run-flower-using-docker.rst:324 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:64 msgid "" -"The ``Dockerfile.serverapp`` contains the instructions that assemble the " -"ServerApp image." -msgstr "Dockerfile.serverapp \"包含组装 ServerApp 镜像的说明。" +"In a Flower app project, users will typically develop the ``ServerApp`` " +"and the ``ClientApp``. All the network communication between `server` and" +" `clients` is taken care of by the SuperLink and SuperNodes." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:335 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:70 msgid "" -"In the first two lines, we instruct Docker to use the ServerApp image " -"tagged ``1.8.0`` as a base image and set our working directory to " -"``/app``. The following instructions will now be executed in the ``/app``" -" directory. In the last two lines, we copy the ``server.py`` module into " -"the image and set the entry point to ``flower-server-app`` with the " -"argument ``server:app``. The argument is the object reference of the " -"ServerApp (``:``) that will be run inside the " -"ServerApp container." +"For more details, please refer to the |serverapp_link|_ and " +"|clientapp_link|_ documentation." 
msgstr "" -"在前两行中,我们指示 Docker 使用标记为 ``1.8.0`` 的 ServerApp 镜像作为基础镜像,并将工作目录设置为 " -"``/app``。下面的指令将在 ``/app`` 目录中执行。在最后两行中,我们将 ``server.py`` " -"模块复制到映像中,并将入口点设置为 ``flower-server-app``,参数为 ``server:app``。参数是将在 " -"ServerApp 容器内运行的 ServerApp 的对象引用(``<模块>:<属性>``)。" - -#: ../../source/how-to-run-flower-using-docker.rst:343 -#, fuzzy -msgid "Building the ServerApp Docker image" -msgstr "启动服务器" -#: ../../source/how-to-run-flower-using-docker.rst:345 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:73 msgid "" -"Next, we build the ServerApp Docker image by running the following " -"command in the directory where Dockerfile and ServerApp code are located." -msgstr "接下来,我们在 Dockerfile 和 ServerApp 代码所在的目录下运行以下命令,构建 ServerApp Docker 镜像。" +"With *multi-run*, multiple ``ServerApp``\\s and ``ClientApp``\\s are now " +"capable of running on the same federation consisting of a single long-" +"running SuperLink and multiple long-running SuperNodes. This is sometimes" +" referred to as `multi-tenancy` or `multi-job`." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:352 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:78 msgid "" -"We gave the image the name ``flwr_serverapp``, and the tag ``0.0.1``. " -"Remember that the here chosen values only serve as an example. You can " -"change them to your needs." -msgstr "我们给图片命名为 ``flwr_serverapp``,标签为 ``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" - -#: ../../source/how-to-run-flower-using-docker.rst:357 -#, fuzzy -msgid "Running the ServerApp Docker image" -msgstr "启动服务器" +"As shown in the figure below, two projects, each consisting of a " +"``ServerApp`` and a ``ClientApp``, could share the same SuperLink and " +"SuperNodes." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:359 +#: ../../source/explanation-flower-architecture.rst:87 #, fuzzy -msgid "Now that we have built the ServerApp image, we can finally run it." 
-msgstr "现在我们已经构建了 ServerApp 镜像,终于可以运行它了。" +msgid "Multi-tenancy federated learning architecture" +msgstr "使用联邦学习策略" -#: ../../source/how-to-run-flower-using-docker.rst:371 +#: ../../source/explanation-flower-architecture.rst:87 #, fuzzy -msgid "``flwr_serverapp:0.0.1``: The name the tag of the Docker image to use." -msgstr "flwr_serverapp:0.0.1``: 要使用的 Docker 映像的名称和标记。" +msgid "Multi-tenancy federated learning architecture with Flower" +msgstr "步骤 2:使用 Flower 联邦学习" -#: ../../source/how-to-run-flower-using-docker.rst -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:89 msgid "" -"``--superlink 192.168.1.100:9091``: This option specifies the address of " -"the SuperLinks Driver" -msgstr "``--server 192.168.1.100:9091``: 此选项指定超级链接驱动程序的地址" +"To illustrate how multi-run works, consider one federated learning " +"training run where a ``ServerApp`` and a ``ClientApp`` are participating " +"in ``[run 1]``. Note that a SuperNode will only run a ``ClientApp`` if it" +" is selected to participate in the training run." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:385 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:94 msgid "" -"To test running Flower locally, you can create a `bridge network " -"`__, use the ``--network`` argument and pass the " -"name of the Docker network to run your ServerApps." +"In ``[run 1]`` below, all the SuperNodes are selected and therefore run " +"their corresponding ``ClientApp``\\s:" msgstr "" -"要测试在本地运行 Flower,可以创建一个 ``bridge network `___,使用 " -"``--network`` 参数并传递 Docker 网络的名称,以运行 ServerApps。" -#: ../../source/how-to-run-flower-using-docker.rst:389 +#: ../../source/explanation-flower-architecture.rst:103 #, fuzzy -msgid "" -"Any argument that comes after the tag is passed to the Flower ServerApp " -"binary. 
To see all available flags that the ServerApp supports, run:" -msgstr "标记后的任何参数都将传递给 Flower ServerApp 二进制文件。要查看 ServerApp 支持的所有可用标记,请运行" +msgid "Multi-tenancy federated learning architecture - Run 1" +msgstr "使用联邦学习策略" -#: ../../source/how-to-run-flower-using-docker.rst:399 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:103 msgid "" -"To enable SSL, we will need to mount a PEM-encoded root certificate into " -"your ServerApp container." -msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" +"Run 1 in a multi-run federated learning architecture with Flower. All " +"SuperNodes participate in the training round." +msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:401 -#, fuzzy +#: ../../source/explanation-flower-architecture.rst:106 msgid "" -"Assuming the certificate already exists locally, we can use the flag " -"``--volume`` to mount the local certificate into the container's " -"``/app/`` directory. This allows the ServerApp to access the certificate " -"within the container. Use the ``--root-certificates`` flags when starting" -" the container." +"However, in ``[run 2]``, only the first and third SuperNodes are selected" +" to participate in the training:" msgstr "" -"假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 ``-v`` 将本地目录挂载到容器的 " -"``/app/`` 目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` 标志将证书名称传递给服务器。" -#: ../../source/how-to-run-flower-using-docker.rst:412 +#: ../../source/explanation-flower-architecture.rst:115 #, fuzzy -msgid "Advanced Docker options" -msgstr "高级安装选项" +msgid "Multi-tenancy federated learning architecture - Run 2" +msgstr "使用联邦学习策略" -#: ../../source/how-to-run-flower-using-docker.rst:415 -msgid "Run with root user privileges" +#: ../../source/explanation-flower-architecture.rst:115 +msgid "" +"Run 2 in a multi-run federated learning architecture with Flower. Only " +"the first and third SuperNodes are selected to participate in the " +"training round." 
msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:417 +#: ../../source/explanation-flower-architecture.rst:118 msgid "" -"Flower Docker images, by default, run with a non-root user " -"(username/groupname: ``app``, UID/GID: ``49999``). Using root user is not" -" recommended unless it is necessary for specific tasks during the build " -"process. Always make sure to run the container as a non-root user in " -"production to maintain security best practices." +"Therefore, with Flower multi-run, different projects (each consisting of " +"a ``ServerApp`` and ``ClientApp``) can run on different sets of clients." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:422 -msgid "**Run a container with root user privileges**" +#: ../../source/explanation-flower-architecture.rst:121 +msgid "" +"To help you start and manage all of the concurrently executing training " +"runs, Flower offers one additional long-running server-side service " +"called **SuperExec**. When you type ``flwr run`` to start a new training " +"run, the ``flwr`` CLI bundles your local project (mainly your " +"``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. The " +"**SuperExec** will then take care of starting and managing your " +"``ServerApp``, which in turn selects SuperNodes to execute your " +"``ClientApp``." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:424 +#: ../../source/explanation-flower-architecture.rst:128 msgid "" -"Run the Docker image with the ``-u`` flag and specify ``root`` as the " -"username:" +"This architecture allows many users to (concurrently) run their projects " +"on the same federation, simply by typing ``flwr run`` on their local " +"developer machine." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:430 -msgid "This command will run the Docker container with root user privileges." 
+#: ../../source/explanation-flower-architecture.rst:137 +msgid "Flower Deployment Engine with SuperExec" msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:432 -msgid "**Run the build process with root user privileges**" +#: ../../source/explanation-flower-architecture.rst:137 +msgid "The SuperExec service for managing concurrent training runs in Flower." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:434 +#: ../../source/explanation-flower-architecture.rst:141 msgid "" -"If you want to switch to the root user during the build process of the " -"Docker image to install missing system dependencies, you can use the " -"``USER root`` directive within your Dockerfile." +"This explanation covers the Flower Deployment Engine. An explanation " +"covering the Flower Simulation Engine will follow." msgstr "" -#: ../../source/how-to-run-flower-using-docker.rst:454 -#, fuzzy -msgid "Using a different Flower version" -msgstr "使用不同的 Flower 或 Python 版本" - -#: ../../source/how-to-run-flower-using-docker.rst:456 +#: ../../source/explanation-flower-architecture.rst:146 #, fuzzy msgid "" -"If you want to use a different version of Flower, for example Flower " -"nightly, you can do so by changing the tag. All available versions are on" -" `Docker Hub `__." -msgstr "" -"如果您想使用不同版本的 Flower 或 Python,可以通过更改标签来实现。我们提供的所有版本都可以在 `Docker Hub " -"`_ 上找到。" - -#: ../../source/how-to-run-flower-using-docker.rst:460 -#, fuzzy -msgid "Pinning a Docker image to a specific version" -msgstr "将 Docker 映像固定到特定版本" +"As we continue to enhance Flower at a rapid pace, we'll periodically " +"update this explainer document. Feel free to share any feedback with us." +msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" -#: ../../source/how-to-run-flower-using-docker.rst:462 -#, fuzzy -msgid "" -"It may happen that we update the images behind the tags. 
Such updates " -"usually include security updates of system dependencies that should not " -"change the functionality of Flower. However, if you want to ensure that " -"you always use the same image, you can specify the hash of the image " -"instead of the tag." -msgstr "" -"我们可能会更新标签后面的图像。此类更新通常包括系统依赖项的安全更新,不会改变 Flower " -"的功能。不过,如果您想确保始终使用同一张图片,可以指定图片的哈希值而不是标签。" +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "FED 模板" -#: ../../source/how-to-run-flower-using-docker.rst:467 -#, fuzzy -msgid "" -"The following command returns the current image hash referenced by the " -"``superlink:1.8.0`` tag:" -msgstr "下面的命令将返回由 ``server:1.7.0-py3.11-ubuntu22.04`` 标记引用的当前图像哈希值:" +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "目录" -#: ../../source/how-to-run-flower-using-docker.rst:474 -#, fuzzy -msgid "Next, we can pin the hash when running a new SuperLink container:" -msgstr "接下来,我们可以在运行新服务器容器时将哈希值固定下来:" +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "[目录](#table-of-contents)" -#: ../../source/how-to-run-flower-using-docker.rst:483 -#, fuzzy -msgid "Setting environment variables" -msgstr "设置编码环境" +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 +msgid "[Summary](#summary)" +msgstr "[总结](#summary)" -#: ../../source/how-to-run-flower-using-docker.rst:485 -#, fuzzy -msgid "" -"To set a variable inside a Docker container, you can use the ``-e " -"=`` flag." 
-msgstr "要在 Docker 容器内设置变量,可以使用 ``-e =`` 标志。" +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "[动机](#motivation)" -#: ../../source/how-to-run-simulations.rst:2 -msgid "Run simulations" -msgstr "运行模拟" +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "[目标](#goals)" -#: ../../source/how-to-run-simulations.rst:8 -msgid "" -"Simulating Federated Learning workloads is useful for a multitude of use-" -"cases: you might want to run your workload on a large cohort of clients " -"but without having to source, configure and mange a large number of " -"physical devices; you might want to run your FL workloads as fast as " -"possible on the compute systems you have access to without having to go " -"through a complex setup process; you might want to validate your " -"algorithm on different scenarios at varying levels of data and system " -"heterogeneity, client availability, privacy budgets, etc. These are among" -" some of the use-cases where simulating FL workloads makes sense. Flower " -"can accommodate these scenarios by means of its `VirtualClientEngine " -"`_ or " -"VCE." -msgstr "" -"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" -" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " -"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "[非目标](#non-goals)" -#: ../../source/how-to-run-simulations.rst:10 -msgid "" -"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" -" clients. These clients are identical to `non-virtual` clients (i.e. 
the " -"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " -"creating a class inheriting, for example, from `flwr.client.NumPyClient " -"`_ and therefore behave in an " -"identical way. In addition to that, clients managed by the " -":code:`VirtualClientEngine` are:" -msgstr "" -":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" -" `_启动的客户端),因为它们可以通过创建一个继承自 " -"`flwr.client.NumPyClient `_ " -"的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "[计划](#proposal)" -#: ../../source/how-to-run-simulations.rst:12 -msgid "" -"resource-aware: this means that each client gets assigned a portion of " -"the compute and memory on your system. You as a user can control this at " -"the beginning of the simulation and allows you to control the degree of " -"parallelism of your Flower FL simulation. The fewer the resources per " -"client, the more clients can run concurrently on the same hardware." -msgstr "" -"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " -"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "[缺点](#drawbacks)" -#: ../../source/how-to-run-simulations.rst:13 -msgid "" -"self-managed: this means that you as a user do not need to launch clients" -" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " -"internals." 
-msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "[备选方案](#alternatives-considered)" -#: ../../source/how-to-run-simulations.rst:14 -msgid "" -"ephemeral: this means that a client is only materialized when it is " -"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," -" releasing the resources it was assigned and allowing in this way other " -"clients to participate." -msgstr "" -"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " -")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "[附录](#appendix)" -#: ../../source/how-to-run-simulations.rst:16 -msgid "" -"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " -"`_, an open-source framework for scalable Python " -"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " -"of `Actors `_ to " -"spawn `virtual` clients and run their workload." 
-msgstr "" -":code:`VirtualClientEngine`使用`Ray " -"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" -" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "总结" -#: ../../source/how-to-run-simulations.rst:20 -msgid "Launch your Flower simulation" -msgstr "启动 Flower 模拟" +#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "\\[TODO - 句子 1: 问题概括\\]" -#: ../../source/how-to-run-simulations.rst:22 -msgid "" -"Running Flower simulations still require you to define your client class," -" a strategy, and utility functions to download and load (and potentially " -"partition) your dataset. With that out of the way, launching your " -"simulation is done with `start_simulation `_ and a minimal example looks" -" as follows:" -msgstr "" -"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " -"\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" +#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "\\[TODO - 句子 2: 解决方案概括\\]" -#: ../../source/how-to-run-simulations.rst:44 -msgid "VirtualClientEngine resources" -msgstr "虚拟客户端引擎资源" +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "动机" -#: ../../source/how-to-run-simulations.rst:45 -msgid "" -"By default the VCE has access to all system resources (i.e. all CPUs, all" -" GPUs, etc) since that is also the default behavior when starting Ray. " -"However, in some settings you might want to limit how many of your system" -" resources are used for simulation. 
You can do this via the " -":code:`ray_init_args` input argument to :code:`start_simulation` which " -"the VCE internally passes to Ray's :code:`ray.init` command. For a " -"complete list of settings you can configure check the `ray.init " -"`_" -" documentation. Do not set :code:`ray_init_args` if you want the VCE to " -"use all your system's CPUs and GPUs." -msgstr "" -"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " -"时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " -":code:`start_simulation` 的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init`" -" 命令。有关您可以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " -"GPU,请不要设置 :code:`ray_init_args`。" +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: ../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" +msgstr "\\[TODO\\]" -#: ../../source/how-to-run-simulations.rst:62 -msgid "Assigning client resources" -msgstr "分配客户端资源" +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "目标" -#: ../../source/how-to-run-simulations.rst:63 -msgid "" -"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" -" nothing else) to each virtual client. This means that if your system has" -" 10 cores, that many virtual clients can be concurrently running." 
-msgstr "" -"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " -"内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "非目标" -#: ../../source/how-to-run-simulations.rst:65 -msgid "" -"More often than not, you would probably like to adjust the resources your" -" clients get assigned based on the complexity (i.e. compute and memory " -"footprint) of your FL workload. You can do so when starting your " -"simulation by setting the argument `client_resources` to " -"`start_simulation `_." -" Two keys are internally used by Ray to schedule and spawn workloads (in " -"our case Flower clients):" -msgstr "" -"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " -"`client_resources` 设置为 `start_simulation `_ 。Ray " -"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "提案" -#: ../../source/how-to-run-simulations.rst:67 -msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." -msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "缺点" -#: ../../source/how-to-run-simulations.rst:68 -msgid "" -":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " -"assigned." 
-msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "备选方案" -#: ../../source/how-to-run-simulations.rst:70 -msgid "Let's see a few examples:" -msgstr "让我们来看几个例子:" +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" +msgstr "\\[备选 1\\]" -#: ../../source/how-to-run-simulations.rst:89 -msgid "" -"While the :code:`client_resources` can be used to control the degree of " -"concurrency in your FL simulation, this does not stop you from running " -"dozens, hundreds or even thousands of clients in the same round and " -"having orders of magnitude more `dormant` (i.e. not participating in a " -"round) clients. Let's say you want to have 100 clients per round but your" -" system can only accommodate 8 clients concurrently. The " -":code:`VirtualClientEngine` will schedule 100 jobs to run (each " -"simulating a client sampled by the strategy) and then will execute them " -"in a resource-aware manner in batches of 8." -msgstr "" -"虽然 :code:`client_resources` 可用来控制 FL " -"模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " -"\"休眠\"(即不参与一轮模拟)客户端。比方说,您希望每轮有 100 个客户端,但您的系统只能同时容纳 8 " -"个客户端。:code:`VirtualClientEngine` 将安排运行 100 " -"个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" +#: ../../source/fed/0000-20200102-fed-template.md:56 +msgid "\\[Alternative 2\\]" +msgstr "\\[备选 2\\]" -#: ../../source/how-to-run-simulations.rst:91 -msgid "" -"To understand all the intricate details on how resources are used to " -"schedule FL clients and how to define custom resources, please take a " -"look at the `Ray documentation `_." 
-msgstr "" -"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " -"`_。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "Flower 改善文档" -#: ../../source/how-to-run-simulations.rst:94 -msgid "Simulation examples" -msgstr "模拟示例" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "[增强文档模版](#enhancement-doc-template)" -#: ../../source/how-to-run-simulations.rst:96 -msgid "" -"A few ready-to-run complete examples for Flower simulation in " -"Tensorflow/Keras and PyTorch are provided in the `Flower repository " -"`_. You can run them on Google Colab too:" -msgstr "" -"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " -"`_ 中提供。您也可以在 Google Colab 上运行它们:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "[描述数据](#metadata)" -#: ../../source/how-to-run-simulations.rst:98 -msgid "" -"`Tensorflow/Keras Simulation " -"`_: 100 clients collaboratively train a MLP model on MNIST." -msgstr "" -"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" +msgstr "[工作流程](#workflow)" -#: ../../source/how-to-run-simulations.rst:99 -msgid "" -"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " -"MNIST." 
-msgstr "" -"PyTorch 模拟 `_:100 个客户端在 MNIST 上协作训练一个 CNN 模型。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "[GitHub 问题](#github-issues)" -#: ../../source/how-to-run-simulations.rst:104 -msgid "Multi-node Flower simulations" -msgstr "多节点 Flower 模拟" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "[谷歌文档](#google-docs)" -#: ../../source/how-to-run-simulations.rst:106 -msgid "" -"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " -"across multiple compute nodes. Before starting your multi-node simulation" -" ensure that you:" -msgstr "Flower 的 :code:`VirtualClientEngine` 允许您在多个计算节点上运行 FL 模拟。在开始多节点模拟之前,请确保:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" +msgstr "改善 Flower 功能是一个标准化的开发流程,目的是" -#: ../../source/how-to-run-simulations.rst:108 -msgid "Have the same Python environment in all nodes." -msgstr "所有节点都有相同的 Python 环境。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" +msgstr "为提出更大规模的改动提供一个共同的结构" -#: ../../source/how-to-run-simulations.rst:109 -msgid "Have a copy of your code (e.g. your entire repo) in all nodes." 
-msgstr "在所有节点上都有一份代码副本(例如整个软件包)。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "确保改动的动机明确" -#: ../../source/how-to-run-simulations.rst:110 -msgid "" -"Have a copy of your dataset in all nodes (more about this in " -":ref:`simulation considerations `)" -msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" +msgstr "将项目信息保存在版本控制系统中" -#: ../../source/how-to-run-simulations.rst:111 -msgid "" -"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " -"`_ so the " -":code:`VirtualClientEngine` attaches to a running Ray instance." -msgstr "" -"将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " -":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" +msgstr "记录面向用户的具有影响力的改动的动机" -#: ../../source/how-to-run-simulations.rst:112 -msgid "" -"Start Ray on you head node: on the terminal type :code:`ray start " -"--head`. This command will print a few lines, one of which indicates how " -"to attach other nodes to the head node." 
-msgstr "" -"在头部节点上启动 Ray:在终端上输入 :code:`raystart--" -"head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "保留 GitHub 问题,用于跟踪进行中的工作" -#: ../../source/how-to-run-simulations.rst:113 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 msgid "" -"Attach other nodes to the head node: copy the command shown after " -"starting the head and execute it on terminal of a new node: for example " -":code:`ray start --address='192.168.1.132:6379'`" -msgstr "" -"将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " -"--address='192.168.1.132:6379'`" +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" +msgstr "确保社区参与者能够成功推动改动,完成一个或多个版本,同时利益相关者在整个过程中得到充分展现" -#: ../../source/how-to-run-simulations.rst:115 -msgid "" -"With all the above done, you can run your code from the head node as you " -"would if the simulation was running on a single node." -msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "因此,\"增强文件\"将以下方面结合起来" -#: ../../source/how-to-run-simulations.rst:117 -msgid "" -"Once your simulation is finished, if you'd like to dismantle your cluster" -" you simply need to run the command :code:`ray stop` in each node's " -"terminal (including the head node)." 
-msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "一个功能和效力跟踪文档" -#: ../../source/how-to-run-simulations.rst:120 -msgid "Multi-node simulation good-to-know" -msgstr "了解多节点模拟" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "一个产品需要文档" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "一个设计文档" -#: ../../source/how-to-run-simulations.rst:122 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 msgid "" -"Here we list a few interesting functionality when running multi-node FL " -"simulations:" -msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" +"into one file, which is created incrementally in collaboration with the " +"community." +msgstr "该文件是与社区合作逐步创建的。" -#: ../../source/how-to-run-simulations.rst:124 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49 msgid "" -"User :code:`ray status` to check all nodes connected to your head node as" -" well as the total resources available to the " -":code:`VirtualClientEngine`." +"For far-fetching changes or features proposed to Flower, an abstraction " +"beyond a single GitHub issue or pull request is required to understand " +"and communicate upcoming changes to the project." msgstr "" -"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " -"可用的总资源。" +"对于向 Flower 提出的远期变更或功能,需要一个超越单个 GitHub 问题或拉取请求(pull " +"request)的抽象概念,以了解和沟通项目即将发生的变更。" -#: ../../source/how-to-run-simulations.rst:126 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51 msgid "" -"When attaching a new node to the head, all its resources (i.e. all CPUs, " -"all GPUs) will be visible by the head node. This means that the " -":code:`VirtualClientEngine` can schedule as many `virtual` clients as " -"that node can possible run. 
In some settings you might want to exclude " -"certain resources from the simulation. You can do this by appending " -"`--num-cpus=` and/or `--num-" -"gpus=` in any :code:`ray start` command (including " -"when starting the head)" +"The purpose of this process is to reduce the amount of \"tribal " +"knowledge\" in our community. By moving decisions from Slack threads, " +"video calls, and hallway conversations into a well-tracked artifact, this" +" process aims to enhance communication and discoverability." msgstr "" -"将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" -" 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" -" start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" -"gpus=`" - -#: ../../source/how-to-run-simulations.rst:132 -msgid "Considerations for simulations" -msgstr "模拟的注意事项" +"这一流程的目的是减少我们社区中 \"部落知识 \"的数量。通过将决策从 Slack " +"线程、视频通话和走廊对话转移到一个跟踪良好的工作环境中,该流程旨在加强沟通和可发现性。" -#: ../../source/how-to-run-simulations.rst:135 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55 msgid "" -"We are actively working on these fronts so to make it trivial to run any " -"FL workload with Flower simulation." -msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" +"Roughly any larger, user-facing enhancement should follow the Enhancement" +" process. If an enhancement would be described in either written or " +"verbal communication to anyone besides the author or developer, then " +"consider creating an Enhancement Doc." +msgstr "任何较大的、面向用户的增强都应遵循增强流程。如果要以书面或口头形式向作者或开发人员以外的任何人描述增强功能,则应考虑创建改善文档。" -#: ../../source/how-to-run-simulations.rst:138 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57 msgid "" -"The current VCE allows you to run Federated Learning workloads in " -"simulation mode whether you are prototyping simple scenarios on your " -"personal laptop or you want to train a complex FL pipeline across " -"multiple high-performance GPU nodes. 
While we add more capabilities to " -"the VCE, the points below highlight some of the considerations to keep in" -" mind when designing your FL pipeline with Flower. We also highlight a " -"couple of current limitations in our implementation." -msgstr "" -"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的" -" FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " -"时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" - -#: ../../source/how-to-run-simulations.rst:141 -msgid "GPU resources" -msgstr "GPU 资源" +"Similarly, any technical effort (refactoring, major architectural change)" +" that will impact a large section of the development community should " +"also be communicated widely. The Enhancement process is suited for this " +"even if it will have zero impact on the typical user or operator." +msgstr "同样,任何会对开发社区的大部分人产生影响的技术工作(重构、重大架构变更)也应广泛传播。即使对典型用户或操作员的影响为零,改进流程也适用于这种情况。" -#: ../../source/how-to-run-simulations.rst:143 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61 msgid "" -"The VCE assigns a share of GPU memory to a client that specifies the key " -":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " -"internally by the VCE) is by default:" +"For small changes and additions, going through the Enhancement process " +"would be time-consuming and unnecessary. This includes, for example, " +"adding new Federated Learning algorithms, as these only add features " +"without changing how Flower works or is used." msgstr "" -"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " -"内存份额。也就是说,Ray(VCE 内部使用)是默认的:" +"对于小的改动和添加,通过 \"改善\"程序既耗时又没有必要。例如,这包括添加新的联邦学习算法,因为这只会增加功能,而不会改变 \"Flower " +"\"的工作或使用方式。" -#: ../../source/how-to-run-simulations.rst:146 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 msgid "" -"not aware of the total VRAM available on the GPUs. This means that if you" -" set :code:`num_gpus=0.5` and you have two GPUs in your system with " -"different (e.g. 
32GB and 8GB) VRAM amounts, they both would run 2 clients" -" concurrently." -msgstr "" -"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " -"8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." +msgstr "增强功能与功能请求不同,因为它们已经提供了实施路径,并得到了社区成员的支持。" -#: ../../source/how-to-run-simulations.rst:147 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 msgid "" -"not aware of other unrelated (i.e. not created by the VCE) workloads are " -"running on the GPU. Two takeaways from this are:" -msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." +msgstr "增强功能被记录在一个 Markdown 文件中,该文件遵循已定义的模板和工作流程,用于审查和存储增强功能文档(即增强功能文档)以供参考。" -#: ../../source/how-to-run-simulations.rst:149 -msgid "" -"Your Flower server might need a GPU to evaluate the `global model` after " -"aggregation (by instance when making use of the `evaluate method `_)" -msgstr "" -"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估方法\"`_时)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "增强文档模板" -#: ../../source/how-to-run-simulations.rst:150 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 msgid "" -"If you want to run several independent Flower simulations on the same " -"machine you need to mask-out your GPUs with " -":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " -"experiment." 
-msgstr "" -"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " -":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" +"Each enhancement doc is provided as a Markdown file having the following " +"structure" +msgstr "每个增强文档都以 Markdown 文件的形式提供,其结构如下" -#: ../../source/how-to-run-simulations.rst:153 -msgid "" -"In addition, the GPU resource limits passed to :code:`client_resources` " -"are not `enforced` (i.e. they can be exceeded) which can result in the " -"situation of client using more VRAM than the ratio specified when " -"starting the simulation." -msgstr "" -"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" -" VRAM 超过启动模拟时指定的比例。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +msgstr "描述数据([如下所述](#metadata) 以 YAML 前言的形式出现)" -#: ../../source/how-to-run-simulations.rst:156 -msgid "TensorFlow with GPUs" -msgstr "使用 GPU 的 TensorFlow" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "标题(与描述数据中的标题相同)" -#: ../../source/how-to-run-simulations.rst:158 -msgid "" -"When `using a GPU with TensorFlow " -"`_ nearly your entire GPU memory of" -" all your GPUs visible to the process will be mapped. This is done by " -"TensorFlow for optimization purposes. However, in settings such as FL " -"simulations where we want to split the GPU into multiple `virtual` " -"clients, this is not a desirable mechanism. Luckily we can disable this " -"default behavior by `enabling memory growth " -"`_." 
-msgstr "" -"在 TensorFlow `_ 中使用 GPU 时,几乎所有进程可见的" -" GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 " -"\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " -"`_来禁用这一默认行为。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "目录(如有需要)" -#: ../../source/how-to-run-simulations.rst:160 -msgid "" -"This would need to be done in the main process (which is where the server" -" would run) and in each Actor created by the VCE. By means of " -":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " -"in order to specify a function to be executed upon actor initialization. " -"In this case, to enable GPU growth for TF workloads. It would look as " -"follows:" -msgstr "" -"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " -":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" -" TF 工作负载的 GPU 增长,它看起来如下:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "注意事项/限制/警告(可选)" -#: ../../source/how-to-run-simulations.rst:179 -msgid "" -"This is precisely the mechanism used in `Tensorflow/Keras Simulation " -"`_ example." -msgstr "" -"这正是 \"Tensorflow/Keras 模拟 " -"`_\"示例中使用的机制。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "设计细节(可选)" -#: ../../source/how-to-run-simulations.rst:183 -msgid "Multi-node setups" -msgstr "多节点设置" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "毕业标准" -#: ../../source/how-to-run-simulations.rst:185 -msgid "" -"The VCE does not currently offer a way to control on which node a " -"particular `virtual` client is executed. In other words, if more than a " -"single node have the resources needed by a client to run, then any of " -"those nodes could get the client workload scheduled onto. Later in the FL" -" process (i.e. 
in a different round) the same client could be executed by" -" a different node. Depending on how your clients access their datasets, " -"this might require either having a copy of all dataset partitions on all " -"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " -"circumvent data duplication." -msgstr "" -"VCE 目前不提供控制特定 \"虚拟 " -"\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在 " -"FL " -"进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" -" nfs 或数据库)来避免数据重复。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "升级/降级策略(如适用)" -#: ../../source/how-to-run-simulations.rst:187 -msgid "" -"By definition virtual clients are `stateless` due to their ephemeral " -"nature. A client state can be implemented as part of the Flower client " -"class but users need to ensure this saved to persistent storage (e.g. a " -"database, disk) and that can be retrieve later by the same client " -"regardless on which node it is running from. This is related to the point" -" above also since, in some way, the client's dataset could be seen as a " -"type of `state`." -msgstr "" -"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " -"客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" -" \"状态\"。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." 
+msgstr "作为参考,本文件采用上述结构。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 -msgid "Save and load model checkpoints" -msgstr "保存和加载模型检查点" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +#: ../../source/ref-api/flwr.common.Metadata.rst:2 +msgid "Metadata" +msgstr "描述数据" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 msgid "" -"Flower does not automatically save model updates on the server-side. This" -" how-to guide describes the steps to save (and load) model checkpoints in" -" Flower." -msgstr "Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)模型检查点的步骤。" +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." +msgstr "**fed-number**(必填)上一个Flower增强文件的 \"fed-number \"+1。有了这个编号,就很容易参考其他提案。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 -msgid "Model checkpointing" -msgstr "模型检查点" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." +msgstr "**标题** (必填)用简明语言写出提案的标题。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 msgid "" -"Model updates can be persisted on the server-side by customizing " -":code:`Strategy` methods. Implementing custom strategies is always an " -"option, but for many cases it may be more convenient to simply customize " -"an existing strategy. The following code example defines a new " -":code:`SaveModelStrategy` which customized the existing built-in " -":code:`FedAvg` strategy. In particular, it customizes " -":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " -"(:code:`FedAvg`). 
It then continues to save returned (aggregated) weights" -" before it returns those aggregated weights to the caller (i.e., the " -"server):" -msgstr "" -"模型更新可通过自定义 :code:`Strategy` " -"方法在服务器端持久化。实现自定义策略始终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例定义了一个新的 " -":code:`SaveModelStrategy`,它自定义了现有的内置 :code:`FedAvg` " -"策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " -":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." +msgstr "**status** (必填)提案的当前状态。有关可能的状态,请参阅 [工作流程](#workflow)。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 -msgid "Save and load PyTorch checkpoints" -msgstr "保存和加载 PyTorch 检查点" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +msgid "" +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." +msgstr "**作者**(必填) 提案的作者列表。这只是 GitHub ID。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 msgid "" -"Similar to the previous example but with a few extra steps, we'll show " -"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " -"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" -" transformed into a list of NumPy ``ndarray``'s, then those are " -"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" -" class structure." -msgstr "" -"与前面的例子类似,但多了几个步骤,我们将展示如何存储一个 PyTorch 检查点,我们将使用 ``torch.save`` " -"函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " -"``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." 
+msgstr "**创建日期**(必填) 建议书在 PR 中首次提交的日期。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 msgid "" -"To load your progress, you simply append the following lines to your " -"code. Note that this will iterate over all saved checkpoints and load the" -" latest one:" -msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." +msgstr "**最后更新** (可选)提案最后一次重大修改的日期。" -#: ../../source/how-to-save-and-load-model-checkpoints.rst:97 -#, fuzzy +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 msgid "" -"Return/use this object of type ``Parameters`` wherever necessary, such as" -" in the ``initial_parameters`` when defining a ``Strategy``." -msgstr "" -"在必要时返回/使用此 ``Parameters`` 类型的对象,例如在定义 ``Strategy` 时的 " -"``initial_parameters` 中。" +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." +msgstr "**另见** (可选)与本提案相关的其他提案清单。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 -msgid "Upgrade to Flower 1.0" -msgstr "升级至 Flower 1.0" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." +msgstr "**取代**(可选) 这份提案所取代的提案列表。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 -msgid "" -"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" -" foundation for future growth. Compared to Flower 0.19 (and other 0.x " -"series releases), there are a few breaking changes that make it necessary" -" to change the code of existing 0.x-series projects." -msgstr "" -"Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " -"系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." 
+msgstr "**被取代者** (可选) 此提案取代的提案列表。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 -#: ../../source/how-to-upgrade-to-flower-next.rst:43 -msgid "Install update" -msgstr "安装更新" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "工作流程" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 msgid "" -"Here's how to update an existing installation to Flower 1.0 using either " -"pip or Poetry:" -msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." +msgstr "形成增强功能的想法应该已经在社区中讨论过或提出过。因此,它需要一个支持者(通常是作者)来引导增强。这个人还必须找到愿意审核提案的提交者。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 -msgid "pip: add ``-U`` when installing." -msgstr "pip: 安装时添加 ``-U``." +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 +msgid "" +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." 
+msgstr "" +"新的增强功能以 `NNNN-YYYYMMDD-enhancement-title.md` 的文件名签入,其中 `NNNN` " +"是花朵增强文档的编号,并将其转入 `enhancements`。作为拉取请求(pull request)的一部分,所有增强功能都从 " +"`provisional` 状态开始。讨论是作为拉取请求审查的一部分进行的。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 msgid "" -"``python -m pip install -U flwr`` (when using ``start_server`` and " -"``start_client``)" -msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." +msgstr "" +"一旦增强功能通过审核和批准,其状态就会变为 " +"`可实施`。实际的实施工作将在单独的拉取请求中完成。这些拉取请求应在其描述中提及相应的增强功能。实施完成后,提案状态将更改为 `已实施`。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 msgid "" -"``python -m pip install -U 'flwr[simulation]'`` (when using " -"``start_simulation``)" -msgstr "``python -m pip install -U 'flwr[simulation]'``(当使用`start_simulation``时)" +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" +msgstr "在某些条件下,还可能出现其他状态。增强提案具有以下状态:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 msgid "" -"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " -"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " -"before running ``poetry install``)." -msgstr "" -"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" -" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" +"`provisional`: The enhancement has been proposed and is actively being " +"defined. 
This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." +msgstr "`暂定`: 已提出改进建议并正在积极定义。这是在提案得到充实、积极定义和讨论时的起始状态。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 -msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" -msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." +msgstr "`可实施`: 增强功能已审核通过。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 msgid "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " -"using ``start_simulation``)" -msgstr "" -"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " -"}``(当使用``start_simulation``时)" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." +msgstr "`已实施`: 增强功能已实施,不再主动更改。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 -#: ../../source/how-to-upgrade-to-flower-next.rst:100 -msgid "Required changes" -msgstr "所需变更" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." +msgstr "`推迟`: 已提出改进建议,但尚未积极开展工作。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 -msgid "The following breaking changes require manual updates." -msgstr "以下更改需要手动更新。" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +msgid "" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." +msgstr "`拒绝`: 作者和审稿人已决定不再推进该增强功能。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 -msgid "General" -msgstr "一般情况" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." 
+msgstr "`撤回`: 作者已撤回增强功能。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 -msgid "" -"Pass all arguments as keyword arguments (not as positional arguments). " -"Here's an example:" -msgstr "将所有参数作为关键字参数传递(而不是位置参数)。下面是一个例子:" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "`已替换`: 增强功能已被新的增强功能取代。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 msgid "" -"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " -"FlowerClient())``" -msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." +msgstr "在 GitHub 已提供的流程(问题和拉取请求)之外再增加一个流程,会增加复杂性,并可能成为潜在首次贡献者的障碍。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 msgid "" -"Flower 1.0 (keyword arguments): " -"``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" -msgstr "" -"Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())``" +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." 
+msgstr "对于英语非母语者来说,将提案模板扩展到目前要求的单句描述之外可能是一个沉重的负担。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:37 -#: ../../source/ref-api/flwr.client.Client.rst:2 -msgid "Client" -msgstr "客户端" +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "GitHub 问题" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 msgid "" -"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " -"``def get_parameters(self, config):``" +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. Managing these multiple discussions can be confusing " +"when using GitHub Issues." msgstr "" -"NumPyClient的子类:将``def get_parameters(self):```改为``def " -"get_parameters(self,config):``" +"使用 GitHub Issues 进行此类改进是可行的。例如,我们可以使用标签来区分和过滤这些问题。主要的问题在于讨论和审查增强功能: " +"GitHub 问题只有一个评论线程。而增强功能通常会同时有多个讨论线程,针对文档的不同部分。在使用 GitHub " +"问题时,管理这些多重讨论会很混乱。" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "谷歌文档" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 msgid "" -"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " -"get_parameters(self, ins: GetParametersIns):``" +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. 
Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." msgstr "" -"客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " -"GetParametersIns):\"" +"谷歌文档允许多线程讨论。但是,由于谷歌文档是在项目之外托管的,因此需要注意它们是否能被社区发现。我们必须管理所有提案的链接列表,并提供给社区使用。与作为" +" Flower 资源库一部分的提案相比,丢失链接的可能性要大得多。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 -msgid "Strategies / ``start_server`` / ``start_simulation``" -msgstr "策略 / ``start_server`` / ``start_simulation``" +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "FED - Flower 增强文件" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 -msgid "" -"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " -"``start_simulation``. Here's an example:" -msgstr "" -"向 ``start_server`` 和 ``start_simulation` 传递 ``ServerConfig``(而不是 " -"dictionary)。下面是一个例子:" +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" +msgstr "整合评估结果" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +#: ../../source/how-to-aggregate-evaluation-results.rst:4 msgid "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" -msgstr "" -"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " -"\"round_timeout\": 600.0}, ...)``" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." 
+msgstr "Flower 服务器没有规定整合评估结果的方法,但用户可以完全自定义如何整合。" + +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "自定义整合评估结果" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +#, fuzzy msgid "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" -msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"The same ``Strategy``-customization approach can be used to aggregate " +"custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" +msgstr "同样的 :code:`Strategy` 定制方法也可用于汇总来自单个客户端的自定义评估结果。客户端可以通过返回字典的方式向服务器返回自定义指标:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +#: ../../source/how-to-aggregate-evaluation-results.rst:39 msgid "" -"Replace ``num_rounds=1`` in ``start_simulation`` with the new " -"``config=ServerConfig(...)`` (see previous item)" -msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" +msgstr "然后,服务器可以使用定制的策略来汇总这些字典中提供的指标:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +#: ../../source/how-to-authenticate-supernodes.rst:2 +#, fuzzy +msgid "Authenticate SuperNodes" +msgstr "验证超级节点" + +#: ../../source/how-to-authenticate-supernodes.rst:4 +#, fuzzy msgid "" -"Remove ``force_final_distributed_eval`` parameter from calls to " -"``start_server``. Distributed evaluation on all clients can be enabled by" -" configuring the strategy to sample all clients for evaluation after the " -"last round of training." +"Flower has built-in support for authenticated SuperNodes that you can use" +" to verify the identities of each SuperNode connecting to a SuperLink. 
" +"Flower node authentication works similar to how GitHub SSH authentication" +" works:" msgstr "" -"删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " -"参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" +"Flower 内置了对经过身份验证的超级节点的支持,您可以用它来验证连接到超级链接的每个超级节点的身份。Flower 节点身份验证的工作方式与 " +"GitHub SSH 身份验证的工作方式类似:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 -msgid "Rename parameter/ndarray conversion functions:" -msgstr "重命名参数/数组转换函数:" +#: ../../source/how-to-authenticate-supernodes.rst:8 +#, fuzzy +msgid "SuperLink (server) stores a list of known (client) node public keys" +msgstr "超级链接(服务器)存储已知(客户端)节点公钥列表" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 -msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 -msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +#: ../../source/how-to-authenticate-supernodes.rst:9 +#, fuzzy msgid "" -"Strategy initialization: if the strategy relies on the default values for" -" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " -"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " -"create a strategy (by calling ``start_server`` or ``start_simulation`` " -"without passing a strategy instance) should now manually initialize " -"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
-msgstr "" -"策略初始化:如果策略依赖于 ``fraction_fit`` 和 ``fraction_evaluate`` 的默认值,请手动将 " -"``fraction_fit`` 和 ``fraction_evaluate`` 设置为 ``0.1``。未手动创建策略的项目(调用 " -"``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " -"`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 -msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" -msgstr "重命名内置策略参数(例如,`FedAvg``):" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 -msgid "``fraction_eval`` --> ``fraction_evaluate``" -msgstr "``fraction_eval`` --> ``fraction_evaluate``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 -msgid "``min_eval_clients`` --> ``min_evaluate_clients``" -msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 -msgid "``eval_fn`` --> ``evaluate_fn``" -msgstr "``eval_fn`` --> ``evaluate_fn``" +"Using ECDH, both SuperNode and SuperLink independently derive a shared " +"secret" +msgstr "使用 ECDH,超级节点和超级链路可独立生成共享秘密" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +#: ../../source/how-to-authenticate-supernodes.rst:10 +#, fuzzy msgid "" -"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " -"functions, for example, ``configure_fit``, ``aggregate_fit``, " -"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." 
-msgstr "" -"将 `rnd` 更名为 `server_round`。这会影响多个方法和函数,例如 " -"``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" -" 和 ``evaluate_fn``。" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 -msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" -msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" +"Shared secret is used to compute the HMAC value of the message sent from " +"SuperNode to SuperLink as a token" +msgstr "共享秘密用于计算作为令牌从超级节点发送到超级链接的信息的 HMAC 值" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 -msgid "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" -"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" +#: ../../source/how-to-authenticate-supernodes.rst:12 +#, fuzzy +msgid "SuperLink verifies the token" +msgstr "超级链接验证令牌" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +#: ../../source/how-to-authenticate-supernodes.rst:14 +#, fuzzy msgid "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" +"We recommend you to check out the complete `code example " +"`_ demonstrating federated learning with Flower in an " +"authenticated setting." 
msgstr "" -"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " -"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " -"Scalar]]]:``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 -msgid "Custom strategies" -msgstr "定制策略" +"请参阅`完整代码示例 " +"`_了解更多信息。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +#: ../../source/how-to-authenticate-supernodes.rst:20 +#, fuzzy msgid "" -"The type of parameter ``failures`` has changed from " -"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " -"BaseException]]`` (in ``aggregate_fit``) and " -"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " -"``aggregate_evaluate``)" -msgstr "" -"参数``failures``的类型已从``List[BaseException]``变为``List[Union[Tuple[ClientProxy," -" FitRes], " -"BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " -"EvaluateRes], BaseException]]``(在``agregate_evaluate``中)" +"This guide covers a preview feature that might change in future versions " +"of Flower." +msgstr "本指南涵盖的预览功能可能会在 Flower 的未来版本中有所改变。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +#: ../../source/how-to-authenticate-supernodes.rst:24 +#, fuzzy msgid "" -"The ``Strategy`` method ``evaluate`` now receives the current round of " -"federated learning/evaluation as the first parameter:" -msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" +"For increased security, node authentication can only be used when " +"encrypted connections (SSL/TLS) are enabled." 
+msgstr "为提高安全性,只有启用加密连接(SSL/TLS)时才能使用节点验证。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:76 -msgid "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:``" -msgstr "" -"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " -"Optional[Tuple[float, Dict[str, Scalar]]]:```" +#: ../../source/how-to-authenticate-supernodes.rst:28 +#, fuzzy +msgid "Enable node authentication in ``SuperLink``" +msgstr "在 :code:`SuperLink` 中启用节点验证" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +#: ../../source/how-to-authenticate-supernodes.rst:30 +#, fuzzy msgid "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"To enable node authentication, first you need to configure SSL/TLS " +"connections to secure the SuperLink<>SuperNode communication. You can " +"find the complete guide `here `_. After configuring secure connections, you" +" can enable client authentication in a long-running Flower ``SuperLink``." 
+" Use the following terminal command to start a Flower ``SuperNode`` that " +"has both secure connections and node authentication enabled:" msgstr "" -"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " -"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +"要启用节点验证,首先需要配置 SSL/TLS 连接,以确保 SuperLink<>SuperNode 通信的安全。您可以在 " +"`_ " +"找到完整的指南。配置安全连接后,您就可以在长期运行的 Flower " +":code:`SuperLink`中启用客户端身份验证。使用以下终端命令启动一个同时启用了安全连接和节点验证的 Flower " +":code:`SuperNode`:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 -msgid "Optional improvements" -msgstr "可选的改进措施" +#: ../../source/how-to-authenticate-supernodes.rst:47 +#, fuzzy +msgid "Let's break down the authentication flags:" +msgstr "让我们来分析一下身份验证标志:" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:82 +#: ../../source/how-to-authenticate-supernodes.rst:49 +#, fuzzy msgid "" -"Along with the necessary changes above, there are a number of potential " -"improvements that just became possible:" -msgstr "除了上述必要的改动之外,还有一些潜在的改进措施:" +"The first flag ``--auth-list-public-keys`` expects a path to a CSV file " +"storing all known node public keys. You need to store all known node " +"public keys that are allowed to participate in a federation in one CSV " +"file (``.csv``)." +msgstr "" +"第一个标志 :code:`--auth-list-public-keys`(密码:`--auth-list-public-keys`)需要一个 " +"CSV 文件路径,该文件存储了所有已知节点的公钥。您需要在一个 CSV 文件(:code:`.csv`)中存储所有允许参与联盟的已知节点公钥。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:84 +#: ../../source/how-to-authenticate-supernodes.rst:53 +#, fuzzy msgid "" -"Remove \"placeholder\" methods from subclasses of ``Client`` or " -"``NumPyClient``. If you, for example, use server-side evaluation, then " -"empty placeholder implementations of ``evaluate`` are no longer " -"necessary." +"A valid CSV file storing known node public keys should list the keys in " +"OpenSSH format, separated by commas and without any comments. 
For an " +"example, refer to our code sample, which contains a CSV file with two " +"known node public keys." msgstr "" -"删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " -"\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" +"存储已知节点公开密钥的有效 CSV 文件应以 OpenSSH " +"格式列出密钥,以逗号分隔,不含任何注释。有关示例,请参阅我们的代码示例,其中包含一个包含两个已知节点公钥的 CSV 文件。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:85 +#: ../../source/how-to-authenticate-supernodes.rst:57 +#, fuzzy msgid "" -"Configure the round timeout via ``start_simulation``: " -"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " -"round_timeout=600.0), ...)``" +"The second and third flags ``--auth-superlink-private-key`` and ``--auth-" +"superlink-public-key`` expect paths to the server's private and public " +"keys. For development purposes, you can generate a private and public key" +" pair using ``ssh-keygen -t ecdsa -b 384``." msgstr "" -"通过 ``start_simulation`` 配置循环超时: ``start_simulation(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" - -#: ../../source/how-to-upgrade-to-flower-1.0.rst:89 -#: ../../source/how-to-upgrade-to-flower-next.rst:317 -msgid "Further help" -msgstr "更多帮助" +"第二和第三个标记 :code:`--auth-superlink-private-key` 和 :code:`--auth-superlink-" +"public-key` 希望指向服务器私钥和公钥的路径。出于开发目的,您可以使用 :code:`ssh-keygen -t ecdsa -b " +"384` 生成一对私钥和公钥。" -#: ../../source/how-to-upgrade-to-flower-1.0.rst:91 +#: ../../source/how-to-authenticate-supernodes.rst:64 +#, fuzzy msgid "" -"Most official `Flower code examples " -"`_ are already updated" -" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " -"API. If there are further questions, `join the Flower Slack " -"`_ and use the channel ``#questions``." +"In Flower 1.9, there is no support for dynamically removing, editing, or " +"adding known node public keys to the SuperLink. To change the set of " +"known nodes, you need to shut the server down, edit the CSV file, and " +"start the server again. 
Support for dynamically changing the set of known" +" nodes is on the roadmap to be released in Flower 1.10 (ETA: June)." msgstr "" -"大多数官方的 `Flower 代码示例 `_" -" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " -"`_ 并使用 \"#questions``\"。" +"在 Flower 1.9 中,超级链接不支持动态删除、编辑或添加已知节点公钥。要更改已知节点集,您需要关闭服务器,编辑 CSV " +"文件,然后重新启动服务器。动态更改已知节点集的支持已列入 Flower 1.10(预计发布时间:6 月)的路线图。" -#: ../../source/how-to-upgrade-to-flower-next.rst:2 +#: ../../source/how-to-authenticate-supernodes.rst:71 #, fuzzy -msgid "Upgrade to Flower Next" -msgstr "升级至 Flower 1.0" +msgid "Enable node authentication in ``SuperNode``" +msgstr "在 :code:`SuperNode` 中启用节点验证" -#: ../../source/how-to-upgrade-to-flower-next.rst:4 +#: ../../source/how-to-authenticate-supernodes.rst:73 #, fuzzy msgid "" -"Welcome to the migration guide for updating Flower to Flower Next! " -"Whether you're a seasoned user or just getting started, this guide will " -"help you smoothly transition your existing setup to take advantage of the" -" latest features and improvements in Flower Next, starting from version " -"1.8." +"Similar to the long-running Flower server (``SuperLink``), you can easily" +" enable node authentication in the long-running Flower client " +"(``SuperNode``). Use the following terminal command to start an " +"authenticated ``SuperNode``:" msgstr "" -"欢迎阅读从 Flower 升级到 Flower Next 的迁移指南!无论您是经验丰富的用户还是刚刚开始使用 " -"Flower,本指南都将帮助您顺利过渡现有设置,以利用 Flower Next 从 1.8 版开始的最新功能和改进。" +"与长期运行的 Flower 服务器(:code:`SuperLink`)类似,您也可以在长期运行的 Flower " +"客户端(:code:`SuperNode`)中轻松启用节点身份验证。使用以下终端命令启动已验证的 :code:`SuperNode`:" -#: ../../source/how-to-upgrade-to-flower-next.rst:9 +#: ../../source/how-to-authenticate-supernodes.rst:85 #, fuzzy msgid "" -"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " -"changes by using the *compatibility layer* in Flower Next. In another " -"guide, we will show how to run Flower Next end-to-end with pure Flower " -"Next APIs." 
+"The ``--auth-supernode-private-key`` flag expects a path to the node's " +"private key file and the ``--auth-supernode-public-key`` flag expects a " +"path to the node's public key file. For development purposes, you can " +"generate a private and public key pair using ``ssh-keygen -t ecdsa -b " +"384``." msgstr "" -"本指南展示了如何通过使用 Flower Next 中的*可兼容层*,以最小的代码改动重用```1.8```前的 Flower " -"代码。在另一个指南中,我们将介绍如何使用纯 Flower Next API 端到端运行 Flower Next。" +":code:`--auth-supernode-private-key`标志需要节点私钥文件的路径,:code:`-auth-supernode-" +"public-key`标志需要节点公钥文件的路径。出于开发目的,可以使用 :code:`ssh-keygen -t ecdsa -b 384` " +"生成一对私钥和公钥。" -#: ../../source/how-to-upgrade-to-flower-next.rst:13 +#: ../../source/how-to-authenticate-supernodes.rst:91 #, fuzzy -msgid "Let's dive in!" -msgstr "让我们深入了解一下!" +msgid "Security notice" +msgstr "安全通知" -#: ../../source/how-to-upgrade-to-flower-next.rst:48 +#: ../../source/how-to-authenticate-supernodes.rst:93 #, fuzzy msgid "" -"Here's how to update an existing installation of Flower to Flower Next " -"with ``pip``:" -msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" +"The system's security relies on the credentials of the SuperLink and each" +" SuperNode. Therefore, it is imperative to safeguard and safely store the" +" credentials to avoid security risks such as Public Key Infrastructure " +"(PKI) impersonation attacks. The node authentication mechanism also " +"involves human interaction, so please ensure that all of the " +"communication is done in a secure manner, using trusted communication " +"methods." 
+msgstr "" +"系统的安全性依赖于超级链接和每个超级节点的凭证。因此,必须保护和安全存储凭证,以避免公钥基础设施 (PKI) " +"假冒攻击等安全风险。节点验证机制还涉及人机交互,因此请确保使用可信的通信方法,以安全的方式进行所有通信。" -#: ../../source/how-to-upgrade-to-flower-next.rst:54 -#, fuzzy -msgid "or if you need Flower Next with simulation:" -msgstr "启动 Flower 模拟" +#: ../../source/how-to-authenticate-supernodes.rst:100 +#: ../../source/how-to-enable-ssl-connections.rst:71 +#: ../../source/how-to-use-built-in-mods.rst:95 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287 +msgid "Conclusion" +msgstr "总结" -#: ../../source/how-to-upgrade-to-flower-next.rst:61 +#: ../../source/how-to-authenticate-supernodes.rst:102 #, fuzzy msgid "" -"Ensure you set the following version constraint in your " -"``requirements.txt``" -msgstr "确保在 ``requirements.txt`` 中设置了以下版本限制" - -#: ../../source/how-to-upgrade-to-flower-next.rst:71 -#, fuzzy -msgid "or ``pyproject.toml``:" -msgstr "或 ``pyproject.toml```:" +"You should now have learned how to start a long-running Flower server " +"(``SuperLink``) and client (``SuperNode``) with node authentication " +"enabled. You should also know the significance of the private key and " +"store it safely to minimize security risks." +msgstr "" +"现在,您应该已经学会了如何启动长期运行的 Flower " +"服务器(:code:`SuperLink`)和客户端(:code:`SuperNode`)并启用节点身份验证。您还应该知道私钥的重要性,并将其安全存储,以尽量减少安全风险。" -#: ../../source/how-to-upgrade-to-flower-next.rst:82 -#, fuzzy -msgid "Using Poetry" -msgstr "使用 pip" +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" +msgstr "配置客户端" -#: ../../source/how-to-upgrade-to-flower-next.rst:84 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:4 msgid "" -"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " -"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " -"running ``poetry install``)." 
-msgstr "" -"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" -" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." +msgstr "除了模型参数,Flower 还可以向客户端发送配置值。配置值有多种用途。它们是一种从服务器控制客户端超参数的常用方法。" -#: ../../source/how-to-upgrade-to-flower-next.rst:86 -#, fuzzy -msgid "" -"Ensure you set the following version constraint in your " -"``pyproject.toml``:" -msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" +#: ../../source/how-to-configure-clients.rst:9 +msgid "Configuration values" +msgstr "配置值" -#: ../../source/how-to-upgrade-to-flower-next.rst:102 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:11 msgid "" -"In Flower Next, the *infrastructure* and *application layers* have been " -"decoupled. Instead of starting a client in code via ``start_client()``, " -"you create a |clientapp_link|_ and start it via the command line. Instead" -" of starting a server in code via ``start_server()``, you create a " -"|serverapp_link|_ and start it via the command line. The long-running " -"components of server and client are called SuperLink and SuperNode. The " -"following non-breaking changes that require manual updates and allow you " -"to run your project both in the traditional way and in the Flower Next " -"way:" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." 
+" Here is an example of a configuration dictionary in Python:" msgstr "" -"在 Flower Next " -"中,*基础架构层*和*应用层*已经解耦。你不再需要在代码中通过``start_client()``启动客户端,而是创建一个|clientapp_link|_,然后通过命令行启动它。无需通过``start_server()``在代码中启动服务器,而是创建一个" -" |serverapp_link|_ " -"并通过命令行启动它。服务器和客户端的长期运行组件被称为超级链接(SuperLink)和超级节点(SuperNode)。以下是无需手动更新的非破坏性更改,可让您以传统方式和" -" Flower Next 方式运行项目:" - -#: ../../source/how-to-upgrade-to-flower-next.rst:109 -#, fuzzy -msgid "|clientapp_link|_" -msgstr "客户端" +"配置值以字典的形式表示,字典的键为 ``str``,值的类型为 ``bool``、``bytes``、``double``(64 " +"位精度浮点型)、``int``或 ``str`(或不同语言中的等效类型)。下面是一个 Python 配置字典的示例:" -#: ../../source/how-to-upgrade-to-flower-next.rst:110 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:25 msgid "" -"Wrap your existing client with |clientapp_link|_ instead of launching it " -"via |startclient_link|_. Here's an example:" -msgstr "用 |clientapp_link|_ 封装现有客户端,而不是通过 |startclient_link|_ 启动。下面是一个例子:" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." +msgstr "" +"Flower 将这些配置字典(简称 *config dict*)序列化为 ProtoBuf 表示形式,使用 gRPC " +"将其传输到客户端,然后再反序列化为 Python 字典。" -#: ../../source/how-to-upgrade-to-flower-next.rst:132 -#, fuzzy -msgid "|serverapp_link|_" -msgstr "服务器" +#: ../../source/how-to-configure-clients.rst:31 +msgid "" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." 
+msgstr "" +"目前,还不支持在配置字典中直接发送作为值的集合类型(例如,`Set``, `List`, " +"`Map``)。有几种变通方法可将集合转换为支持的值类型之一(并在客户端将其转换回),从而将集合作为值发送。" -#: ../../source/how-to-upgrade-to-flower-next.rst:133 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:36 msgid "" -"Wrap your existing strategy with |serverapp_link|_ instead of starting " -"the server via |startserver_link|_. Here's an example:" -msgstr "用 |serverapp_link|_ 包住现有策略,而不是通过 |startserver_link|_ 启动服务器。下面是一个例子:" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." +msgstr "例如,可以将浮点数列表转换为 JSON 字符串,然后使用配置字典发送 JSON 字符串,再在客户端将 JSON 字符串转换回浮点数列表。" -#: ../../source/how-to-upgrade-to-flower-next.rst:154 -#, fuzzy -msgid "Deployment" -msgstr "调配" +#: ../../source/how-to-configure-clients.rst:41 +msgid "Configuration through built-in strategies" +msgstr "通过内置策略进行配置" -#: ../../source/how-to-upgrade-to-flower-next.rst:155 +#: ../../source/how-to-configure-clients.rst:43 #, fuzzy msgid "" -"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " -"in sequence, |flowernext_clientapp_link|_ (2x) and " -"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" -" `server.py` as Python scripts." +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like ``FedAvg``. Built-in strategies support so-called " +"configuration functions. A configuration function is a function that the " +"built-in strategy calls to get the configuration dictionary for the " +"current round. It then forwards the configuration dictionary to all the " +"clients selected during that round." 
msgstr "" -"在依次运行 |flowernext_clientapp_link|_ (2x) 和 |flowernext_serverapp_link|_ " -"之前,使用 |flowernext_superlink_link|_ 运行 ``SuperLink`` 。无需将 |client.py` 和 " -"`server.py` 作为 Python 脚本执行。" +"向客户端发送配置值的最简单方法是使用内置策略,如 " +":code:`FedAvg`。内置策略支持所谓的配置函数。配置函数是内置策略调用的函数,用于获取当前轮的配置字典。然后,它会将配置字典转发给该轮中选择的所有客户端。" -#: ../../source/how-to-upgrade-to-flower-next.rst:158 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:49 msgid "" -"Here's an example to start the server without HTTPS (only for " -"prototyping):" -msgstr "下面是一个在不使用 HTTPS 的情况下启动服务器的示例(仅用于原型开发):" +"Let's start with a simple example. Imagine we want to send (a) the batch " +"size that the client should use, (b) the current global round of " +"federated learning, and (c) the number of epochs to train on the client-" +"side. Our configuration function could look like this:" +msgstr "让我们从一个简单的例子开始。想象一下,我们想要发送给客户端(a)应该使用的批次大小,(b)当前联邦学习的全局轮次,以及(c)客户端训练的遍历数。我们的配置函数可以是这样的:" -#: ../../source/how-to-upgrade-to-flower-next.rst:174 +#: ../../source/how-to-configure-clients.rst:65 #, fuzzy msgid "" -"Here's another example to start with HTTPS. Use the ``--ssl-ca-" -"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " -"options to pass paths to (CA certificate, server certificate, and server " -"private key)." 
-msgstr "下面是另一个使用 HTTPS 的示例。使用 ``--certificates`` 命令行参数传递路径(CA 证书、服务器证书和服务器私钥)。" +"To make the built-in strategies use this function, we can pass it to " +"``FedAvg`` during initialization using the parameter " +"``on_fit_config_fn``:" +msgstr "为了让内置策略使用这个函数,我们可以在初始化时使用参数 :code:`on_fit_config_fn` 将它传递给 ``FedAvg`` :" -#: ../../source/how-to-upgrade-to-flower-next.rst:201 -#, fuzzy -msgid "Simulation in CLI" -msgstr "运行模拟" +#: ../../source/how-to-configure-clients.rst:75 +msgid "One the client side, we receive the configuration dictionary in ``fit``:" +msgstr "在客户端,我们在 ``fit`` 中接收配置字典:" -#: ../../source/how-to-upgrade-to-flower-next.rst:202 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:86 msgid "" -"Wrap your existing client and strategy with |clientapp_link|_ and " -"|serverapp_link|_, respectively. There is no need to use |startsim_link|_" -" anymore. Here's an example:" +"There is also an `on_evaluate_config_fn` to configure evaluation, which " +"works the same way. They are separate functions because one might want to" +" send different configuration values to `evaluate` (for example, to use a" +" different batch size)." msgstr "" -"分别用 |clientapp_link|_ 和 |serverapp_link|_ 封装现有的客户端和策略。无需再使用 " -"|startsim_link|_。下面是一个示例:" +"还有一个 `on_evaluate_config_fn` 用于配置评估,其工作方式相同。它们是不同的函数,因为可能需要向 `evaluate` " +"发送不同的配置值(例如,使用不同的批量大小)。" -#: ../../source/how-to-upgrade-to-flower-next.rst:232 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:90 msgid "" -"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " -"``client_app`` object in the code instead of executing the Python script." -" Here's an example (assuming the ``server_app`` and ``client_app`` " -"objects are in a ``sim.py`` module):" +"The built-in strategies call this function every round (that is, every " +"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). 
" +"Calling `on_evaluate_config_fn` every round allows us to vary/change the " +"config dict over consecutive rounds. If we wanted to implement a " +"hyperparameter schedule, for example, to increase the number of local " +"epochs during later rounds, we could do the following:" msgstr "" -"在 CLI 中运行 |flower_simulation_link|_ 并指向代码中的 ``server_app`` " -"/``client_app`` 对象,而不是执行 Python 脚本。下面是一个示例(假定 `server_app`` 和 " -"`client_app`` 对象位于 `sim.py`` 模块中):" +"内置策略每轮都会调用此函数(即每次运行 `Strategy.configure_fit` 或 " +"`Strategy.configure_evaluate` 时)。每轮调用 `on_evaluate_config_fn` " +"允许我们在连续几轮中改变配置指令。例如,如果我们想实现一个超参数时间表,以增加后几轮的本地遍历次数,我们可以这样做:" -#: ../../source/how-to-upgrade-to-flower-next.rst:249 +#: ../../source/how-to-configure-clients.rst:107 #, fuzzy -msgid "" -"Set default resources for each |clientapp_link|_ using the ``--backend-" -"config`` command line argument instead of setting the " -"``client_resources`` argument in |startsim_link|_. Here's an example:" -msgstr "" -"使用 ``--backend-config`` 命令行参数为每个 |clientapp_link|_ 设置默认资源,而不是在 " -"|startsim_link|_ 中设置 ``client_resources`` 参数。下面是一个例子:" +msgid "The ``FedAvg`` strategy will call this function *every round*." +msgstr "代码:`FedAvg`策略*每轮*都会调用该函数。" -#: ../../source/how-to-upgrade-to-flower-next.rst:275 -#, fuzzy -msgid "Simulation in a Notebook" -msgstr "笔记本中的模拟" +#: ../../source/how-to-configure-clients.rst:110 +msgid "Configuring individual clients" +msgstr "配置个别客户端" -#: ../../source/how-to-upgrade-to-flower-next.rst:276 -#, fuzzy +#: ../../source/how-to-configure-clients.rst:112 msgid "" -"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " -"an example:" -msgstr "在笔记本中运行 |runsim_link|_,而不是 |startsim_link|_。下面是一个例子:" +"In some cases, it is necessary to send different configuration values to " +"different clients." 
+msgstr "在某些情况下,有必要向不同的客户端发送不同的配置值。" -#: ../../source/how-to-upgrade-to-flower-next.rst:319 +#: ../../source/how-to-configure-clients.rst:115 #, fuzzy msgid "" -"Some official `Flower code examples `_ " -"are already updated to Flower Next so they can serve as a reference for " -"using the Flower Next API. If there are further questions, `join the " -"Flower Slack `_ and use the channel " -"``#questions``. You can also `participate in Flower Discuss " -"`_ where you can find us answering questions," -" or share and learn from others about migrating to Flower Next." +"This can be achieved by customizing an existing strategy or by " +":doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes ``FedAvg`` by " +"adding a custom ``\"hello\": \"world\"`` configuration key/value pair to " +"the config dict of a *single client* (only the first client in the list, " +"the other clients in this round to not receive this \"special\" config " +"value):" msgstr "" -"大多数官方的 `Flower 代码示例 `_" -" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " -"`_ 并使用 \"#questions``\"。" +"这可以通过定制现有策略或 `从头开始实施一个定制策略 `_来实现。下面是一个无厘头的例子,`FedAvg`通过在*单个客户端*的配置指令(config " +"dict)中添加自定义的``\"hello\": \"world\"``配置键/值对添加到此的配置 dict " +"中(仅列表中的第一个客户端,本轮中的其他客户端不会收到此 \"特殊 \"配置值):" -#: ../../source/how-to-upgrade-to-flower-next.rst:325 -#, fuzzy -msgid "Important" -msgstr "重要变更:" +#: ../../source/how-to-configure-logging.rst:2 +msgid "Configure logging" +msgstr "配置日志记录" -#: ../../source/how-to-upgrade-to-flower-next.rst:328 -#, fuzzy +#: ../../source/how-to-configure-logging.rst:4 msgid "" -"As we continuously enhance Flower Next at a rapid pace, we'll be " -"periodically updating this guide. Please feel free to share any feedback " -"with us!" -msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" +"The Flower logger keeps track of all core events that take place in " +"federated learning workloads. 
It presents information by default " +"following a standard message format:" +msgstr "Flower 日志记录器会跟踪联邦学习工作负载中发生的所有核心事件。它默认按照标准信息格式提供信息:" -#: ../../source/how-to-upgrade-to-flower-next.rst:334 +#: ../../source/how-to-configure-logging.rst:13 #, fuzzy -msgid "Happy migrating! 🚀" -msgstr "移民愉快!🚀" +msgid "" +"containing relevant information including: log message level (e.g. " +"``INFO``, ``DEBUG``), a timestamp, the line where the logging took place " +"from, as well as the log message itself. In this way, the logger would " +"typically display information on your terminal as follows:" +msgstr "" +"相关信息包括:日志信息级别(例如 " +":code:`INFO`、:code:`DEBUG`)、时间戳、日志记录的行以及日志信息本身。这样,日志记录器通常会在终端上显示如下信息:" -#: ../../source/how-to-use-built-in-mods.rst:2 -#, fuzzy -msgid "Use Built-in Mods" -msgstr "使用内置调制器" +#: ../../source/how-to-configure-logging.rst:35 +msgid "Saving log to file" +msgstr "将日志保存到文件" -#: ../../source/how-to-use-built-in-mods.rst:4 +#: ../../source/how-to-configure-logging.rst:37 #, fuzzy msgid "" -"**Note: This tutorial covers experimental features. The functionality and" -" interfaces may change in future versions.**" -msgstr "**注:本教程涵盖实验性功能。功能和界面可能会在未来版本中发生变化。" +"By default, the Flower log is outputted to the terminal where you launch " +"your Federated Learning workload from. This applies for both gRPC-based " +"federation (i.e. when you do ``fl.server.start_server``) and when using " +"the ``VirtualClientEngine`` (i.e. when you do " +"``fl.simulation.start_simulation``). In some situations you might want to" +" save this log to disk. You can do so by calling the " +"`fl.common.logger.configure() " +"`_" +" function. 
For example:" +msgstr "" +"默认情况下,Flower 日志会输出到启动联邦学习工作负载的终端。这既适用于基于 gRPC 的联邦学习(即执行 " +":code:`fl.server.start_server` 时),也适用于使用 :code:`VirtualClientEngine` " +"时(即执行 :code:`fl.simulation.start_simulation` " +"时)。在某些情况下,您可能希望将此日志保存到磁盘。为此,您可以调用 `fl.common.logger.configure() " +"`_" +" 函数。例如:" -#: ../../source/how-to-use-built-in-mods.rst:6 +#: ../../source/how-to-configure-logging.rst:59 #, fuzzy msgid "" -"In this tutorial, we will learn how to utilize built-in mods to augment " -"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " -"allow us to perform operations before and after a task is processed in " -"the ``ClientApp``." +"With the above, Flower will record the log you see on your terminal to " +"``log.txt``. This file will be created in the same directory as were you " +"are running the code from. If we inspect we see the log above is also " +"recorded but prefixing with ``identifier`` each line:" msgstr "" -"在本教程中,我们将学习如何利用内置模块来增强 ``ClientApp`` 的行为。修改器(有时也称为修改器)允许我们在 ``ClientApp``" -" 处理任务之前和之后执行操作。" +"通过上述操作,Flower 会将您在终端上看到的日志记录到 " +":code:`log.txt`。该文件将创建在运行代码的同一目录下。如果我们检查一下,就会发现上面的日志也被记录了下来,但每一行都以 " +":code:`identifier` 作为前缀:" -#: ../../source/how-to-use-built-in-mods.rst:9 -#, fuzzy -msgid "What are Mods?" -msgstr "什么是 Mods?" +#: ../../source/how-to-configure-logging.rst:81 +msgid "Log your own messages" +msgstr "记录自己的信息" -#: ../../source/how-to-use-built-in-mods.rst:11 -#, fuzzy +#: ../../source/how-to-configure-logging.rst:83 msgid "" -"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " -"or inspect the incoming ``Message`` and the resulting outgoing " -"``Message``. The signature for a ``Mod`` is as follows:" -msgstr "" -"Mod 是包裹在 ``ClientApp`` 周围的可调用程序。它可以操作或检查传入的 ``Message`` 和由此产生的传出的 " -"``Message`` 。一个 ``Mod`` 的签名如下:" +"You might expand the information shown by default with the Flower logger " +"by adding more messages relevant to your application. You can achieve " +"this easily as follows." 
+msgstr "您可以通过添加更多与应用程序相关的信息来扩展 Flower 日志记录器默认显示的信息。您可以通过以下方法轻松实现这一目标。" -#: ../../source/how-to-use-built-in-mods.rst:18 -#, fuzzy -msgid "A typical mod function might look something like this:" -msgstr "一个典型的修改函数可能是这样的:" +#: ../../source/how-to-configure-logging.rst:114 +msgid "" +"In this way your logger will show, in addition to the default messages, " +"the ones introduced by the clients as specified above." +msgstr "这样,除默认信息外,您的日志记录器还将显示由客户引入的信息,如上文所述。" -#: ../../source/how-to-use-built-in-mods.rst:31 -#, fuzzy -msgid "Using Mods" -msgstr "使用修改器" +#: ../../source/how-to-configure-logging.rst:140 +msgid "Log to a remote service" +msgstr "登录远程服务" -#: ../../source/how-to-use-built-in-mods.rst:33 +#: ../../source/how-to-configure-logging.rst:142 #, fuzzy -msgid "To use mods in your ``ClientApp``, you can follow these steps:" -msgstr "要在您的 ``ClientApp`` 中使用 mod,可以按照以下步骤操作:" - -#: ../../source/how-to-use-built-in-mods.rst:36 -#, fuzzy -msgid "1. Import the required mods" -msgstr "1. 导入所需修改" - -#: ../../source/how-to-use-built-in-mods.rst:38 -#, fuzzy -msgid "First, import the built-in mod you intend to use:" -msgstr "首先,导入您打算使用的内置模式:" +msgid "" +"The ``fl.common.logger.configure`` function, also allows specifying a " +"host to which logs can be pushed (via ``POST``) through a native Python " +"``logging.handler.HTTPHandler``. This is a particularly useful feature in" +" ``gRPC``-based Federated Learning workloads where otherwise gathering " +"logs from all entities (i.e. the server and the clients) might be " +"cumbersome. Note that in Flower simulation, the server automatically " +"displays all logs. You can still specify a ``HTTPHandler`` should you " +"wish to backup or analyze the logs somewhere else." 
+msgstr "" +"此外,:code:`fl.common.logger.configure`函数还允许指定主机,通过本地 Python " +":code:`logging.handler.HTTPHandler`,向该主机推送日志(通过 :code:`POST`)。在基于 " +":code:`gRPC` 的联邦学习工作负载中,这是一个特别有用的功能,否则从所有实体(即服务器和客户端)收集日志可能会很麻烦。请注意,在 " +"Flower 模拟器中,服务器会自动显示所有日志。如果希望在其他地方备份或分析日志,仍可指定 :code:`HTTPHandler`。" -#: ../../source/how-to-use-built-in-mods.rst:46 -#, fuzzy -msgid "2. Define your client function" -msgstr "2. 定义客户功能" +#: ../../source/how-to-enable-ssl-connections.rst:2 +msgid "Enable SSL connections" +msgstr "启用 SSL 连接" -#: ../../source/how-to-use-built-in-mods.rst:48 +#: ../../source/how-to-enable-ssl-connections.rst:4 #, fuzzy msgid "" -"Define your client function (``client_fn``) that will be wrapped by the " -"mod(s):" -msgstr "定义将被 mod 封装的客户端函数(``client_fn``):" +"This guide describes how to a SSL-enabled secure Flower server " +"(``SuperLink``) can be started and how a Flower client (``SuperNode``) " +"can establish a secure connections to it." +msgstr "本指南介绍如何启动启用 SSL 的安全 Flower 服务器,以及 Flower 客户端如何与其建立安全连接。" -#: ../../source/how-to-use-built-in-mods.rst:57 -#, fuzzy -msgid "3. Create the ``ClientApp`` with mods" -msgstr "3. 用模块创建 ``ClientApp``" +#: ../../source/how-to-enable-ssl-connections.rst:8 +msgid "" +"A complete code example demonstrating a secure connection can be found " +"`here `_." +msgstr "" +"有关安全连接的完整代码示例,请参见 `_ 。" -#: ../../source/how-to-use-built-in-mods.rst:59 +#: ../../source/how-to-enable-ssl-connections.rst:11 #, fuzzy msgid "" -"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " -"argument. The order in which you provide the mods matters:" -msgstr "创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod 的顺序很重要:" +"The code example comes with a ``README.md`` file which explains how to " +"start it. Although it is already SSL-enabled, it might be less " +"descriptive on how it does so. Stick to this guide for a deeper " +"introduction to the topic." 
+msgstr "代码示例附带的 README.md 文件将解释如何启动它。虽然它已经启用了 SSL,但对如何启用可能描述较少。请参考本指南,了解更深入的相关介绍。" + +#: ../../source/how-to-enable-ssl-connections.rst:16 +msgid "Certificates" +msgstr "证书" -#: ../../source/how-to-use-built-in-mods.rst:72 +#: ../../source/how-to-enable-ssl-connections.rst:18 #, fuzzy -msgid "Order of execution" -msgstr "停用" +msgid "" +"Using SSL-enabled connections requires certificates to be passed to the " +"server and client. For the purpose of this guide we are going to generate" +" self-signed certificates. As this can become quite complex we are going " +"to ask you to run the script in ``examples/advanced-" +"tensorflow/certificates/generate.sh`` with the following command " +"sequence:" +msgstr "" +"使用支持 SSL 的连接需要向服务器和客户端传递证书。在本指南中,我们将生成自签名证书。由于这可能会变得相当复杂,我们将要求你运行 " +":code:`examples/advanced-tensorflow/certificates/generate.sh` 中的脚本" -#: ../../source/how-to-use-built-in-mods.rst:74 +#: ../../source/how-to-enable-ssl-connections.rst:29 #, fuzzy msgid "" -"When the ``ClientApp`` runs, the mods are executed in the order they are " -"provided in the list:" -msgstr "当运行 ``ClientApp`` 时,会按照列表中提供的顺序执行模块:" +"This will generate the certificates in ``examples/advanced-" +"tensorflow/.cache/certificates``." +msgstr "这将在 :code:`examples/advanced-tensorflow/.cache/certificates` 中生成证书。" -#: ../../source/how-to-use-built-in-mods.rst:76 +#: ../../source/how-to-enable-ssl-connections.rst:32 #, fuzzy -msgid "``example_mod_1`` (outermost mod)" -msgstr "``example_mod_1`` (最外层模块)" +msgid "" +"The approach for generating SSL certificates in the context of this " +"example can serve as an inspiration and starting point, but it should not" +" be used as a reference for production environments. Please refer to " +"other sources regarding the issue of correctly generating certificates " +"for production environments. 
For non-critical prototyping or research " +"projects, it might be sufficient to use the self-signed certificates " +"generated using the scripts mentioned in this guide." +msgstr "本示例中生成 SSL 证书的方法可作为启发和起点,但不应被视为生产环境的完整方法。有关在生产环境中正确生成证书的问题,请参考其他资料。" -#: ../../source/how-to-use-built-in-mods.rst:77 +#: ../../source/how-to-enable-ssl-connections.rst:40 #, fuzzy -msgid "``example_mod_2`` (next mod)" -msgstr "示例模式 2(下一个模式)" +msgid "Server (SuperLink)" +msgstr "flower-superlink" -#: ../../source/how-to-use-built-in-mods.rst:78 +#: ../../source/how-to-enable-ssl-connections.rst:42 #, fuzzy msgid "" -"Message handler (core function that handles the incoming ``Message`` and " -"returns the outgoing ``Message``)" -msgstr "消息处理程序(处理传入的 \"消息 \"并返回传出的 \"消息 \"的核心函数)" +"Use the following terminal command to start a sever (SuperLink) that uses" +" the previously generated certificates:" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" -#: ../../source/how-to-use-built-in-mods.rst:79 +#: ../../source/how-to-enable-ssl-connections.rst:52 #, fuzzy -msgid "``example_mod_2`` (on the way back)" -msgstr "``example_mod_2`` (返回途中)" +msgid "" +"When providing certificates, the server expects a tuple of three " +"certificates paths: CA certificate, server certificate and server private" +" key." +msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" -#: ../../source/how-to-use-built-in-mods.rst:80 +#: ../../source/how-to-enable-ssl-connections.rst:56 #, fuzzy -msgid "``example_mod_1`` (outermost mod on the way back)" -msgstr "``example_mod_1`` (返回途中最外层的模式)" +msgid "Client (SuperNode)" +msgstr "客户端状态代码。" -#: ../../source/how-to-use-built-in-mods.rst:82 +#: ../../source/how-to-enable-ssl-connections.rst:58 #, fuzzy msgid "" -"Each mod has a chance to inspect and modify the incoming ``Message`` " -"before passing it to the next mod, and likewise with the outgoing " -"``Message`` before returning it up the stack." 
-msgstr "每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样,也有机会检查和修改传出的 \"信息\",然后再将其返回堆栈。" +"Use the following terminal command to start a client (SuperNode) that " +"uses the previously generated certificates:" +msgstr "现在我们将演示如何编写一个客户端,使用之前生成的脚本:" -#: ../../source/how-to-use-built-in-mods.rst:87 +#: ../../source/how-to-enable-ssl-connections.rst:67 #, fuzzy msgid "" -"By following this guide, you have learned how to effectively use mods to " -"enhance your ``ClientApp``'s functionality. Remember that the order of " -"mods is crucial and affects how the input and output are processed." +"When setting ``root_certificates``, the client expects a file path to " +"PEM-encoded root certificates." msgstr "" -"通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` 的功能。请记住,mod " -"的顺序至关重要,它会影响输入和输出的处理方式。" +"当设置 :code:`root_certificates` 时,客户端希望 PEM 编码的根证书是字节字符串。我们再次使用 " +":code:`Path` 来简化以字节字符串形式读取证书的过程。" -#: ../../source/how-to-use-built-in-mods.rst:89 +#: ../../source/how-to-enable-ssl-connections.rst:73 #, fuzzy -msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" -msgstr "使用 mods 构建更强大、更灵活的 \"客户端应用程序\"!" +msgid "" +"You should now have learned how to generate self-signed certificates " +"using the given script, start an SSL-enabled server and have a client " +"establish a secure connection to it." +msgstr "现在,你应该已经学会了如何使用给定的脚本生成自签名证书、启动启用 SSL 的服务器并让客户端与其建立安全连接。" -#: ../../source/how-to-use-differential-privacy.rst:2 -#, fuzzy -msgid "Use Differential Privacy" -msgstr "差分隐私" +#: ../../source/how-to-enable-ssl-connections.rst:78 +msgid "Additional resources" +msgstr "补充资源" -#: ../../source/how-to-use-differential-privacy.rst:3 -#, fuzzy +#: ../../source/how-to-enable-ssl-connections.rst:80 msgid "" -"This guide explains how you can utilize differential privacy in the " -"Flower framework. If you are not yet familiar with differential privacy, " -"you can refer to :doc:`explanation-differential-privacy`." 
-msgstr "" -"本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私,可以参考 :doc:`explanation-" -"differential-privacy` 。" +"These additional sources might be relevant if you would like to dive " +"deeper into the topic of certificates:" +msgstr "如果您想更深入地了解证书主题,这些额外的资料来源可能有帮助:" -#: ../../source/how-to-use-differential-privacy.rst:7 -#, fuzzy +#: ../../source/how-to-enable-ssl-connections.rst:83 +msgid "`Let's Encrypt `_" +msgstr "`让我们加密 `_" + +#: ../../source/how-to-enable-ssl-connections.rst:84 +msgid "`certbot `_" +msgstr "`certbot `_" + +#: ../../source/how-to-implement-strategies.rst:2 +msgid "Implement strategies" +msgstr "实施策略" + +#: ../../source/how-to-implement-strategies.rst:4 msgid "" -"Differential Privacy in Flower is in a preview phase. If you plan to use " -"these features in a production environment with sensitive data, feel free" -" contact us to discuss your requirements and to receive guidance on how " -"to best use these features." +"The strategy abstraction enables implementation of fully custom " +"strategies. A strategy is basically the federated learning algorithm that" +" runs on the server. Strategies decide how to sample clients, how to " +"configure clients for training, how to aggregate updates, and how to " +"evaluate models. Flower provides a few built-in strategies which are " +"based on the same API described below." msgstr "" -"Flower " -"中的差异隐私处于预览阶段。如果您计划在生产环境中使用这些敏感数据功能,请随时联系我们,讨论您的需求,并获得如何最好地使用这些功能的指导。" +"策略抽象类可以实现完全定制的策略。策略基本上就是在服务器上运行的联邦学习算法。策略决定如何对客户端进行采样、如何配置客户端进行训练、如何聚合参数更新以及如何评估模型。Flower" +" 提供了一些内置策略,这些策略基于下文所述的相同 API。" -#: ../../source/how-to-use-differential-privacy.rst:12 +#: ../../source/how-to-implement-strategies.rst:11 #, fuzzy -msgid "" -"This approach consists of two seprate phases: clipping of the updates and" -" adding noise to the aggregated model. For the clipping phase, Flower " -"framework has made it possible to decide whether to perform clipping on " -"the server side or the client side." 
-msgstr "这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶段,Flower 框架可以决定是在服务器端还是在客户端执行剪切。" +msgid "The ``Strategy`` abstraction" +msgstr ":code:`策略 ` 抽象类" -#: ../../source/how-to-use-differential-privacy.rst:15 +#: ../../source/how-to-implement-strategies.rst:13 #, fuzzy msgid "" -"**Server-side Clipping**: This approach has the advantage of the server " -"enforcing uniform clipping across all clients' updates and reducing the " -"communication overhead for clipping values. However, it also has the " -"disadvantage of increasing the computational load on the server due to " -"the need to perform the clipping operation for all clients." +"All strategy implementation are derived from the abstract base class " +"``flwr.server.strategy.Strategy``, both built-in implementations and " +"third party implementations. This means that custom strategy " +"implementations have the exact same capabilities at their disposal as " +"built-in ones." msgstr "" -"** 服务器端剪切**: " -"这种方法的优点是服务器可对所有客户端的更新执行统一的剪切,并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执行剪切操作,从而增加了服务器的计算负荷。" +"所有策略实现均源自抽象基类 " +":code:`flwr.server.strategy.Strategy`,包括内置实现和第三方实现。这意味着自定义策略实现与内置实现具有完全相同的功能。" + +#: ../../source/how-to-implement-strategies.rst:18 +msgid "" +"The strategy abstraction defines a few abstract methods that need to be " +"implemented:" +msgstr "策略抽象定义了一些需要实现的抽象方法:" -#: ../../source/how-to-use-differential-privacy.rst:16 +#: ../../source/how-to-implement-strategies.rst:67 #, fuzzy msgid "" -"**Client-side Clipping**: This approach has the advantage of reducing the" -" computational overhead on the server. However, it also has the " -"disadvantage of lacking centralized control, as the server has less " -"control over the clipping process." 
-msgstr "**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集中控制的缺点,因为服务器对剪切过程的控制较少。" +"Creating a new strategy means implementing a new ``class`` (derived from " +"the abstract base class ``Strategy``) that implements for the previously " +"shown abstract methods:" +msgstr "创建一个新策略意味着要实现一个新的 :code:`class`(从抽象基类 :code:`Strategy` 派生),该类要实现前面显示的抽象方法:" -#: ../../source/how-to-use-differential-privacy.rst:21 +#: ../../source/how-to-implement-strategies.rst:97 +msgid "The Flower server calls these methods in the following order:" +msgstr "Flower 服务器按以下顺序调用这些方法:" + +#: ../../source/how-to-implement-strategies.rst:174 +msgid "The following sections describe each of those methods in more detail." +msgstr "下文将详细介绍每种方法。" + +#: ../../source/how-to-implement-strategies.rst:177 #, fuzzy -msgid "Server-side Clipping" -msgstr "服务器端逻辑" +msgid "The ``initialize_parameters`` method" +msgstr ":code:`初始化参数` 方法" -#: ../../source/how-to-use-differential-privacy.rst:22 +#: ../../source/how-to-implement-strategies.rst:179 #, fuzzy msgid "" -"For central DP with server-side clipping, there are two :code:`Strategy` " -"classes that act as wrappers around the actual :code:`Strategy` instance " -"(for example, :code:`FedAvg`). The two wrapper classes are " -":code:`DifferentialPrivacyServerSideFixedClipping` and " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and " -"adaptive clipping." +"``initialize_parameters`` is called only once, at the very beginning of " +"an execution. It is responsible for providing the initial global model " +"parameters in a serialized form (i.e., as a ``Parameters`` object)." 
msgstr "" -"对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 :code:`Strategy` 实例(例如 " -":code:`FedAvg`)的包装器。这两个封装类分别是 " -":code:`DifferentialPrivacyServerSideFixedClipping` 和 " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` ,用于固定剪辑和自适应剪辑。" +":code:`initialize_parameters` 只调用一次,即在执行开始时。它负责以序列化形式(即 " +":code:`Parameters` 对象)提供初始全局模型参数。" -#: ../../source/how-to-use-differential-privacy.rst:-1 +#: ../../source/how-to-implement-strategies.rst:183 #, fuzzy -msgid "server side clipping" -msgstr "服务器端逻辑" +msgid "" +"Built-in strategies return user-provided initial parameters. The " +"following example shows how initial parameters can be passed to " +"``FedAvg``:" +msgstr "内置策略会返回用户提供的初始参数。下面的示例展示了如何将初始参数传递给 :code:`FedAvg`:" -#: ../../source/how-to-use-differential-privacy.rst:31 +#: ../../source/how-to-implement-strategies.rst:209 #, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use server-" -"side fixed clipping using the " -":code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. The " -"same approach can be used with " -":code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the " -"corresponding input parameters." +"The Flower server will call ``initialize_parameters``, which either " +"returns the parameters that were passed to ``initial_parameters``, or " +"``None``. If no parameters are returned from ``initialize_parameters`` " +"(i.e., ``None``), the server will randomly select one client and ask it " +"to provide its parameters. This is a convenience feature and not " +"recommended in practice, but it can be useful for prototyping. In " +"practice, it is recommended to always use server-side parameter " +"initialization." 
msgstr "" -"下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` 封装类使 " -":code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数,同样的方法也可用于 " -":code:`DifferentialPrivacyServerSideAdaptiveClipping`。" +"Flower 服务器将调用 :code:`initialize_parameters`,返回传给 " +":code:`initial_parameters` 的参数或 :code:`None`。如果 " +":code:`initialize_parameters` 没有返回任何参数(即 " +":code:`None`),服务器将随机选择一个客户端并要求其提供参数。这只是一个便捷的功能,在实际应用中并不推荐使用,但在原型开发中可能很有用。在实践中,建议始终使用服务器端参数初始化。" + +#: ../../source/how-to-implement-strategies.rst:218 +msgid "" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." +msgstr "服务器端参数初始化是一种强大的机制。例如,它可以用来从先前保存的检查点恢复训练。它也是实现混合方法所需的基本能力,例如,使用联邦学习对预先训练好的模型进行微调。" -#: ../../source/how-to-use-differential-privacy.rst:52 +#: ../../source/how-to-implement-strategies.rst:224 #, fuzzy -msgid "Client-side Clipping" -msgstr "客户端逻辑" +msgid "The ``configure_fit`` method" +msgstr ":code:`configure_fit`方法" -#: ../../source/how-to-use-differential-privacy.rst:53 +#: ../../source/how-to-implement-strategies.rst:226 #, fuzzy msgid "" -"For central DP with client-side clipping, the server sends the clipping " -"value to selected clients on each round. Clients can use existing Flower " -":code:`Mods` to perform the clipping. Two mods are available for fixed " -"and adaptive client-side clipping: :code:`fixedclipping_mod` and " -":code:`adaptiveclipping_mod` with corresponding server-side wrappers " -":code:`DifferentialPrivacyClientSideFixedClipping` and " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`." +"``configure_fit`` is responsible for configuring the upcoming round of " +"training. What does *configure* mean in this context? Configuring a round" +" means selecting clients and deciding what instructions to send to these " +"clients. 
The signature of ``configure_fit`` makes this clear:" msgstr "" -"对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。客户端可以使用现有的 Flower " -":code:`Mods`来执行剪裁。有两种模式可用于固定和自适应客户端剪辑::code:`fixedclipping_mod` 和 " -":code:`adaptiveclipping_mod`,以及相应的服务器端封装 " -":code:`DifferentialPrivacyClientSideFixedClipping` 和 " -":code:`DifferentialPrivacyClientSideAdaptiveClipping`。" - -#: ../../source/how-to-use-differential-privacy.rst:-1 -#, fuzzy -msgid "client side clipping" -msgstr "客户端逻辑" +":code:`configure_fit` " +"负责配置即将开始的一轮训练。*配置*在这里是什么意思?配置一轮训练意味着选择客户并决定向这些客户发送什么指令。:code:`configure_fit`" +" 说明了这一点:" -#: ../../source/how-to-use-differential-privacy.rst:63 +#: ../../source/how-to-implement-strategies.rst:239 #, fuzzy msgid "" -"The code sample below enables the :code:`FedAvg` strategy to use " -"differential privacy with client-side fixed clipping using both the " -":code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on " -"the client, :code:`fixedclipping_mod`:" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. 
Strategy implementations " +"usually perform the following steps in ``configure_fit``:" +msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_fit` 中执行以下步骤:" + +#: ../../source/how-to-implement-strategies.rst:243 +#: ../../source/how-to-implement-strategies.rst:307 +#, fuzzy +msgid "" +"Use the ``client_manager`` to randomly sample all (or a subset of) " +"available clients (each represented as a ``ClientProxy`` object)" msgstr "" -"下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` 封装类和客户端的 " -":code:`fixedclipping_mod` 使 :code:`FedAvg` 策略在客户端固定剪辑的情况下使用差分隐私:" +"使用 :code:`client_manager` 随机抽样所有(或部分)可用客户端(每个客户端都表示为 :code:`ClientProxy` " +"对象)" -#: ../../source/how-to-use-differential-privacy.rst:80 +#: ../../source/how-to-implement-strategies.rst:245 #, fuzzy msgid "" -"In addition to the server-side strategy wrapper, the :code:`ClientApp` " -"needs to configure the matching :code:`fixedclipping_mod` to perform the " -"client-side clipping:" +"Pair each ``ClientProxy`` with the same ``FitIns`` holding the current " +"global model ``parameters`` and ``config`` dict" msgstr "" -"除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 :code:`fixedclipping_mod` " -"以执行客户端剪切:" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " +"dict 的 :code:`FitIns` 配对" -#: ../../source/how-to-use-differential-privacy.rst:97 +#: ../../source/how-to-implement-strategies.rst:248 #, fuzzy msgid "" -"To utilize local differential privacy (DP) and add noise to the client " -"model parameters before transmitting them to the server in Flower, you " -"can use the `LocalDpMod`. The following hyperparameters need to be set: " -"clipping norm value, sensitivity, epsilon, and delta." +"More sophisticated implementations can use ``configure_fit`` to implement" +" custom client selection logic. A client will only participate in a round" +" if the corresponding ``ClientProxy`` is included in the list returned " +"from ``configure_fit``." 
msgstr "" -"要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower 服务器之前为其添加噪声,可以使用 " -"`LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 delta。" +"更复杂的实现可以使用 :code:`configure_fit` 来实现自定义的客户端选择逻辑。只有当相应的 " +":code:`ClientProxy` 包含在 :code:`configure_fit` 返回的列表中时,客户端才会参与进来。" -#: ../../source/how-to-use-differential-privacy.rst:-1 +#: ../../source/how-to-implement-strategies.rst:254 #, fuzzy -msgid "local DP mod" -msgstr "本地 DP 模式" +msgid "" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to train, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." +msgstr "" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略成为可能,例如在不同的客户端上训练不同的模型,或在不同的客户端上使用不同的超参数(通过" +" :code:`config` dict)。" -#: ../../source/how-to-use-differential-privacy.rst:104 +#: ../../source/how-to-implement-strategies.rst:261 #, fuzzy -msgid "Below is a code example that shows how to use :code:`LocalDpMod`:" -msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" +msgid "The ``aggregate_fit`` method" +msgstr ":code:`aggregate_fit` 方法" -#: ../../source/how-to-use-differential-privacy.rst:122 +#: ../../source/how-to-implement-strategies.rst:263 #, fuzzy msgid "" -"Please note that the order of mods, especially those that modify " -"parameters, is important when using multiple modifiers. Typically, " -"differential privacy (DP) modifiers should be the last to operate on " -"parameters." -msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" +"``aggregate_fit`` is responsible for aggregating the results returned by " +"the clients that were selected and asked to train in ``configure_fit``." 
+msgstr ":code:`aggregate_fit` 负责汇总在 :code:`configure_fit` 中选择并要求训练的客户端所返回的结果。" -#: ../../source/how-to-use-differential-privacy.rst:125 +#: ../../source/how-to-implement-strategies.rst:277 #, fuzzy -msgid "Local Training using Privacy Engines" -msgstr "使用隐私引擎进行本地培训" +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_fit``). ``aggregate_fit`` therefore receives a list of " +"``results``, but also a list of ``failures``." +msgstr "" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 :code:`configure_fit`)的所有客户端获得结果。因此 " +":code:`aggregate_fit` 会收到 :code:`results` 的列表,但也会收到 :code:`failures` 的列表。" -#: ../../source/how-to-use-differential-privacy.rst:126 +#: ../../source/how-to-implement-strategies.rst:282 #, fuzzy msgid "" -"For ensuring data instance-level privacy during local model training on " -"the client side, consider leveraging privacy engines such as Opacus and " -"TensorFlow Privacy. For examples of using Flower with these engines, " -"please refer to the Flower examples directory (`Opacus " -"`_, `Tensorflow" -" Privacy `_)." +"``aggregate_fit`` returns an optional ``Parameters`` object and a " +"dictionary of aggregated metrics. The ``Parameters`` return value is " +"optional because ``aggregate_fit`` might decide that the results provided" +" are not sufficient for aggregation (e.g., too many failures)." 
msgstr "" -"要在客户端本地模型训练期间确保数据实例级隐私,可考虑利用 Opacus 和 TensorFlow Privacy 等隐私引擎。有关将 Flower" -" 与这些引擎结合使用的示例,请参阅 Flower 示例目录(`Opacus " -"`_, `Tensorflow" -" Privacy `_)。" +":code:`aggregate_fit` 返回一个可选的 :code:`Parameters` " +"对象和一个聚合度量的字典。:code:`Parameters` 返回值是可选的,因为 :code:`aggregate_fit` " +"可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" -#: ../../source/how-to-use-strategies.rst:2 -msgid "Use strategies" -msgstr "使用策略" +#: ../../source/how-to-implement-strategies.rst:288 +#, fuzzy +msgid "The ``configure_evaluate`` method" +msgstr ":code:`configure_evaluate`方法" -#: ../../source/how-to-use-strategies.rst:4 +#: ../../source/how-to-implement-strategies.rst:290 +#, fuzzy msgid "" -"Flower allows full customization of the learning process through the " -":code:`Strategy` abstraction. A number of built-in strategies are " -"provided in the core framework." -msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" +"``configure_evaluate`` is responsible for configuring the upcoming round " +"of evaluation. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. 
The signature of ``configure_evaluate`` makes this clear:" +msgstr "" +":code:`configure_evaluate` " +"负责配置下一轮评估。*配置*在这里是什么意思?配置一轮评估意味着选择客户端并决定向这些客户端发送什么指令。:code:`configure_evaluate`" +" 说明了这一点:" -#: ../../source/how-to-use-strategies.rst:6 +#: ../../source/how-to-implement-strategies.rst:303 +#, fuzzy msgid "" -"There are three ways to customize the way Flower orchestrates the " -"learning process on the server side:" -msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" - -#: ../../source/how-to-use-strategies.rst:8 -msgid "Use an existing strategy, for example, :code:`FedAvg`" -msgstr "使用现有策略,例如 :code:`FedAvg`" - -#: ../../source/how-to-use-strategies.rst:9 -#: ../../source/how-to-use-strategies.rst:40 -msgid "Customize an existing strategy with callback functions" -msgstr "使用回调函数定制现有策略" - -#: ../../source/how-to-use-strategies.rst:10 -#: ../../source/how-to-use-strategies.rst:87 -msgid "Implement a novel strategy" -msgstr "实施新策略" - -#: ../../source/how-to-use-strategies.rst:14 -msgid "Use an existing strategy" -msgstr "使用现有策略" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in ``configure_evaluate``:" +msgstr "返回值是一个元组列表,每个元组代表将发送到特定客户端的指令。策略实现通常在 :code:`configure_evaluate` 中执行以下步骤:" -#: ../../source/how-to-use-strategies.rst:16 +#: ../../source/how-to-implement-strategies.rst:309 +#, fuzzy msgid "" -"Flower comes with a number of popular federated learning strategies " -"built-in. 
A built-in strategy can be instantiated as follows:" -msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" +"Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the " +"current global model ``parameters`` and ``config`` dict" +msgstr "" +"将每个 :code:`ClientProxy` 与持有当前全局模型 :code:`parameters` 和 :code:`config` " +"dict 的 :code:`EvaluateIns` 配对" -#: ../../source/how-to-use-strategies.rst:25 +#: ../../source/how-to-implement-strategies.rst:312 +#, fuzzy msgid "" -"This creates a strategy with all parameters left at their default values " -"and passes it to the :code:`start_server` function. It is usually " -"recommended to adjust a few parameters during instantiation:" -msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" +"More sophisticated implementations can use ``configure_evaluate`` to " +"implement custom client selection logic. A client will only participate " +"in a round if the corresponding ``ClientProxy`` is included in the list " +"returned from ``configure_evaluate``." +msgstr "" +"更复杂的实现可以使用 :code:`configure_evaluate` 来实现自定义的客户端选择逻辑。只有当相应的 " +":code:`ClientProxy` 包含在 :code:`configure_evaluate` 返回的列表中时,客户端才会参与进来。" -#: ../../source/how-to-use-strategies.rst:42 +#: ../../source/how-to-implement-strategies.rst:318 +#, fuzzy msgid "" -"Existing strategies provide several ways to customize their behaviour. " -"Callback functions allow strategies to call user-provided code during " -"execution." -msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" +"The structure of this return value provides a lot of flexibility to the " +"user. Since instructions are defined on a per-client basis, different " +"instructions can be sent to each client. This enables custom strategies " +"to evaluate, for example, different models on different clients, or use " +"different hyperparameters on different clients (via the ``config`` dict)." 
+msgstr "" +"该返回值的结构为用户提供了很大的灵活性。由于指令是按客户端定义的,因此可以向每个客户端发送不同的指令。这使得自定义策略可以在不同客户端上评估不同的模型,或在不同客户端上使用不同的超参数(通过" +" :code:`config` dict)。" -#: ../../source/how-to-use-strategies.rst:45 -msgid "Configuring client fit and client evaluate" -msgstr "配置客户匹配和客户评估" +#: ../../source/how-to-implement-strategies.rst:325 +#, fuzzy +msgid "The ``aggregate_evaluate`` method" +msgstr ":code:`aggregate_evaluate` 方法" -#: ../../source/how-to-use-strategies.rst:47 +#: ../../source/how-to-implement-strategies.rst:327 +#, fuzzy msgid "" -"The server can pass new configuration values to the client each round by " -"providing a function to :code:`on_fit_config_fn`. The provided function " -"will be called by the strategy and must return a dictionary of " -"configuration key values pairs that will be sent to the client. It must " -"return a dictionary of arbitrary configuration values :code:`client.fit`" -" and :code:`client.evaluate` functions during each round of federated " -"learning." +"``aggregate_evaluate`` is responsible for aggregating the results " +"returned by the clients that were selected and asked to evaluate in " +"``configure_evaluate``." msgstr "" -"服务器可以通过向 :code:`on_fit_config_fn` " -"提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" -" dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" +":code:`aggregate_evaluate` 负责汇总在 :code:`configure_evaluate` " +"中选择并要求评估的客户端返回的结果。" -#: ../../source/how-to-use-strategies.rst:75 +#: ../../source/how-to-implement-strategies.rst:341 +#, fuzzy msgid "" -"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " -"values from server to client, and poetentially change these values each " -"round, for example, to adjust the learning rate. The client will receive " -"the dictionary returned by the :code:`on_fit_config_fn` in its own " -":code:`client.fit()` function." 
+"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +"``configure_evaluate``). ``aggregate_evaluate`` therefore receives a list" +" of ``results``, but also a list of ``failures``." msgstr "" -":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " -":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" +"当然,失败是有可能发生的,因此无法保证服务器会从它发送指令(通过 " +":code:`configure_evaluate`)的所有客户端获得结果。因此, :code:`aggregate_evaluate` 会接收 " +":code:`results` 的列表,但也会接收 :code:`failures` 的列表。" -#: ../../source/how-to-use-strategies.rst:78 +#: ../../source/how-to-implement-strategies.rst:346 +#, fuzzy msgid "" -"Similar to :code:`on_fit_config_fn`, there is also " -":code:`on_evaluate_config_fn` to customize the configuration sent to " -":code:`client.evaluate()`" +"``aggregate_evaluate`` returns an optional ``float`` (loss) and a " +"dictionary of aggregated metrics. The ``float`` return value is optional " +"because ``aggregate_evaluate`` might decide that the results provided are" +" not sufficient for aggregation (e.g., too many failures)." msgstr "" -"与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " -":code:`client.evaluate()` 的配置" +":code:`aggregate_evaluate` 返回一个可选的 " +":code:`float`(损失值)和一个聚合指标字典。:code:`float` 返回值是可选的,因为 " +":code:`aggregate_evaluate` 可能会认为所提供的结果不足以进行聚合(例如,失败次数过多)。" -#: ../../source/how-to-use-strategies.rst:81 -msgid "Configuring server-side evaluation" -msgstr "配置服务器端评估" +#: ../../source/how-to-implement-strategies.rst:352 +#, fuzzy +msgid "The ``evaluate`` method" +msgstr ":code:`evaluate`方法" -#: ../../source/how-to-use-strategies.rst:83 +#: ../../source/how-to-implement-strategies.rst:354 +#, fuzzy msgid "" -"Server-side evaluation can be enabled by passing an evaluation function " -"to :code:`evaluate_fn`." 
-msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" +"``evaluate`` is responsible for evaluating model parameters on the " +"server-side. Having ``evaluate`` in addition to " +"``configure_evaluate``/``aggregate_evaluate`` enables strategies to " +"perform both servers-side and client-side (federated) evaluation." +msgstr "" +":code:`evaluate` 负责在服务器端评估模型参数。除了 " +":code:`configure_evaluate`/:code:`aggregate_evaluate` 之外,:code:`evaluate`" +" 可以使策略同时执行服务器端和客户端(联邦)评估。" -#: ../../source/how-to-use-strategies.rst:89 +#: ../../source/how-to-implement-strategies.rst:364 +#, fuzzy msgid "" -"Writing a fully custom strategy is a bit more involved, but it provides " -"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +"``evaluate`` method might not complete successfully (e.g., it might fail " +"to load the server-side evaluation data)." 
msgstr "" -"编写完全自定义的策略涉及的内容较多,但灵活性最高。阅读 `实施策略 _ " -"指南,了解更多信息。" - -#: ../../source/index.rst:34 -msgid "Tutorial" -msgstr "教程" - -#: ../../source/index.rst:44 -msgid "Quickstart tutorials" -msgstr "快速入门教程" +"返回值也是可选的,因为策略可能不需要执行服务器端评估,或者因为用户定义的 :code:`evaluate` " +"方法可能无法成功完成(例如,它可能无法加载服务器端评估数据)。" -#: ../../source/index.rst:74 ../../source/index.rst:78 -msgid "How-to guides" -msgstr "操作指南" +#: ../../source/how-to-install-flower.rst:2 +msgid "Install Flower" +msgstr "安装Flower" -#: ../../source/index.rst:99 -msgid "Legacy example guides" -msgstr "旧版指南范例" +#: ../../source/how-to-install-flower.rst:5 +msgid "Python version" +msgstr "Python 版本" -#: ../../source/index.rst:108 ../../source/index.rst:112 -msgid "Explanations" -msgstr "说明" +#: ../../source/how-to-install-flower.rst:11 +msgid "Install stable release" +msgstr "安装稳定版" -#: None:-1 -msgid "API reference" -msgstr "应用程序接口参考" +#: ../../source/how-to-install-flower.rst:14 +#: ../../source/how-to-upgrade-to-flower-next.rst:66 +#, fuzzy +msgid "Using pip" +msgstr "使用 pip" -#: ../../source/index.rst:137 -msgid "Reference docs" -msgstr "参考文档" +#: ../../source/how-to-install-flower.rst:16 +#, fuzzy +msgid "Stable releases are available on `PyPI `_:" +msgstr "稳定版本可在 `PyPI `_::" -#: ../../source/index.rst:153 -msgid "Contributor tutorials" -msgstr "贡献者教程" +#: ../../source/how-to-install-flower.rst:22 +#, fuzzy +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr`` should be " +"installed with the ``simulation`` extra:" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr`` 应与`simulation`` 一起安装:" -#: ../../source/index.rst:160 -msgid "Contributor how-to guides" -msgstr "投稿指南" +#: ../../source/how-to-install-flower.rst:30 +#, fuzzy +msgid "Using conda (or mamba)" +msgstr "使用 conda(或 mamba)" -#: ../../source/index.rst:172 -msgid "Contributor explanations" -msgstr "贡献者解释" +#: ../../source/how-to-install-flower.rst:32 +#, fuzzy +msgid "Flower can also be installed from the ``conda-forge`` channel." 
+msgstr "Flower 也可以从 ``conda-forge`` 频道安装。" -#: ../../source/index.rst:178 -msgid "Contributor references" -msgstr "贡献者参考资料" +#: ../../source/how-to-install-flower.rst:34 +#, fuzzy +msgid "" +"If you have not added ``conda-forge`` to your channels, you will first " +"need to run the following:" +msgstr "如果您尚未在频道中添加 ``conda-forge``,则首先需要运行以下程序::" -#: ../../source/index.rst:-1 +#: ../../source/how-to-install-flower.rst:42 +#, fuzzy msgid "" -"Check out the documentation of the main Flower Framework enabling easy " -"Python development for Federated Learning." -msgstr "查看主 Flower Framework 的文档,轻松实现联邦学习的 Python 开发。" +"Once the ``conda-forge`` channel has been enabled, ``flwr`` can be " +"installed with ``conda``:" +msgstr "一旦启用了 ``conda-forge`` 频道,就可以使用 ``conda``: 安装 ``flwr``:" -#: ../../source/index.rst:2 -msgid "Flower Framework Documentation" -msgstr "Flower 框架文档" +#: ../../source/how-to-install-flower.rst:49 +#, fuzzy +msgid "or with ``mamba``:" +msgstr "或用 ``mamba`` ::" -#: ../../source/index.rst:7 +#: ../../source/how-to-install-flower.rst:56 +msgid "Verify installation" +msgstr "验证安装" + +#: ../../source/how-to-install-flower.rst:58 +#, fuzzy msgid "" -"Welcome to Flower's documentation. `Flower `_ is a " -"friendly federated learning framework." -msgstr "欢迎访问 Flower 文档。`Flower `_ 是一个友好的联邦学习框架。" +"The following command can be used to verify if Flower was successfully " +"installed. If everything worked, it should print the version of Flower to" +" the command line:" +msgstr "可以使用以下命令来验证 Flower 是否安装成功。如果一切正常,它将在命令行中打印 Flower 的版本::" -#: ../../source/index.rst:11 -msgid "Join the Flower Community" -msgstr "加入 Flower 社区" +#: ../../source/how-to-install-flower.rst:68 +msgid "Advanced installation options" +msgstr "高级安装选项" -#: ../../source/index.rst:13 -msgid "" -"The Flower Community is growing quickly - we're a friendly group of " -"researchers, engineers, students, professionals, academics, and other " -"enthusiasts." 
-msgstr "Flower 社区发展迅速--我们是一个由研究人员、工程师、学生、专业人士、学者和其他爱好者组成的友好团体。" +#: ../../source/how-to-install-flower.rst:71 +#, fuzzy +msgid "Install via Docker" +msgstr "安装Flower" -#: ../../source/index.rst:15 -msgid "Join us on Slack" -msgstr "在 Slack 上加入我们" +#: ../../source/how-to-install-flower.rst:73 +#, fuzzy +msgid ":doc:`Run Flower using Docker `" +msgstr "" +"`TensorFlow快速入门 (教程) `_" -#: ../../source/index.rst:23 -msgid "Flower Framework" -msgstr "Flower 框架" +#: ../../source/how-to-install-flower.rst:76 +msgid "Install pre-release" +msgstr "安装预发布版本" -#: ../../source/index.rst:25 +#: ../../source/how-to-install-flower.rst:78 +#, fuzzy msgid "" -"The user guide is targeted at researchers and developers who want to use " -"Flower to bring existing machine learning workloads into a federated " -"setting. One of Flower's design goals was to make this simple. Read on to" -" learn more." -msgstr "" -"该用户指南面向希望使用 Flower 将现有机器学习工作负载引入联邦环境的研究人员和开发人员。Flower " -"的设计目标之一就是让这一切变得简单。请继续阅读,了解更多信息。" +"New (possibly unstable) versions of Flower are sometimes available as " +"pre-release versions (alpha, beta, release candidate) before the stable " +"release happens:" +msgstr "在稳定版发布之前,Flower 的新版本(可能是不稳定版)有时会作为预发布版本(alpha、beta、候选发布版本)提供::" -#: ../../source/index.rst:30 -msgid "Tutorials" -msgstr "教程" +#: ../../source/how-to-install-flower.rst:85 +#, fuzzy +msgid "" +"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases" +" should be installed with the ``simulation`` extra:" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr``预发行版应与`simulation``一起安装:" -#: ../../source/index.rst:32 +#: ../../source/how-to-install-flower.rst:93 +msgid "Install nightly release" +msgstr "安装隔夜版本" + +#: ../../source/how-to-install-flower.rst:95 +#, fuzzy msgid "" -"A learning-oriented series of federated learning tutorials, the best " -"place to start." 
-msgstr "以学习为导向的联邦学习教程系列,最好的起点。" +"The latest (potentially unstable) changes in Flower are available as " +"nightly releases:" +msgstr "Flower 中最新(可能不稳定)的更改以隔夜发布的形式提供::" -#: ../../source/index.rst:61 +#: ../../source/how-to-install-flower.rst:101 #, fuzzy msgid "" -"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " -":doc:`TensorFlow ` | :doc:`🤗 Transformers" -" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " -"` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | " -":doc:`Android ` | :doc:`iOS `" +"For simulations that use the Virtual Client Engine, ``flwr-nightly`` " +"should be installed with the ``simulation`` extra:" +msgstr "对于使用虚拟客户端引擎的模拟,`flwr-nightly`应与`simulation`一起安装:" + +#: ../../source/how-to-monitor-simulation.rst:2 +msgid "Monitor simulation" +msgstr "监控模拟" + +#: ../../source/how-to-monitor-simulation.rst:4 +msgid "" +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." msgstr "" -"快速入门教程: :doc:`PyTorch ` | :doc:`TensorFlow " -"` | :doc:`🤗 Transformers ` | :doc:`JAX ` | " -":doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc" -":`scikit-learn ` | :doc:`XGBoost " -"` | :doc:`Android ` | :doc:`iOS `" +"Flower 允许您在运行模拟时监控系统资源。此外,Flower " +"仿真引擎功能强大,能让您决定如何按客户端方式分配资源并限制总使用量。从资源消耗中获得的观察可以帮助您做出更明智的决策,并加快执行时间。" -#: ../../source/index.rst:63 -msgid "We also made video tutorials for PyTorch:" -msgstr "我们还为 PyTorch 制作了视频教程:" +#: ../../source/how-to-monitor-simulation.rst:9 +msgid "" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." 
+msgstr "具体说明假定你使用的是 macOS,并且安装了 `Homebrew `_ 软件包管理器。" -#: ../../source/index.rst:68 -msgid "And TensorFlow:" -msgstr "还有 TensorFlow:" +#: ../../source/how-to-monitor-simulation.rst:13 +msgid "Downloads" +msgstr "下载" -#: ../../source/index.rst:76 +#: ../../source/how-to-monitor-simulation.rst:19 msgid "" -"Problem-oriented how-to guides show step-by-step how to achieve a " -"specific goal." -msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." +msgstr "" +"`Prometheus `_ 用于收集数据,而 `Grafana " +"`_ 则能让你将收集到的数据可视化。它们都与 Flower 在引擎下使用的 `Ray " +"`_ 紧密集成。" -#: ../../source/index.rst:110 +#: ../../source/how-to-monitor-simulation.rst:23 msgid "" -"Understanding-oriented concept guides explain and discuss key topics and " -"underlying ideas behind Flower and collaborative AI." -msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." +msgstr "重写配置文件(根据设备的不同,可能安装在不同的路径上)。" -#: ../../source/index.rst:120 -msgid "References" -msgstr "参考资料" +#: ../../source/how-to-monitor-simulation.rst:26 +msgid "If you are on an M1 Mac, it should be:" +msgstr "如果你使用的是 M1 Mac,应该是这样:" -#: ../../source/index.rst:122 -msgid "Information-oriented API reference and other reference material." -msgstr "以信息为导向的 API 参考资料和其他参考资料。" +#: ../../source/how-to-monitor-simulation.rst:33 +msgid "On the previous generation Intel Mac devices, it should be:" +msgstr "在上一代英特尔 Mac 设备上,应该是这样:" -#: ../../source/index.rst:131::1 -#, fuzzy -msgid ":py:obj:`flwr `\\" -msgstr ":py:obj:`flwr `\\" +#: ../../source/how-to-monitor-simulation.rst:40 +msgid "" +"Open the respective configuration files and change them. 
Depending on " +"your device, use one of the two following commands:" +msgstr "打开相应的配置文件并修改它们。根据设备情况,使用以下两个命令之一:" -#: ../../source/index.rst:131::1 flwr:1 of -#, fuzzy -msgid "Flower main package." -msgstr "Flower 主包装。" +#: ../../source/how-to-monitor-simulation.rst:51 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. You may adjust the time intervals to your " +"requirements:" +msgstr "然后删除文件中的所有文本,粘贴一个新的 Prometheus 配置文件,如下所示。您可以根据需要调整时间间隔:" -#: ../../source/index.rst:148 -msgid "Contributor docs" -msgstr "贡献者文档" +#: ../../source/how-to-monitor-simulation.rst:67 +msgid "" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" +msgstr "编辑完 Prometheus 配置后,请对 Grafana 配置文件执行同样的操作。与之前一样,使用以下命令之一打开这些文件:" -#: ../../source/index.rst:150 +#: ../../source/how-to-monitor-simulation.rst:78 msgid "" -"The Flower community welcomes contributions. The following docs are " -"intended to help along the way." -msgstr "Flower 社区欢迎您的贡献。以下文档旨在为您提供帮助。" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." +msgstr "您的终端编辑器应该会打开,并允许您像之前一样应用以下配置。" -#: ../../source/ref-api-cli.rst:2 -msgid "Flower CLI reference" -msgstr "Flower CLI 参考" +#: ../../source/how-to-monitor-simulation.rst:94 +msgid "" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." 
+msgstr "恭喜您,您刚刚下载了指标跟踪所需的所有软件。现在,让我们开始吧。" -#: ../../source/ref-api-cli.rst:7 -#, fuzzy -msgid "flower-simulation" -msgstr "运行模拟" +#: ../../source/how-to-monitor-simulation.rst:98 +msgid "Tracking metrics" +msgstr "跟踪指标" -#: ../../source/ref-api-cli.rst:17 -msgid "flower-superlink" -msgstr "flower-superlink" +#: ../../source/how-to-monitor-simulation.rst:100 +msgid "" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." +msgstr "在运行 Flower 模拟之前,您必须启动刚刚安装和配置的监控工具。" -#: ../../source/ref-api-cli.rst:27 -#, fuzzy -msgid "flower-client-app" -msgstr "Flower 客户端。" +#: ../../source/how-to-monitor-simulation.rst:108 +msgid "" +"Please include the following argument in your Python code when starting a" +" simulation." +msgstr "开始模拟时,请在 Python 代码中加入以下参数。" -#: ../../source/ref-api-cli.rst:37 -#, fuzzy -msgid "flower-server-app" -msgstr "flower-driver-api" +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "Now, you are ready to start your workload." +msgstr "现在,您可以开始工作了。" -#: ../../source/ref-api/flwr.rst:2 -#, fuzzy -msgid "flwr" -msgstr "Flower" +#: ../../source/how-to-monitor-simulation.rst:121 +msgid "" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" +msgstr "模拟启动后不久,您就会在终端中看到以下日志:" -#: ../../source/ref-api/flwr.client.rst:45 ../../source/ref-api/flwr.rst:25 -#: ../../source/ref-api/flwr.server.rst:49 +#: ../../source/how-to-monitor-simulation.rst:127 #, fuzzy -msgid "Modules" -msgstr "模块" +msgid "You can look at everything at http://127.0.0.1:8265 ." +msgstr "您可以在 ``_ 查看所有内容。" -#: ../../source/ref-api/flwr.rst:35::1 -#, fuzzy -msgid ":py:obj:`flwr.client `\\" -msgstr ":py:obj:`flwr.client `\\" +#: ../../source/how-to-monitor-simulation.rst:129 +msgid "" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." 
+msgstr "这是一个 Ray Dashboard。您可以导航到 \"度量标准\"(左侧面板,最低选项)。" -#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of -msgid "Flower client." -msgstr "Flower 客户端。" +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "" +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." +msgstr "" +"或者,您也可以点击右上角的 \"在 Grafana 中查看\",在 Grafana 中查看它们。请注意,Ray " +"仪表盘只能在模拟期间访问。模拟结束后,您只能使用 Grafana 浏览指标。您可以访问 ``http://localhost:3000/``启动 " +"Grafana。" -#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/how-to-monitor-simulation.rst:137 #, fuzzy -msgid ":py:obj:`flwr.common `\\" -msgstr ":py:obj:`flwr.common `\\" - -#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of -msgid "Common components shared between server and client." -msgstr "服务器和客户端共享的通用组件。" +msgid "" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port ``3000`` on your" +" machine as long as they are running." +msgstr "完成可视化后,请停止 Prometheus 和 Grafana。这一点很重要,否则只要它们在运行,就会阻塞机器上的端口 :code:`3000`。" -#: ../../source/ref-api/flwr.rst:35::1 -#, fuzzy -msgid ":py:obj:`flwr.server `\\" -msgstr ":py:obj:`flwr.server `\\" +#: ../../source/how-to-monitor-simulation.rst:147 +msgid "Resource allocation" +msgstr "资源分配" -#: ../../source/ref-api/flwr.rst:35::1 -#: ../../source/ref-api/flwr.server.rst:38::1 flwr.server:1 -#: flwr.server.server.Server:1 of -msgid "Flower server." -msgstr "Flower 服务器。" +#: ../../source/how-to-monitor-simulation.rst:149 +msgid "" +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." 
+msgstr "您必须了解 Ray 库是如何工作的,才能有效地为自己的仿真客户端分配系统资源。" -#: ../../source/ref-api/flwr.rst:35::1 -#, fuzzy -msgid ":py:obj:`flwr.simulation `\\" -msgstr ":py:obj:`flwr.simulation `\\" +#: ../../source/how-to-monitor-simulation.rst:152 +msgid "" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" +msgstr "" +"最初,模拟(由 Ray " +"在引擎下处理)默认使用系统上的所有可用资源启动,并在客户端之间共享。但这并不意味着它会将资源平均分配给所有客户端,也不意味着模型训练会在所有客户端同时进行。您将在本博客的后半部分了解到更多相关信息。您可以运行以下命令检查系统资源:" -#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of -#, fuzzy -msgid "Flower simulation." -msgstr "运行模拟" +#: ../../source/how-to-monitor-simulation.rst:164 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "在 Google Colab 中,您看到的结果可能与此类似:" -#: ../../source/ref-api/flwr.client.rst:2 -msgid "client" -msgstr "客户端" +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" +msgstr "不过,您可以覆盖默认值。开始模拟时,请执行以下操作(不必全部覆盖):" -#: ../../source/ref-api/flwr.client.mod.rst:13 -#: ../../source/ref-api/flwr.client.rst:13 -#: ../../source/ref-api/flwr.common.rst:13 -#: ../../source/ref-api/flwr.server.rst:13 -#: ../../source/ref-api/flwr.simulation.rst:13 -#, fuzzy -msgid "Functions" -msgstr "四种函数:" +#: ../../source/how-to-monitor-simulation.rst:195 +msgid "Let’s also specify the resource for a single client." 
+msgstr "我们还可以为单个客户指定资源。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy -msgid ":py:obj:`run_client_app `\\ \\(\\)" -msgstr ":py:obj:`run_client_app `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:225 +msgid "" +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." +msgstr "现在到了关键部分。只有在资源允许的情况下,Ray 才会在拥有所有所需资源(如并行运行)时启动新客户端。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_client_app:1 of +#: ../../source/how-to-monitor-simulation.rst:228 #, fuzzy -msgid "Run Flower client app." -msgstr "Flower 客户端。" +msgid "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting ``client_num_gpus = 0.5`` would allow running " +"two clients and therefore enable them to run concurrently. Be careful not" +" to require more resources than available. If you specified " +"``client_num_gpus = 2``, the simulation wouldn't start (even if you had 2" +" GPUs but decided to set 1 in ``ray_init_args``)." +msgstr "" +"在上面的示例中,将只运行一个客户端,因此您的客户端不会并发运行。设置 :code:`client_num_gpus = 0.5` " +"将允许运行两个客户端,从而使它们能够并发运行。请注意,所需的资源不要超过可用资源。如果您指定 :code:`client_num_gpus = " +"2`,模拟将无法启动(即使您有 2 个 GPU,但决定在 :code:`ray_init_args` 中设置为 1)。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#, fuzzy -msgid ":py:obj:`run_supernode `\\ \\(\\)" -msgstr ":py:obj:`run_superlink `\\ \\(\\)" +#: ../../source/how-to-monitor-simulation.rst:235 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "常见问题" + +#: ../../source/how-to-monitor-simulation.rst:237 +msgid "Q: I don't see any metrics logged." +msgstr "问:我没有看到任何指标记录。" + +#: ../../source/how-to-monitor-simulation.rst:239 +msgid "" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." 
+msgstr "答:时间范围可能没有正确设置。设置在右上角(默认为 \"最后 30 分钟\")。请更改时间框架,以反映模拟运行的时间段。" + +#: ../../source/how-to-monitor-simulation.rst:243 +msgid "" +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." +msgstr "问:我看到 \"未检测到 Grafana 服务器。请确保 Grafana 服务器正在运行并刷新此页面\"。" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.supernode.app.run_supernode:1 of +#: ../../source/how-to-monitor-simulation.rst:246 +msgid "" +"A: You probably don't have Grafana running. Please check the running " +"services" +msgstr "答:您可能没有运行 Grafana。请检查正在运行的服务" + +#: ../../source/how-to-monitor-simulation.rst:252 #, fuzzy -msgid "Run Flower SuperNode." -msgstr "Flower 服务器。" +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"http://127.0.0.1:8265." +msgstr "问:在访问 ``_时,我看到 \"无法访问该网站\"。" + +#: ../../source/how-to-monitor-simulation.rst:254 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." +msgstr "答:要么模拟已经完成,要么您还需要启动Prometheus。" + +#: ../../source/how-to-monitor-simulation.rst:257 +msgid "Resources" +msgstr "资源" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-monitor-simulation.rst:259 #, fuzzy msgid "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-" +"started.html" +msgstr "Ray 仪表盘: ``_" + +#: ../../source/how-to-monitor-simulation.rst:261 +#, fuzzy +msgid "Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html" msgstr "" -":py:obj:`start_client `\\ \\(\\*\\, " -"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +"Ray 指标: ``_" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_client:1 of -msgid "Start a Flower client node which connects to a Flower server." 
-msgstr "启动一个 Flower 客户节点,连接到 Flower 服务器。" +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" +msgstr "运行模拟" + +#: ../../source/how-to-run-simulations.rst:8 +msgid "" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." +msgstr "" +"模拟联邦学习工作负载可用于多种案例:您可能希望在大量客户端上运行您的工作负载,但无需采购、配置和管理大量物理设备;您可能希望在您可以访问的计算系统上尽可能快地运行您的" +" FL 工作负载,而无需经过复杂的设置过程;您可能希望在不同数据和系统异构性、客户端可用性、隐私预算等不同水平的场景中验证您的算法。这些都是模拟 " +"FL 工作负载的一些案例。Flower 可以通过其 \"虚拟客户端引擎\"(VirtualClientEngine)_或 VCE 来匹配这些情况。" -#: ../../source/ref-api/flwr.client.rst:25::1 +#: ../../source/how-to-run-simulations.rst:19 #, fuzzy msgid "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +"The ``VirtualClientEngine`` schedules, launches and manages `virtual` " +"clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by " +"creating a class inheriting, for example, from `flwr.client.NumPyClient " +"`_ and therefore behave in an " +"identical way. 
In addition to that, clients managed by the " +"``VirtualClientEngine`` are:" msgstr "" -":py:obj:`start_numpy_client `\\ \\(\\*\\," -" server\\_address\\, client\\)" +":code:`VirtualClientEngine`用来规划,启动和管理`虚拟`客户端。这些客户端跟`非虚拟`客户端是一样的(即为您通过`flwr.client.start_client" +" `_启动的客户端),因为它们可以通过创建一个继承自 " +"`flwr.client.NumPyClient `_ " +"的类进行配置,因此其行为方式相同。另外,由 `VirtualClientEngine` 管理的客户端有:" -#: ../../source/ref-api/flwr.client.rst:25::1 -#: flwr.client.app.start_numpy_client:1 of -msgid "Start a Flower NumPyClient which connects to a gRPC server." -msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" +#: ../../source/how-to-run-simulations.rst:26 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." +msgstr "" +"资源感知:这意味着每个客户端都会分配到系统中的一部分计算和内存。作为用户,您可以在模拟开始时对其进行控制,从而控制 Flower FL " +"模拟的并行程度。每个客户端的资源越少,在同一硬件上并发运行的客户端就越多。" -#: ../../source/ref-api/flwr.client.mod.rst:30 -#: ../../source/ref-api/flwr.client.rst:27 -#: ../../source/ref-api/flwr.common.rst:32 -#: ../../source/ref-api/flwr.server.rst:26 -#: ../../source/ref-api/flwr.server.strategy.rst:17 -#: ../../source/ref-api/flwr.server.workflow.rst:17 +#: ../../source/how-to-run-simulations.rst:31 #, fuzzy -msgid "Classes" -msgstr "类别" +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to ``VirtualClientEngine``'s " +"internals." +msgstr "自管理:这意味着用户无需手动启动客户端,而是由 :code:`VirtualClientEngine` 负责。" + +#: ../../source/how-to-run-simulations.rst:33 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). 
The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." +msgstr "" +"即时性:这意味着客户端只有在 FL 进程中需要它时才会被实体化(例如执行 `fit() `_ " +")。之后该对象将被销毁,释放分配给它的资源,并允许其他客户端以这种方式参与。" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-run-simulations.rst:38 #, fuzzy -msgid ":py:obj:`Client `\\ \\(\\)" -msgstr ":py:obj:`Client `\\ \\(\\)" +msgid "" +"The ``VirtualClientEngine`` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's ``VirtualClientEngine`` makes use of " +"`Actors `_ to spawn " +"`virtual` clients and run their workload." +msgstr "" +":code:`VirtualClientEngine`使用`Ray " +"`_来实现`虚拟`客户端,这是一个用于可扩展 Python 工作负载的开源框架。特别地,Flower 的" +" :code:`VirtualClientEngine` 使用 `Actors `_ 来生成 `virtual` 客户端并运行它们的工作负载。" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client.Client:1 of -msgid "Abstract base class for Flower clients." -msgstr "Flower 客户端的抽象基类。" +#: ../../source/how-to-run-simulations.rst:45 +msgid "Launch your Flower simulation" +msgstr "启动 Flower 模拟" -#: ../../source/ref-api/flwr.client.rst:34::1 -#, fuzzy +#: ../../source/how-to-run-simulations.rst:47 msgid "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. 
With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" msgstr "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"运行 Flower 模拟器仍然需要定义客户端类、策略以及下载和加载(可能还需要分割)数据集的实用程序。在完成这些工作后,就可以使用 " +"\"start_simulation `_\" 来启动模拟了,一个最简单的示例如下:" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.client_app.ClientApp:1 of +#: ../../source/how-to-run-simulations.rst:73 +msgid "VirtualClientEngine resources" +msgstr "虚拟客户端引擎资源" + +#: ../../source/how-to-run-simulations.rst:75 #, fuzzy -msgid "Flower ClientApp." -msgstr "Flower 客户端。" +msgid "" +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. " +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +"``ray_init_args`` input argument to ``start_simulation`` which the VCE " +"internally passes to Ray's ``ray.init`` command. For a complete list of " +"settings you can configure check the `ray.init " +"`_" +" documentation. Do not set ``ray_init_args`` if you want the VCE to use " +"all your system's CPUs and GPUs." +msgstr "" +"默认情况下,VCE 可以访问所有系统资源(即所有 CPU、所有 GPU 等),因为这也是启动 Ray " +"时的默认行为。不过,在某些设置中,您可能希望限制有多少系统资源用于模拟。您可以通过 :code:`ray_init_args` 输入到 " +":code:`start_simulation` 的参数来做到这一点,VCE 会在内部将该参数传递给 Ray 的 :code:`ray.init`" +" 命令。有关您可以配置的设置的完整列表,请查看 `ray.init `_ 文档。如果希望 VCE 使用系统中所有的 CPU 和 " +"GPU,请不要设置 :code:`ray_init_args`。" + +#: ../../source/how-to-run-simulations.rst:97 +msgid "Assigning client resources" +msgstr "分配客户端资源" -#: ../../source/ref-api/flwr.client.rst:34::1 +#: ../../source/how-to-run-simulations.rst:99 #, fuzzy -msgid ":py:obj:`NumPyClient `\\ \\(\\)" -msgstr ":py:obj:`NumPyClient `\\ \\(\\)" +msgid "" +"By default the ``VirtualClientEngine`` assigns a single CPU core (and " +"nothing else) to each virtual client. 
This means that if your system has " +"10 cores, that many virtual clients can be concurrently running." +msgstr "" +"默认情况下,:code:`VirtualClientEngine` 会为每个虚拟客户端分配一个 CPU " +"内核(不分配其他任何内核)。这意味着,如果系统有 10 个内核,那么可以同时运行这么多虚拟客户端。" -#: ../../source/ref-api/flwr.client.rst:34::1 -#: flwr.client.numpy_client.NumPyClient:1 of -msgid "Abstract base class for Flower clients using NumPy." -msgstr "使用 NumPy 的 Flower 客户端的抽象基类。" +#: ../../source/how-to-run-simulations.rst:103 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" +msgstr "" +"通常情况下,您可能希望根据 FL 工作负载的复杂性(即计算和内存占用)来调整分配给客户端的资源。您可以在启动模拟时将参数 " +"`client_resources` 设置为 `start_simulation `_ 。Ray " +"内部使用两个键来调度和生成工作负载(在我们的例子中是 Flower 客户端):" -#: ../../source/ref-api/flwr.client.rst:52::1 +#: ../../source/how-to-run-simulations.rst:110 #, fuzzy -msgid ":py:obj:`flwr.client.mod `\\" -msgstr ":py:obj:`flwr.client `\\" +msgid "``num_cpus`` indicates the number of CPU cores a client would get." +msgstr ":code:`num_cpus` 表示客户端将获得的 CPU 内核数量。" -#: ../../source/ref-api/flwr.client.rst:52::1 flwr.client.mod:1 of +#: ../../source/how-to-run-simulations.rst:111 #, fuzzy -msgid "Flower Built-in Mods." -msgstr "使用内置调制器" +msgid "``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned." 
+msgstr ":code:`num_gpus` 表示分配给客户端的 GPU 内存的**比例**。" -#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 -#: flwr.server.client_manager.ClientManager:1 -#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 -#: of +#: ../../source/how-to-run-simulations.rst:113 +msgid "Let's see a few examples:" +msgstr "让我们来看几个例子:" + +#: ../../source/how-to-run-simulations.rst:132 #, fuzzy -msgid "Bases: :py:class:`~abc.ABC`" -msgstr "Bases: :py:class:`~abc.ABC`" +msgid "" +"While the ``client_resources`` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +"``VirtualClientEngine`` will schedule 100 jobs to run (each simulating a " +"client sampled by the strategy) and then will execute them in a resource-" +"aware manner in batches of 8." 
+msgstr "" +"虽然 :code:`client_resources` 可用来控制 FL " +"模拟的并发程度,但这并不能阻止您在同一轮模拟中运行几十、几百甚至上千个客户端,并拥有数量级更多的 " +"\"休眠\"(即不参与一轮模拟)客户端。比方说,您希望每轮有 100 个客户端,但您的系统只能同时容纳 8 " +"个客户端。:code:`VirtualClientEngine` 将安排运行 100 " +"个工作(每个工作模拟策略采样的一个客户端),然后以资源感知的方式分批执行。" -#: ../../source/ref-api/flwr.client.Client.rst:15 -#: ../../source/ref-api/flwr.client.ClientApp.rst:15 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 -#: ../../source/ref-api/flwr.common.Array.rst:15 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Context.rst:15 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 -#: ../../source/ref-api/flwr.common.Error.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 -#: ../../source/ref-api/flwr.common.EventType.rst:15 -#: ../../source/ref-api/flwr.common.FitIns.rst:15 -#: ../../source/ref-api/flwr.common.FitRes.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 -#: ../../source/ref-api/flwr.common.Message.rst:15 -#: ../../source/ref-api/flwr.common.MessageType.rst:15 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 -#: ../../source/ref-api/flwr.common.Metadata.rst:15 -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 -#: ../../source/ref-api/flwr.common.Parameters.rst:15 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 -#: ../../source/ref-api/flwr.common.RecordSet.rst:15 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:15 -#: ../../source/ref-api/flwr.common.Status.rst:15 -#: ../../source/ref-api/flwr.server.ClientManager.rst:15 -#: 
../../source/ref-api/flwr.server.Driver.rst:15 -#: ../../source/ref-api/flwr.server.History.rst:15 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 -#: ../../source/ref-api/flwr.server.Server.rst:15 -#: ../../source/ref-api/flwr.server.ServerApp.rst:15 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 -#: 
../../source/ref-api/flwr.server.strategy.Strategy.rst:15 -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 -#, fuzzy -msgid "Methods" -msgstr "方法" +#: ../../source/how-to-run-simulations.rst:140 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." +msgstr "" +"要了解资源如何用于调度 FL 客户端以及如何定义自定义资源的所有复杂细节,请查看 `Ray 文档 " +"`_。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`evaluate `\\ \\(ins\\)" -msgstr ":py:obj:`evaluate `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:145 +msgid "Simulation examples" +msgstr "模拟示例" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.evaluate:1 -#: flwr.client.numpy_client.NumPyClient.evaluate:1 of -msgid "Evaluate the provided parameters using the locally held dataset." -msgstr "使用本地数据集评估所提供的参数。" +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" +msgstr "" +"在 Tensorflow/Keras 和 PyTorch 中进行 Flower 模拟的几个可随时运行的完整示例已在 `Flower 库 " +"`_ 中提供。您也可以在 Google Colab 上运行它们:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`fit `\\ \\(ins\\)" -msgstr ":py:obj:`fit `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:151 +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." 
+msgstr "" +"Tensorflow/Keras模拟 `_:100个客户端在MNIST上协作训练一个MLP模型。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.fit:1 of -msgid "Refine the provided parameters using the locally held dataset." -msgstr "利用本地数据集完善所提供的参数。" +#: ../../source/how-to-run-simulations.rst:154 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." +msgstr "" +"PyTorch 模拟 `_:100 个客户端在 MNIST 上协作训练一个 CNN 模型。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:159 +msgid "Multi-node Flower simulations" +msgstr "多节点 Flower 模拟" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_context:1 -#: flwr.client.numpy_client.NumPyClient.get_context:1 of +#: ../../source/how-to-run-simulations.rst:161 #, fuzzy -msgid "Get the run context from this client." -msgstr "评估客户端的反应。" +msgid "" +"Flower's ``VirtualClientEngine`` allows you to run FL simulations across " +"multiple compute nodes. Before starting your multi-node simulation ensure" +" that you:" +msgstr "Flower 的 :code:`VirtualClientEngine` 允许您在多个计算节点上运行 FL 模拟。在开始多节点模拟之前,请确保:" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_parameters `\\ \\(ins\\)" -msgstr ":py:obj:`get_parameters `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:164 +msgid "Have the same Python environment in all nodes." +msgstr "所有节点都有相同的 Python 环境。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.get_parameters:1 -#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of -msgid "Return the current local model parameters." -msgstr "返回当前本地模型参数。" +#: ../../source/how-to-run-simulations.rst:165 +msgid "Have a copy of your code (e.g. 
your entire repo) in all nodes." +msgstr "在所有节点上都有一份代码副本(例如整个软件包)。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_properties `\\ \\(ins\\)" -msgstr ":py:obj:`get_properties `\\ \\(ins\\)" +#: ../../source/how-to-run-simulations.rst:166 +msgid "" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" +msgstr "在所有节点中都有一份数据集副本(更多相关信息请参阅 :ref:`模拟注意事项`)" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.get_properties:1 of -msgid "Return set of client's properties." -msgstr "返回客户端的属性集。" +#: ../../source/how-to-run-simulations.rst:168 +#, fuzzy +msgid "" +"Pass ``ray_init_args={\"address\"=\"auto\"}`` to `start_simulation `_ so the " +"``VirtualClientEngine`` attaches to a running Ray instance." +msgstr "" +"将 :code:`ray_init_args={\"address\"=\"auto\"}`传递给 `start_simulation `_ ,这样 " +":code:`VirtualClientEngine`就会连接到正在运行的 Ray 实例。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/how-to-run-simulations.rst:171 #, fuzzy -msgid ":py:obj:`set_context `\\ \\(context\\)" -msgstr ":py:obj:`set_context `\\ \\(context\\)" +msgid "" +"Start Ray on you head node: on the terminal type ``ray start --head``. " +"This command will print a few lines, one of which indicates how to attach" +" other nodes to the head node." +msgstr "" +"在头部节点上启动 Ray:在终端上输入 :code:`raystart--" +"head`。该命令将打印几行输出,其中一行说明如何将其他节点连接到头部节点。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.client.Client.set_context:1 -#: flwr.client.numpy_client.NumPyClient.set_context:1 of +#: ../../source/how-to-run-simulations.rst:174 #, fuzzy -msgid "Apply a run context to this client." 
-msgstr "将运行上下文应用于该客户端。" +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +"``ray start --address='192.168.1.132:6379'``" +msgstr "" +"将其他节点附加到头部节点:复制启动头部后显示的命令,并在新节点的终端上执行:例如 :code:`ray start " +"--address='192.168.1.132:6379'`" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/how-to-run-simulations.rst:178 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." +msgstr "完成上述所有操作后,您就可以在头部节点上运行代码了,就像在单个节点上运行模拟一样。" + +#: ../../source/how-to-run-simulations.rst:181 #, fuzzy -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" +msgid "" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command ``ray stop`` in each node's terminal " +"(including the head node)." +msgstr "模拟结束后,如果要拆除集群,只需在每个节点(包括头部节点)的终端运行 :code:`ray stop` 命令即可。" -#: ../../source/ref-api/flwr.client.Client.rst:44::1 -#: flwr.client.client.Client.to_client:1 of -msgid "Return client (itself)." 
-msgstr "返回客户端(本身)。" +#: ../../source/how-to-run-simulations.rst:185 +msgid "Multi-node simulation good-to-know" +msgstr "了解多节点模拟" -#: ../../source/ref-api/flwr.client.Client.rst:46 -#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 -#: ../../source/ref-api/flwr.common.Array.rst:28 -#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 -#: ../../source/ref-api/flwr.common.Code.rst:19 -#: ../../source/ref-api/flwr.common.Context.rst:25 -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 -#: ../../source/ref-api/flwr.common.Error.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 -#: ../../source/ref-api/flwr.common.EventType.rst:165 -#: ../../source/ref-api/flwr.common.FitIns.rst:25 -#: ../../source/ref-api/flwr.common.FitRes.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 -#: ../../source/ref-api/flwr.common.Message.rst:37 -#: ../../source/ref-api/flwr.common.MessageType.rst:25 -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 -#: ../../source/ref-api/flwr.common.Metadata.rst:25 -#: ../../source/ref-api/flwr.common.Parameters.rst:25 -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 -#: ../../source/ref-api/flwr.common.RecordSet.rst:25 -#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 -#: ../../source/ref-api/flwr.common.Status.rst:25 -#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 -#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 +#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" +msgstr "在此,我们列举了运行多节点 FL 模拟时的一些有趣功能:" + +#: ../../source/how-to-run-simulations.rst:189 #, fuzzy -msgid "Attributes" -msgstr "属性" +msgid "" +"User ``ray status`` to 
check all nodes connected to your head node as " +"well as the total resources available to the ``VirtualClientEngine``." +msgstr "" +"使用 :code:`ray status` 查看连接到头部节点的所有节点,以及 :code:`VirtualClientEngine` " +"可用的总资源。" -#: flwr.client.client.Client.evaluate:1::1 of +#: ../../source/how-to-run-simulations.rst:192 #, fuzzy -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +"``VirtualClientEngine`` can schedule as many `virtual` clients as that " +"node can possible run. In some settings you might want to exclude certain" +" resources from the simulation. You can do this by appending `--num-" +"cpus=` and/or `--num-gpus=` in " +"any ``ray start`` command (including when starting the head)" +msgstr "" +"将新节点附加到头部节点时,头部节点将可见其所有资源(即所有 CPU 和 GPU)。这意味着 :code:`VirtualClientEngine`" +" 可以调度尽可能多的 \"虚拟 \"客户端来运行该节点。在某些设置中,您可能希望将某些资源排除在模拟之外。为此,您可以在任何 :code:`ray" +" start` 命令(包括启动头部时)中添加 `--num-cpus=`和/或 `--num-" +"gpus=`" -#: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.mod.localdp_mod.LocalDpMod -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.context.Context flwr.common.message.Error -#: flwr.common.message.Message flwr.common.message.Message.create_error_reply -#: flwr.common.message.Message.create_reply flwr.common.message.Metadata -#: flwr.common.record.parametersrecord.Array flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.register -#: 
flwr.server.client_manager.ClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.unregister -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.bulyan.Bulyan -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.fedadagrad.FedAdagrad -#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg -#: flwr.server.strategy.fedavg_android.FedAvgAndroid -#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt -#: flwr.server.strategy.fedprox.FedProx -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg -#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow -#: flwr.simulation.app.start_simulation -#: 
flwr.simulation.run_simulation.run_simulation of -msgid "Parameters" -msgstr "参数" +#: ../../source/how-to-run-simulations.rst:202 +msgid "Considerations for simulations" +msgstr "模拟的注意事项" -#: flwr.client.client.Client.evaluate:3 of +#: ../../source/how-to-run-simulations.rst:206 msgid "" -"The evaluation instructions containing (global) model parameters received" -" from the server and a dictionary of configuration values used to " -"customize the local evaluation process." -msgstr "评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值字典。" - -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.evaluate -#: flwr.client.numpy_client.NumPyClient.fit -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: 
flwr.simulation.app.start_simulation of -msgid "Returns" -msgstr "返回" +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." +msgstr "我们正在积极开展这些方面的工作,以便使 FL 工作负载与 Flower 模拟的运行变得轻而易举。" -#: flwr.client.client.Client.evaluate:8 of +#: ../../source/how-to-run-simulations.rst:209 msgid "" -"The evaluation result containing the loss on the local dataset and other " -"details such as the number of local data examples used for evaluation." -msgstr "评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." 
+msgstr "" +"当前的 VCE 允许您在模拟模式下运行联邦学习工作负载,无论您是在个人笔记本电脑上建立简单的场景原型,还是要在多个高性能 GPU 节点上训练复杂的" +" FL情景。虽然我们为 VCE 增加了更多的功能,但以下几点强调了在使用 Flower 设计 FL " +"时需要注意的一些事项。我们还强调了我们的实现中目前存在的一些局限性。" -#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit -#: flwr.client.client.Client.get_parameters -#: flwr.client.client.Client.get_properties -#: flwr.client.numpy_client.NumPyClient.get_parameters -#: flwr.client.numpy_client.NumPyClient.get_properties -#: flwr.common.message.Message.create_reply flwr.server.app.start_server -#: flwr.server.client_manager.ClientManager.num_available -#: flwr.server.client_manager.ClientManager.register -#: flwr.server.client_manager.SimpleClientManager.num_available -#: flwr.server.client_manager.SimpleClientManager.register -#: flwr.server.client_manager.SimpleClientManager.wait_for -#: flwr.server.driver.driver.Driver.create_message -#: flwr.server.driver.driver.Driver.pull_messages -#: flwr.server.driver.driver.Driver.push_messages -#: flwr.server.driver.driver.Driver.send_and_receive -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate -#: flwr.server.strategy.strategy.Strategy.aggregate_fit -#: flwr.server.strategy.strategy.Strategy.configure_evaluate -#: flwr.server.strategy.strategy.Strategy.configure_fit -#: flwr.server.strategy.strategy.Strategy.evaluate -#: flwr.server.strategy.strategy.Strategy.initialize_parameters -#: flwr.simulation.app.start_simulation of -msgid "Return type" -msgstr "返回类型" +#: ../../source/how-to-run-simulations.rst:217 +msgid "GPU resources" +msgstr "GPU 资源" -#: flwr.client.client.Client.fit:3 of +#: ../../source/how-to-run-simulations.rst:219 +#, fuzzy msgid "" -"The training instructions containing (global) model parameters received " -"from the server and a dictionary of configuration values used to " -"customize the local training process." 
-msgstr "训练指令,包含从服务器接收的(全局)模型参数,以及用于定制本地训练过程的配置值字典。" +"The VCE assigns a share of GPU memory to a client that specifies the key " +"``num_gpus`` in ``client_resources``. This being said, Ray (used " +"internally by the VCE) is by default:" +msgstr "" +"VCE 会为指定 :code:`client_resources` 中 :code:`num_gpus` 关键字的客户端分配 GPU " +"内存份额。也就是说,Ray(VCE 内部使用)是默认的:" -#: flwr.client.client.Client.fit:8 of +#: ../../source/how-to-run-simulations.rst:222 +#, fuzzy msgid "" -"The training result containing updated parameters and other details such " -"as the number of local training examples used for training." -msgstr "训练结果包含更新的参数和其他详细信息,如用于训练的本地训练示例的数量。" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set ``num_gpus=0.5`` and you have two GPUs in your system with different" +" (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients " +"concurrently." +msgstr "" +"不知道 GPU 上可用的总 VRAM。这意味着,如果您设置 :code:`num_gpus=0.5`,而系统中有两个不同(如 32GB 和 " +"8GB)VRAM 的 GPU,它们都将同时运行 2 个客户端。" -#: flwr.client.client.Client.get_parameters:3 of +#: ../../source/how-to-run-simulations.rst:225 msgid "" -"The get parameters instructions received from the server containing a " -"dictionary of configuration values." -msgstr "从服务器接收的获取参数指令包含配置值字典。" - -#: flwr.client.client.Client.get_parameters:7 of -msgid "The current local model parameters." -msgstr "当前的本地模型参数。" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" +msgstr "不知道 GPU 上正在运行其他无关(即不是由 VCE 创建)的工作负载。从中可以得到以下两点启示:" -#: flwr.client.client.Client.get_properties:3 of +#: ../../source/how-to-run-simulations.rst:228 msgid "" -"The get properties instructions received from the server containing a " -"dictionary of configuration values." -msgstr "从服务器接收的获取属性指令包含配置值字典。" - -#: flwr.client.client.Client.get_properties:7 of -msgid "The current client properties." 
-msgstr "当前客户端属性。" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" +msgstr "" +"您的 Flower 服务器可能需要 GPU 来评估聚合后的 \"全局模型\"(例如在使用 \"评估方法\"`_时)" -#: ../../source/ref-api/flwr.client.ClientApp.rst:2 +#: ../../source/how-to-run-simulations.rst:231 #, fuzzy -msgid "ClientApp" -msgstr "客户端" +msgid "" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +"``CUDA_VISIBLE_DEVICES=\"\"`` when launching your experiment." +msgstr "" +"如果您想在同一台机器上运行多个独立的 Flower 模拟,则需要在启动实验时使用 " +":code:`CUDA_VISIBLE_DEVICES=\"\"` 屏蔽 GPU。" -#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 -#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 -#: flwr.common.context.Context:1 flwr.common.message.Error:1 -#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 -#: flwr.common.record.parametersrecord.Array:1 -#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 -#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 -#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 -#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 -#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 -#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 -#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 -#: flwr.common.typing.Status:1 flwr.server.history.History:1 -#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 -#: flwr.server.server_config.ServerConfig:1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of +#: ../../source/how-to-run-simulations.rst:235 #, fuzzy -msgid "Bases: :py:class:`object`" -msgstr "Bases: :py:class:`object`" +msgid "" +"In 
addition, the GPU resource limits passed to ``client_resources`` are " +"not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." +msgstr "" +"此外,传递给 :code:`client_resources` 的 GPU 资源限制并不是 \"强制 \"的(即可以超出),这可能导致客户端使用的" +" VRAM 超过启动模拟时指定的比例。" -#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 -#: flwr.client.client_app.ClientApp:4 -#: flwr.client.client_app.ClientApp.evaluate:4 -#: flwr.client.client_app.ClientApp.query:4 -#: flwr.client.client_app.ClientApp.train:4 -#: flwr.client.mod.localdp_mod.LocalDpMod:22 flwr.server.app.start_server:41 -#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 -#: of -msgid "Examples" -msgstr "实例" +#: ../../source/how-to-run-simulations.rst:240 +msgid "TensorFlow with GPUs" +msgstr "使用 GPU 的 TensorFlow" -#: flwr.client.client_app.ClientApp:5 of -#, fuzzy +#: ../../source/how-to-run-simulations.rst:242 msgid "" -"Assuming a typical `Client` implementation named `FlowerClient`, you can " -"wrap it in a `ClientApp` as follows:" -msgstr "假定有一个名为 `FlowerClient` 的典型 `Client` 实现,可以将其封装在一个 `ClientApp` 中,如下所示:" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. 
Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." +msgstr "" +"在 TensorFlow `_ 中使用 GPU 时,几乎所有进程可见的" +" GPU 内存都将被映射。TensorFlow 这样做是出于优化目的。然而,在 FL 模拟等设置中,我们希望将 GPU 分割成多个 \"虚拟 " +"\"客户端,这并不是一个理想的机制。幸运的是,我们可以通过 `启用内存增长 " +"`_来禁用这一默认行为。" -#: flwr.client.client_app.ClientApp:16 of +#: ../../source/how-to-run-simulations.rst:249 #, fuzzy msgid "" -"If the above code is in a Python module called `client`, it can be " -"started as follows:" -msgstr "如果上述代码位于一个名为 \"客户端 \"的 Python 模块中,则可以按如下方式启动它:" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +"``actor_kwargs`` we can pass the reserved key `\"on_actor_init_fn\"` in " +"order to specify a function to be executed upon actor initialization. In " +"this case, to enable GPU growth for TF workloads. It would look as " +"follows:" +msgstr "" +"这需要在主进程(也就是服务器运行的地方)和 VCE 创建的每个角色中完成。通过 " +":code:`actor_kwargs`,我们可以传递保留关键字`\"on_actor_init_fn\"`,以指定在角色初始化时执行的函数。在本例中,为了使" +" TF 工作负载的 GPU 增长,它看起来如下:" -#: flwr.client.client_app.ClientApp:21 of -#, fuzzy +#: ../../source/how-to-run-simulations.rst:272 msgid "" -"In this `client:app` example, `client` refers to the Python module " -"`client.py` in which the previous code lives in and `app` refers to the " -"global attribute `app` that points to an object of type `ClientApp`." +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." msgstr "" -"在这个 `client:app` 例子中,`client` 指的是前面代码所在的 Python 模块 `client.py`,而 `app` " -"指的是指向 `ClientApp` 类型对象的全局属性 `app` 。" +"这正是 \"Tensorflow/Keras 模拟 " +"`_\"示例中使用的机制。" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`evaluate `\\ \\(\\)" -msgstr ":py:obj:`evaluate `\\ \\(\\)" - -#: flwr.client.client_app.ClientApp.evaluate:1 -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid "Return a decorator that registers the evaluate fn with the client app." 
-msgstr "返回一个装饰器,用于向客户端程序注册评估 fn。" +#: ../../source/how-to-run-simulations.rst:276 +msgid "Multi-node setups" +msgstr "多节点设置" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`query `\\ \\(\\)" -msgstr ":py:obj:`query `\\ \\(\\)" +#: ../../source/how-to-run-simulations.rst:278 +msgid "" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." +msgstr "" +"VCE 目前不提供控制特定 \"虚拟 " +"\"客户端在哪个节点上执行的方法。换句话说,如果不止一个节点拥有客户端运行所需的资源,那么这些节点中的任何一个都可能被调度到客户端工作负载上。在 " +"FL " +"进程的稍后阶段(即在另一轮中),同一客户端可以由不同的节点执行。根据客户访问数据集的方式,这可能需要在所有节点上复制所有数据集分区,或采用数据集服务机制(如使用" +" nfs 或数据库)来避免数据重复。" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.query:1 of -#, fuzzy -msgid "Return a decorator that registers the query fn with the client app." -msgstr "返回一个向客户端应用程序注册查询 fn 的装饰器。" +#: ../../source/how-to-run-simulations.rst:286 +msgid "" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." 
+msgstr "" +"根据定义,虚拟客户端是 \"无状态 \"的,因为它们具有即时性。客户机状态可以作为 Flower " +"客户机类的一部分来实现,但用户需要确保将其保存到持久存储(如数据库、磁盘)中,而且无论客户机在哪个节点上运行,都能在以后检索到。这也与上述观点有关,因为在某种程度上,客户端的数据集可以被视为一种" +" \"状态\"。" -#: flwr.client.client_app.ClientApp.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`train `\\ \\(\\)" -msgstr "server.strategy.Strategy" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" +msgstr "保存和加载模型检查点" -#: flwr.client.client_app.ClientApp.evaluate:1::1 -#: flwr.client.client_app.ClientApp.train:1 of -#, fuzzy -msgid "Return a decorator that registers the train fn with the client app." -msgstr "返回一个装饰器,用于在客户端应用程序中注册火车 fn。" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +msgid "" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." +msgstr "Flower 不会在服务器端自动保存模型更新。本指南将介绍在 Flower 中保存(和加载)模型检查点的步骤。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 -msgid "NumPyClient" -msgstr "NumPyClient" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" +msgstr "模型检查点" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 #, fuzzy msgid "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" +"Model updates can be persisted on the server-side by customizing " +"``Strategy`` methods. Implementing custom strategies is always an option," +" but for many cases it may be more convenient to simply customize an " +"existing strategy. The following code example defines a new " +"``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` " +"strategy. In particular, it customizes ``aggregate_fit`` by calling " +"``aggregate_fit`` in the base class (``FedAvg``). 
It then continues to " +"save returned (aggregated) weights before it returns those aggregated " +"weights to the caller (i.e., the server):" msgstr "" -":py:obj:`evaluate `\\ \\(parameters\\, " -"config\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy -msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" -msgstr ":py:obj:`fit `\\ \\(parameters\\, config\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.fit:1 of -msgid "Train the provided parameters using the locally held dataset." -msgstr "使用本地数据集训练所提供的参数。" +"模型更新可通过自定义 :code:`Strategy` " +"方法在服务器端持久化。实现自定义策略始终是一种选择,但在许多情况下,简单地自定义现有策略可能更方便。下面的代码示例定义了一个新的 " +":code:`SaveModelStrategy`,它自定义了现有的内置 :code:`FedAvg` " +"策略。特别是,它通过调用基类(:code:`FedAvg`)中的 :code:`aggregate_fit` 来定制 " +":code:`aggregate_fit`。然后继续保存返回的(聚合)参数,然后再将这些聚合参数返回给调用者(即服务器):" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy -msgid ":py:obj:`get_context `\\ \\(\\)" -msgstr ":py:obj:`get_context `\\ \\(\\)" +#: ../../source/how-to-save-and-load-model-checkpoints.rst:53 +msgid "Save and load PyTorch checkpoints" +msgstr "保存和加载 PyTorch 检查点" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy +#: ../../source/how-to-save-and-load-model-checkpoints.rst:55 msgid "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. " +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." 
msgstr "" -":py:obj:`get_parameters `\\ " -"\\(config\\)" +"与前面的例子类似,但多了几个步骤,我们将展示如何存储一个 PyTorch 检查点,我们将使用 ``torch.save`` " +"函数。首先,``aggregate_fit`` 返回一个 ``Parameters`` 对象,它必须被转换成一个 NumPy " +"``ndarray`` 的列表,然后这些对象按照 ``OrderedDict`` 类结构被转换成 PyTorch `state_dict` 对象。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: ../../source/how-to-save-and-load-model-checkpoints.rst:98 +msgid "" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" +msgstr "要加载进度,只需在代码中添加以下几行。请注意,这将遍历所有已保存的检查点,并加载最新的检查点:" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:111 #, fuzzy msgid "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"Return/use this object of type ``Parameters`` wherever necessary, such as" +" in the ``initial_parameters`` when defining a ``Strategy``." msgstr "" -":py:obj:`get_properties `\\ " -"\\(config\\)" +"在必要时返回/使用此 ``Parameters`` 类型的对象,例如在定义 ``Strategy` 时的 " +"``initial_parameters` 中。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.get_properties:1 of -msgid "Return a client's set of properties." -msgstr "返回客户端的属性集。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "升级至 Flower 1.0" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 msgid "" -":py:obj:`set_context `\\ " -"\\(context\\)" +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." 
msgstr "" -":py:obj:`set_context `\\ " -"\\(context\\)" - -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#, fuzzy -msgid ":py:obj:`to_client `\\ \\(\\)" -msgstr ":py:obj:`to_client `\\ \\(\\)" +"Flower 1.0 正式发布。除了新功能,Flower 1.0 还为未来的发展奠定了稳定的基础。与 Flower 0.19(以及其他 0.x " +"系列版本)相比,有一些破坏性改动需要修改现有 0.x 系列项目的代码。" -#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 -#: flwr.client.numpy_client.NumPyClient.to_client:1 of -msgid "Convert to object to Client type and return it." -msgstr "将对象转换为客户类型并返回。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +#: ../../source/how-to-upgrade-to-flower-next.rst:63 +msgid "Install update" +msgstr "安装更新" -#: flwr.client.numpy_client.NumPyClient.evaluate:1::1 of -#, fuzzy -msgid ":py:obj:`context `\\" -msgstr ":py:obj:`context `\\" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +msgid "" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" +msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: flwr.client.numpy_client.NumPyClient.evaluate:3 -#: flwr.client.numpy_client.NumPyClient.fit:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 -#: flwr.server.strategy.strategy.Strategy.configure_fit:5 -#: flwr.server.strategy.strategy.Strategy.evaluate:8 of -msgid "The current (global) model parameters." -msgstr "当前(全局)模型参数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "pip: add ``-U`` when installing." +msgstr "pip: 安装时添加 ``-U``." -#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:16 msgid "" -"Configuration parameters which allow the server to influence evaluation " -"on the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to influence the number of examples " -"used for evaluation." 
-msgstr "允许服务器影响客户端评估的配置参数。它可用于将任意值从服务器传送到客户端,例如,影响用于评估的示例数量。" +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" +msgstr "`python -m pip install -U flwr``(当使用`start_server`和`start_client`时)" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 msgid "" -"* **loss** (*float*) -- The evaluation loss of the model on the local " -"dataset. * **num_examples** (*int*) -- The number of examples used for " -"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " -"arbitrary string keys to values of type bool, bytes, float, int, or " -"str. It can be used to communicate arbitrary values back to the server." -msgstr "" -"**loss** (*float*) -- 模型在本地数据集上的评估损失值。**num_examples** (*int*) -- " -"用于评估的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " -"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" +"``python -m pip install -U 'flwr[simulation]'`` (when using " +"``start_simulation``)" +msgstr "``python -m pip install -U 'flwr[simulation]'``(当使用`start_simulation``时)" -#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 msgid "" -"**loss** (*float*) -- The evaluation loss of the model on the local " -"dataset." -msgstr "**loss** (*float*) -- 模型在本地数据集上的评估损失值。" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." +msgstr "" +"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" +" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" -#: flwr.client.numpy_client.NumPyClient.evaluate:12 of -msgid "**num_examples** (*int*) -- The number of examples used for evaluation." 
-msgstr "**num_examples** (*int*) -- 用于评估的示例数量。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:23 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +msgstr "``flwr = \"^1.0.0\"`` (当使用 ``start_server` 和 ``start_client` 时)" -#: flwr.client.numpy_client.NumPyClient.evaluate:13 -#: flwr.client.numpy_client.NumPyClient.fit:13 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 msgid "" -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can be " -"used to communicate arbitrary values back to the server." +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" msgstr "" -"**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 bool、bytes、float、int 或 " -"str 类型值的字典。它可用于将任意值传回服务器。" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] " +"}``(当使用``start_simulation``时)" -#: flwr.client.numpy_client.NumPyClient.evaluate:19 of -msgid "" -"The previous return type format (int, float, float) and the extended " -"format (int, float, float, Dict[str, Scalar]) have been deprecated and " -"removed since Flower 0.19." -msgstr "" -"自 Flower 0.19 起,之前的返回类型格式(int、float、float)和扩展格式(int、float、float、Dict[str," -" Scalar])已被弃用和移除。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:28 +#: ../../source/how-to-upgrade-to-flower-next.rst:121 +msgid "Required changes" +msgstr "所需变更" -#: flwr.client.numpy_client.NumPyClient.fit:5 of -msgid "" -"Configuration parameters which allow the server to influence training on " -"the client. It can be used to communicate arbitrary values from the " -"server to the client, for example, to set the number of (local) training " -"epochs." -msgstr "允许服务器影响客户端训练的配置参数。它可用于将任意值从服务器传送到客户端,例如设置(本地)训练遍历数。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:30 +msgid "The following breaking changes require manual updates." 
+msgstr "以下更改需要手动更新。" -#: flwr.client.numpy_client.NumPyClient.fit:11 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "General" +msgstr "一般情况" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:35 msgid "" -"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " -"**num_examples** (*int*) -- The number of examples used for training. * " -"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " -"string keys to values of type bool, bytes, float, int, or str. It can " -"be used to communicate arbitrary values back to the server." -msgstr "" -"**parameters** (*NDArrays*) -- 本地更新的模型参数。**num_examples** (*int*) -- " -"用于训练的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " -"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" - -#: flwr.client.numpy_client.NumPyClient.fit:11 of -msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." -msgstr "**parameters** (*NDArrays*) -- 本地更新的模型参数。" - -#: flwr.client.numpy_client.NumPyClient.fit:12 of -msgid "**num_examples** (*int*) -- The number of examples used for training." -msgstr "**num_examples** (*int*) -- 用于训练的数据数量。" - -#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of -msgid "" -"Configuration parameters requested by the server. This can be used to " -"tell the client which parameters are needed along with some Scalar " -"attributes." -msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些参数以及一些标量属性。" - -#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of -msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." -msgstr "**parameters** -- NumPy ndarrays 的本地模型参数列表。" +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" +msgstr "将所有参数作为关键字参数传递(而不是位置参数)。下面是一个例子:" -#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:38 msgid "" -"Configuration parameters requested by the server. 
This can be used to " -"tell the client which properties are needed along with some Scalar " -"attributes." -msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些属性以及一些标量属性。" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" +msgstr "Flower 0.19 (位置参数): ``start_client(\"127.0.0.1:8080\", FlowerClient())``" -#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 msgid "" -"**properties** -- A dictionary mapping arbitrary string keys to values of" -" type bool, bytes, float, int, or str. It can be used to communicate " -"arbitrary property values back to the server." +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" msgstr "" -"**properties** -- 将任意字符串键映射到 bool、bytes、float、int 或 str " -"类型值的字典。它可用于将任意属性值传回服务器。" +"Flower 1.0(关键字参数): ``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" -#: ../../source/ref-api/flwr.client.mod.rst:2 -#, fuzzy -msgid "mod" -msgstr "模块" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +#: ../../source/ref-api/flwr.client.Client.rst:2 +msgid "Client" +msgstr "客户端" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 msgid "" -":py:obj:`adaptiveclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" msgstr "" +"NumPyClient的子类:将``def get_parameters(self):```改为``def " +"get_parameters(self,config):``" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of -#, fuzzy -msgid "Client-side adaptive clipping modifier." 
-msgstr "客户端逻辑" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 msgid "" -":py:obj:`fixedclipping_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" msgstr "" +"客户端 \"的子类:将 \"get_parameters(self): \"改为 \"get_parameters(self, ins: " +"GetParametersIns):\"" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of -#, fuzzy -msgid "Client-side fixed clipping modifier." -msgstr "客户端逻辑" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#, fuzzy -msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" -msgstr ":py:obj:`Client `\\ \\(\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "Strategies / ``start_server`` / ``start_simulation``" +msgstr "策略 / ``start_server`` / ``start_simulation``" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:53 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. Here's an example:" msgstr "" +"向 ``start_server`` 和 ``start_simulation` 传递 ``ServerConfig``(而不是 " +"dictionary)。下面是一个例子:" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:56 msgid "" -":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " -"call\\_next\\)" -msgstr ":py:obj:`set_context `\\ \\(context\\)" - -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of -msgid "Handle incoming message and return results, following the SecAgg protocol." 
+"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" msgstr "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 msgid "" -":py:obj:`secaggplus_mod `\\ \\(msg\\, " -"ctxt\\, call\\_next\\)" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" msgstr "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 msgid "" -"Handle incoming message and return results, following the SecAgg+ " -"protocol." -msgstr "" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" +msgstr "将`start_simulation``中的`num_rounds=1``替换为新的`config=ServerConfig(...)`(参见前一项)" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:63 msgid "" -":py:obj:`message_size_mod `\\ \\(msg\\," -" ctxt\\, call\\_next\\)" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." msgstr "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +"删除调用 ``start_server`` 时的 ``force_final_distributed_eval` " +"参数。可以通过配置策略,在最后一轮训练后对所有客户端进行抽样评估,从而启用对所有客户端的分布式评估。" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.message_size_mod:1 of -#, fuzzy -msgid "Message size mod." 
-msgstr "信息类型。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:66 +msgid "Rename parameter/ndarray conversion functions:" +msgstr "重命名参数/数组转换函数:" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#, fuzzy -msgid "" -":py:obj:`parameters_size_mod `\\ " -"\\(msg\\, ctxt\\, call\\_next\\)" -msgstr "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +msgstr "``parameters_to_weights`` --> ``parameters_to_ndarrays``" -#: ../../source/ref-api/flwr.client.mod.rst:28::1 -#: flwr.client.mod.comms_mods.parameters_size_mod:1 of -#, fuzzy -msgid "Parameters size mod." -msgstr "参数" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:69 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "``weights_to_parameters`` --> ``ndarrays_to_parameters``" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 msgid "" -":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," -" sensitivity\\, ...\\)" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
msgstr "" -":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " -"mods\\]\\)" +"策略初始化:如果策略依赖于 ``fraction_fit`` 和 ``fraction_evaluate`` 的默认值,请手动将 " +"``fraction_fit`` 和 ``fraction_evaluate`` 设置为 ``0.1``。未手动创建策略的项目(调用 " +"``start_server` 或 ``start_simulation` 时未传递策略实例)现在应手动初始化 FedAvg,并将 " +"`fraction_fit` 和 `fraction_evaluate` 设为 `0.1``。" -#: ../../source/ref-api/flwr.client.mod.rst:35::1 -#: flwr.client.mod.localdp_mod.LocalDpMod:1 of -#, fuzzy -msgid "Modifier for local differential privacy." -msgstr "差分隐私" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:77 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "重命名内置策略参数(例如,`FedAvg``):" -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 -#, fuzzy -msgid "LocalDpMod" -msgstr "本地 DP 模式" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:79 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "``fraction_eval`` --> ``fraction_evaluate``" -#: flwr.client.mod.localdp_mod.LocalDpMod:3 of -msgid "" -"This mod clips the client model updates and adds noise to the params " -"before sending them to the server." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:80 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "``min_eval_clients`` --> ``min_evaluate_clients``" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 -#: flwr.client.mod.localdp_mod.LocalDpMod:6 of -msgid "It operates on messages of type `MessageType.TRAIN`." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:81 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "``eval_fn`` --> ``evaluate_fn``" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:83 +msgid "" +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." 
msgstr "" +"将 `rnd` 更名为 `server_round`。这会影响多个方法和函数,例如 " +"``configure_fit``、``aggregate_fit``、``configure_evaluate``、`aggregate_evaluate``" +" 和 ``evaluate_fn``。" -#: flwr.client.mod.localdp_mod.LocalDpMod:8 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 -#: of -#, fuzzy -msgid "The value of the clipping norm." -msgstr "削波法线的值。" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:86 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "在 ``evaluate_fn` 中添加 ``server_round` 和 ``config`:" -#: flwr.client.mod.localdp_mod.LocalDpMod:10 of -msgid "The sensitivity of the client model." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:88 +msgid "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: flwr.client.mod.localdp_mod.LocalDpMod:12 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:90 msgid "" -"The privacy budget. Smaller value of epsilon indicates a higher level of " -"privacy protection." +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" msgstr "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" -#: flwr.client.mod.localdp_mod.LocalDpMod:15 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:94 +msgid "Custom strategies" +msgstr "定制策略" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:96 msgid "" -"The failure probability. The probability that the privacy mechanism fails" -" to provide the desired level of privacy. A smaller value of delta " -"indicates a stricter privacy guarantee." 
+"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" msgstr "" +"参数``failures``的类型已从``List[BaseException]``变为``List[Union[Tuple[ClientProxy," +" FitRes], " +"BaseException]]``(在``agregate_fit``中)和``List[Union[Tuple[ClientProxy, " +"EvaluateRes], BaseException]]``(在``agregate_evaluate``中)" -#: flwr.client.mod.localdp_mod.LocalDpMod:23 of -msgid "Create an instance of the local DP mod and add it to the client-side mods:" -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:100 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" +msgstr "``Strategy``方法 的``evaluate``现在会接收当前一轮联邦学习/评估作为第一个参数:" -#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 -msgid "adaptiveclipping\\_mod" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:103 +msgid "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" msgstr "" +"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:```" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of -#, fuzzy +#: ../../source/how-to-upgrade-to-flower-1.0.rst:105 msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " -"wrapper." 
-msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" +"Flower 1.0: ``def evaluate(self, server_round: int, parameters: " +"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of -#, fuzzy -msgid "The wrapper sends the clipping_norm value to the client." -msgstr "向客户发送近端因子mu" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:109 +msgid "Optional improvements" +msgstr "可选的改进措施" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of -msgid "This mod clips the client model updates before sending them to the server." -msgstr "" +#: ../../source/how-to-upgrade-to-flower-1.0.rst:111 +msgid "" +"Along with the necessary changes above, there are a number of potential " +"improvements that just became possible:" +msgstr "除了上述必要的改动之外,还有一些潜在的改进措施:" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of +#: ../../source/how-to-upgrade-to-flower-1.0.rst:114 msgid "" -"It also sends KEY_NORM_BIT to the server for computing the new clipping " -"value." +"Remove \"placeholder\" methods from subclasses of ``Client`` or " +"``NumPyClient``. If you, for example, use server-side evaluation, then " +"empty placeholder implementations of ``evaluate`` are no longer " +"necessary." 
msgstr "" +"删除 ``Client`` 或 ``NumPyClient`` 子类中的 \"占位符 " +"\"方法。例如,如果你使用服务器端评估,那么就不再需要``evaluate``的 \"空占位符 \"实现。" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 -#: flwr.server.driver.driver.Driver.send_and_receive:18 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:53 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:60 -#: of -#, fuzzy -msgid "Notes" -msgstr "无" - -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of -msgid "Consider the order of mods when using multiple." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:117 +msgid "" +"Configure the round timeout via ``start_simulation``: " +"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, " +"round_timeout=600.0), ...)``" msgstr "" +"通过 ``start_simulation`` 配置循环超时: ``start_simulation(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of -msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +#: ../../source/how-to-upgrade-to-flower-1.0.rst:121 +#: ../../source/how-to-upgrade-to-flower-next.rst:349 +msgid "Further help" +msgstr "更多帮助" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:123 +msgid "" +"Most official `Flower code examples " +"`_ are already updated" +" to Flower 1.0, they can serve as a reference for using the Flower 1.0 " +"API. If there are further questions, `join the Flower Slack " +"`_ and use the channel ``#questions``." 
msgstr "" +"大多数官方的 `Flower 代码示例 `_" +" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " +"`_ 并使用 \"#questions``\"。" -#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:2 #, fuzzy -msgid "fixedclipping\\_mod" -msgstr "剪贴" +msgid "Upgrade to Flower Next" +msgstr "升级至 Flower 1.0" -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:4 #, fuzzy msgid "" -"This mod needs to be used with the " -"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." -msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" - -#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of -msgid "Typically, fixedclipping_mod should be the last to operate on params." -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 -msgid "make\\_ffn" -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 -msgid "message\\_size\\_mod" +"Welcome to the migration guide for updating Flower to Flower Next! " +"Whether you're a seasoned user or just getting started, this guide will " +"help you smoothly transition your existing setup to take advantage of the" +" latest features and improvements in Flower Next, starting from version " +"1.8." msgstr "" +"欢迎阅读从 Flower 升级到 Flower Next 的迁移指南!无论您是经验丰富的用户还是刚刚开始使用 " +"Flower,本指南都将帮助您顺利过渡现有设置,以利用 Flower Next 从 1.8 版开始的最新功能和改进。" -#: flwr.client.mod.comms_mods.message_size_mod:3 of -msgid "This mod logs the size in bytes of the message being transmited." +#: ../../source/how-to-upgrade-to-flower-next.rst:11 +#, fuzzy +msgid "" +"This guide shows how to reuse pre-``1.8`` Flower code with minimum code " +"changes by using the *compatibility layer* in Flower Next. In another " +"guide, we will show how to run Flower Next end-to-end with pure Flower " +"Next APIs." 
msgstr "" +"本指南展示了如何通过使用 Flower Next 中的*可兼容层*,以最小的代码改动重用```1.8```前的 Flower " +"代码。在另一个指南中,我们将介绍如何使用纯 Flower Next API 端到端运行 Flower Next。" -#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:15 #, fuzzy -msgid "parameters\\_size\\_mod" -msgstr "参数" +msgid "Let's dive in!" +msgstr "让我们深入了解一下!" -#: flwr.client.mod.comms_mods.parameters_size_mod:3 of +#: ../../source/how-to-upgrade-to-flower-next.rst:68 +#, fuzzy msgid "" -"This mod logs the number of parameters transmitted in the message as well" -" as their size in bytes." -msgstr "" - -#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 -msgid "secagg\\_mod" -msgstr "" +"Here's how to update an existing installation of Flower to Flower Next " +"with ``pip``:" +msgstr "下面介绍如何使用 pip 或 Poetry 将现有安装更新到 Flower 1.0:" -#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:74 #, fuzzy -msgid "secaggplus\\_mod" -msgstr "工作流程" +msgid "or if you need Flower Next with simulation:" +msgstr "启动 Flower 模拟" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:80 #, fuzzy -msgid "run\\_client\\_app" -msgstr "run\\_client\\_app" +msgid "" +"Ensure you set the following version constraint in your " +"``requirements.txt``" +msgstr "确保在 ``requirements.txt`` 中设置了以下版本限制" -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:90 #, fuzzy -msgid "run\\_supernode" -msgstr "flower-superlink" +msgid "or ``pyproject.toml``:" +msgstr "或 ``pyproject.toml```:" -#: ../../source/ref-api/flwr.client.start_client.rst:2 +#: ../../source/how-to-upgrade-to-flower-next.rst:101 #, fuzzy -msgid "start\\_client" -msgstr "启动客户端" +msgid "Using Poetry" +msgstr "使用 pip" -#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of +#: ../../source/how-to-upgrade-to-flower-next.rst:103 +#, fuzzy msgid "" -"The IPv4 or 
IPv6 address of the server. If the Flower server runs on the " -"same machine on port 8080, then `server_address` would be " -"`\"[::]:8080\"`." +"Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall " +"(don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before " +"running ``poetry install``)." msgstr "" -"服务器的 IPv4 或 IPv6 地址:如果 Flower 服务器在同一台机器上运行,端口为 " -"8080,则`server_address`应为`\"[::]:8080\"`。" - -#: flwr.client.app.start_client:7 of -msgid "A callable that instantiates a Client. (default: None)" -msgstr "用于实例化客户端的可调用程序。(默认值:无)" +"Poetry:更新 ``pyproject.toml`` 中的 ``flwr`` 依赖包,然后重新安装(运行 ``poetry install``" +" 前,别忘了通过 ``rm poetry.lock` 删除 ``poetry.lock`)。" -#: flwr.client.app.start_client:9 of +#: ../../source/how-to-upgrade-to-flower-next.rst:106 +#, fuzzy msgid "" -"An implementation of the abstract base class `flwr.client.Client` " -"(default: None)" -msgstr "抽象基类 `flwr.client.Client` 的实现(默认值:无)" +"Ensure you set the following version constraint in your " +"``pyproject.toml``:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of +#: ../../source/how-to-upgrade-to-flower-next.rst:123 +#, fuzzy msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" server. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower server needs to be started with the same value (see " -"`flwr.server.start_server`), otherwise it will not know about the " -"increased limit and block larger messages." +"In Flower Next, the *infrastructure* and *application layers* have been " +"decoupled. Instead of starting a client in code via ``start_client()``, " +"you create a |clientapp_link|_ and start it via the command line. Instead" +" of starting a server in code via ``start_server()``, you create a " +"|serverapp_link|_ and start it via the command line. 
The long-running " +"components of server and client are called SuperLink and SuperNode. The " +"following non-breaking changes that require manual updates and allow you " +"to run your project both in the traditional way and in the Flower Next " +"way:" msgstr "" -"可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " -"服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" +"在 Flower Next " +"中,*基础架构层*和*应用层*已经解耦。你不再需要在代码中通过``start_client()``启动客户端,而是创建一个|clientapp_link|_,然后通过命令行启动它。无需通过``start_server()``在代码中启动服务器,而是创建一个" +" |serverapp_link|_ " +"并通过命令行启动它。服务器和客户端的长期运行组件被称为超级链接(SuperLink)和超级节点(SuperNode)。以下是无需手动更新的非破坏性更改,可让您以传统方式和" +" Flower Next 方式运行项目:" -#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of -msgid "" -"The PEM-encoded root certificates as a byte string or a path string. If " -"provided, a secure connection using the certificates will be established " -"to an SSL-enabled Flower server." -msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" +#: ../../source/how-to-upgrade-to-flower-next.rst:132 +#, fuzzy +msgid "|clientapp_link|_" +msgstr "客户端" -#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of +#: ../../source/how-to-upgrade-to-flower-next.rst:134 #, fuzzy msgid "" -"Starts an insecure gRPC connection when True. Enables HTTPS connection " -"when False, using system certificates if `root_certificates` is None." -msgstr "" -"为 True 时启动不安全的 gRPC 连接。False 时启用 HTTPS 连接,如果 `root_certificates` 为 " -"None,则使用系统证书。" +"Wrap your existing client with |clientapp_link|_ instead of launching it " +"via |startclient_link|_. Here's an example:" +msgstr "用 |clientapp_link|_ 封装现有客户端,而不是通过 |startclient_link|_ 启动。下面是一个例子:" -#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of -msgid "" -"Configure the transport layer. 
Allowed values: - 'grpc-bidi': gRPC, " -"bidirectional streaming - 'grpc-rere': gRPC, request-response " -"(experimental) - 'rest': HTTP (experimental)" -msgstr "" -"配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " -"'rest': HTTP(实验性)" +#: ../../source/how-to-upgrade-to-flower-next.rst:157 +#, fuzzy +msgid "|serverapp_link|_" +msgstr "服务器" -#: flwr.client.app.start_client:31 of +#: ../../source/how-to-upgrade-to-flower-next.rst:159 #, fuzzy msgid "" -"The maximum number of times the client will try to connect to the server " -"before giving up in case of a connection error. If set to None, there is " -"no limit to the number of tries." -msgstr "客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\",则不限制尝试次数。" +"Wrap your existing strategy with |serverapp_link|_ instead of starting " +"the server via |startserver_link|_. Here's an example:" +msgstr "用 |serverapp_link|_ 包住现有策略,而不是通过 |startserver_link|_ 启动服务器。下面是一个例子:" -#: flwr.client.app.start_client:35 of +#: ../../source/how-to-upgrade-to-flower-next.rst:180 #, fuzzy -msgid "" -"The maximum duration before the client stops trying to connect to the " -"server in case of connection error. If set to None, there is no limit to " -"the total time." 
-msgstr "在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无\",则总时间没有限制。" - -#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of -msgid "Starting a gRPC client with an insecure server connection:" -msgstr "使用不安全的服务器连接启动 gRPC 客户端:" - -#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of -#, fuzzy -msgid "Starting an SSL-enabled gRPC client using system certificates:" -msgstr "启动支持 SSL 的 gRPC 客户端:" - -#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of -#, fuzzy -msgid "Starting an SSL-enabled gRPC client using provided certificates:" -msgstr "启动支持 SSL 的 gRPC 客户端:" - -#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 -#, fuzzy -msgid "start\\_numpy\\_client" -msgstr "start_numpy_client" +msgid "Deployment" +msgstr "调配" -#: flwr.client.app.start_numpy_client:5 of +#: ../../source/how-to-upgrade-to-flower-next.rst:182 #, fuzzy msgid "" -"This function is deprecated since 1.7.0. Use " -":code:`flwr.client.start_client` instead and first convert your " -":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " -":code:`to_client()` method." +"Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, " +"in sequence, |flowernext_clientapp_link|_ (2x) and " +"|flowernext_serverapp_link|_. There is no need to execute `client.py` and" +" `server.py` as Python scripts." msgstr "" -"自 1.7.0 起该函数已被弃用。请使用 :code:`flwr.client.start_client`,并首先通过执行 " -":code:`to_client()`方法将 :code:`NumPyClient`转换为 :code:`flwr.client.Client`。" - -#: flwr.client.app.start_numpy_client:13 of -msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." 
-msgstr "抽象基类 `flwr.client.NumPyClient` 的实现。" - -#: ../../source/ref-api/flwr.common.rst:2 -msgid "common" -msgstr "常见" +"在依次运行 |flowernext_clientapp_link|_ (2x) 和 |flowernext_serverapp_link|_ " +"之前,使用 |flowernext_superlink_link|_ 运行 ``SuperLink`` 。无需将 |client.py` 和 " +"`server.py` 作为 Python 脚本执行。" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:185 #, fuzzy -msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" -msgstr ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgid "" +"Here's an example to start the server without HTTPS (only for " +"prototyping):" +msgstr "下面是一个在不使用 HTTPS 的情况下启动服务器的示例(仅用于原型开发):" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.record.conversion_utils.array_from_numpy:1 of +#: ../../source/how-to-upgrade-to-flower-next.rst:201 #, fuzzy -msgid "Create Array from NumPy ndarray." -msgstr "将参数对象转换为 NumPy ndarrays。" +msgid "" +"Here's another example to start with HTTPS. Use the ``--ssl-ca-" +"certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line " +"options to pass paths to (CA certificate, server certificate, and server " +"private key)." +msgstr "下面是另一个使用 HTTPS 的示例。使用 ``--certificates`` 命令行参数传递路径(CA 证书、服务器证书和服务器私钥)。" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:229 #, fuzzy -msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" -msgstr ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.bytes_to_ndarray:1 of -msgid "Deserialize NumPy ndarray from bytes." -msgstr "从字节反序列化 NumPy ndarray。" +msgid "Simulation in CLI" +msgstr "运行模拟" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:231 #, fuzzy msgid "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" +"Wrap your existing client and strategy with |clientapp_link|_ and " +"|serverapp_link|_, respectively. 
There is no need to use |startsim_link|_" +" anymore. Here's an example:" msgstr "" -":py:obj:`configure `\\ \\(identifier\\[\\, " -"filename\\, host\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.logger.configure:1 of -msgid "Configure logging to file and/or remote log server." -msgstr "配置将日志记录到文件和/或远程日志服务器。" +"分别用 |clientapp_link|_ 和 |serverapp_link|_ 封装现有的客户端和策略。无需再使用 " +"|startsim_link|_。下面是一个示例:" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:264 #, fuzzy msgid "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" +"Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / " +"``client_app`` object in the code instead of executing the Python script." +" Here's an example (assuming the ``server_app`` and ``client_app`` " +"objects are in a ``sim.py`` module):" msgstr "" -":py:obj:`event `\\ \\(event\\_type\\[\\, " -"event\\_details\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.telemetry.event:1 of -#, fuzzy -msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." -msgstr "将 create_event 提交给 ThreadPoolExecutor 以避免阻塞。" +"在 CLI 中运行 |flower_simulation_link|_ 并指向代码中的 ``server_app`` " +"/``client_app`` 对象,而不是执行 Python 脚本。下面是一个示例(假定 `server_app`` 和 " +"`client_app`` 对象位于 `sim.py`` 模块中):" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:281 #, fuzzy msgid "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" +"Set default resources for each |clientapp_link|_ using the ``--backend-" +"config`` command line argument instead of setting the " +"``client_resources`` argument in |startsim_link|_. Here's an example:" msgstr "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 -#: of -msgid "Log 'msg % args' with the integer severity 'level'." 
-msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" +"使用 ``--backend-config`` 命令行参数为每个 |clientapp_link|_ 设置默认资源,而不是在 " +"|startsim_link|_ 中设置 ``client_resources`` 参数。下面是一个例子:" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:305 #, fuzzy -msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" -msgstr ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarray_to_bytes:1 of -msgid "Serialize NumPy ndarray to bytes." -msgstr "将 NumPy ndarray 序列化为字节。" +msgid "Simulation in a Notebook" +msgstr "笔记本中的模拟" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:307 #, fuzzy -msgid ":py:obj:`now `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" - -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.date.now:1 of -msgid "Construct a datetime from time.time() with time zone set to UTC." -msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" +msgid "" +"Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's " +"an example:" +msgstr "在笔记本中运行 |runsim_link|_,而不是 |startsim_link|_。下面是一个例子:" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:351 #, fuzzy msgid "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"Some official `Flower code examples `_ " +"are already updated to Flower Next so they can serve as a reference for " +"using the Flower Next API. If there are further questions, `join the " +"Flower Slack `_ and use the channel " +"``#questions``. You can also `participate in Flower Discuss " +"`_ where you can find us answering questions," +" or share and learn from others about migrating to Flower Next." 
msgstr "" -":py:obj:`ndarrays_to_parameters `\\ " -"\\(ndarrays\\)" +"大多数官方的 `Flower 代码示例 `_" +" 已经更新到 Flower 1.0,它们可以作为使用 Flower 1.0 API 的参考。如果还有其他问题,请加入 Flower Slack " +"`_ 并使用 \"#questions``\"。" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.ndarrays_to_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 -#: of -msgid "Convert NumPy ndarrays to parameters object." -msgstr "将 NumPy ndarrays 转换为参数对象。" +#: ../../source/how-to-upgrade-to-flower-next.rst:358 +#, fuzzy +msgid "Important" +msgstr "重要变更:" -#: ../../source/ref-api/flwr.common.rst:30::1 +#: ../../source/how-to-upgrade-to-flower-next.rst:360 #, fuzzy msgid "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" -msgstr "" -":py:obj:`parameters_to_ndarrays `\\ " -"\\(parameters\\)" +"As we continuously enhance Flower Next at a rapid pace, we'll be " +"periodically updating this guide. Please feel free to share any feedback " +"with us!" +msgstr "随着 Flower Next 的不断快速改进,我们将定期更新本指南。如有任何反馈,请随时与我们分享!" -#: ../../source/ref-api/flwr.common.rst:30::1 -#: flwr.common.parameter.parameters_to_ndarrays:1 of -msgid "Convert parameters object to NumPy ndarrays." -msgstr "将参数对象转换为 NumPy ndarrays。" +#: ../../source/how-to-upgrade-to-flower-next.rst:366 +#, fuzzy +msgid "Happy migrating! 🚀" +msgstr "移民愉快!🚀" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:2 #, fuzzy -msgid "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" -msgstr "" -":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " -"data\\)" +msgid "Use Built-in Mods" +msgstr "使用内置调制器" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.Array:1 of +#: ../../source/how-to-use-built-in-mods.rst:4 #, fuzzy -msgid "Array type." -msgstr "返回类型" +msgid "" +"**Note: This tutorial covers experimental features. 
The functionality and" +" interfaces may change in future versions.**" +msgstr "**注:本教程涵盖实验性功能。功能和界面可能会在未来版本中发生变化。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:7 #, fuzzy msgid "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" +"In this tutorial, we will learn how to utilize built-in mods to augment " +"the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) " +"allow us to perform operations before and after a task is processed in " +"the ``ClientApp``." msgstr "" -":py:obj:`ClientMessage `\\ " -"\\(\\[get\\_properties\\_res\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ClientMessage:1 of -msgid "ClientMessage is a container used to hold one result message." -msgstr "ClientMessage 是用于容纳一条结果信息的容器。" +"在本教程中,我们将学习如何利用内置模块来增强 ``ClientApp`` 的行为。修改器(有时也称为修改器)允许我们在 ``ClientApp``" +" 处理任务之前和之后执行操作。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:12 #, fuzzy -msgid ":py:obj:`Code `\\ \\(value\\)" -msgstr ":py:obj:`Code `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Code:1 of -msgid "Client status codes." -msgstr "客户端状态代码。" +msgid "What are Mods?" +msgstr "什么是 Mods?" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:14 #, fuzzy msgid "" -":py:obj:`ConfigsRecord `\\ " -"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" +"A Mod is a callable that wraps around a ``ClientApp``. It can manipulate " +"or inspect the incoming ``Message`` and the resulting outgoing " +"``Message``. 
The signature for a ``Mod`` is as follows:" msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +"Mod 是包裹在 ``ClientApp`` 周围的可调用程序。它可以操作或检查传入的 ``Message`` 和由此产生的传出的 " +"``Message`` 。一个 ``Mod`` 的签名如下:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.configsrecord.ConfigsRecord:1 of +#: ../../source/how-to-use-built-in-mods.rst:23 #, fuzzy -msgid "Configs record." -msgstr "配置日志记录" +msgid "A typical mod function might look something like this:" +msgstr "一个典型的修改函数可能是这样的:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:36 #, fuzzy -msgid ":py:obj:`Context `\\ \\(state\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +msgid "Using Mods" +msgstr "使用修改器" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.context.Context:1 of +#: ../../source/how-to-use-built-in-mods.rst:38 #, fuzzy -msgid "State of your run." -msgstr "您的运行状态。" +msgid "To use mods in your ``ClientApp``, you can follow these steps:" +msgstr "要在您的 ``ClientApp`` 中使用 mod,可以按照以下步骤操作:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:41 #, fuzzy -msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" -msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.DisconnectRes:1 of -msgid "DisconnectRes message from client to server." -msgstr "客户端向服务器发送 DisconnectRes 信息。" +msgid "1. Import the required mods" +msgstr "1. 
导入所需修改" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:43 #, fuzzy -msgid "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" -msgstr "" -":py:obj:`EvaluateIns `\\ \\(parameters\\, " -"config\\)" +msgid "First, import the built-in mod you intend to use:" +msgstr "首先,导入您打算使用的内置模式:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateIns:1 of -msgid "Evaluate instructions for a client." -msgstr "评估客户端的指示。" +#: ../../source/how-to-use-built-in-mods.rst:51 +#, fuzzy +msgid "2. Define your client function" +msgstr "2. 定义客户功能" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:53 #, fuzzy msgid "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" -msgstr "" -":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " -"num\\_examples\\, metrics\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.EvaluateRes:1 of -msgid "Evaluate response from a client." -msgstr "评估客户端的反应。" +"Define your client function (``client_fn``) that will be wrapped by the " +"mod(s):" +msgstr "定义将被 mod 封装的客户端函数(``client_fn``):" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:62 #, fuzzy -msgid ":py:obj:`EventType `\\ \\(value\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.telemetry.EventType:1 of -msgid "Types of telemetry events." -msgstr "遥测事件类型。" +msgid "3. Create the ``ClientApp`` with mods" +msgstr "3. 用模块创建 ``ClientApp``" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:64 #, fuzzy -msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" -msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +msgid "" +"Create your ``ClientApp`` and pass the mods as a list to the ``mods`` " +"argument. 
The order in which you provide the mods matters:" +msgstr "创建您的 ``ClientApp`` 并将 mods 作为列表传递给 ``mods`` 参数。提供 mod 的顺序很重要:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitIns:1 of -msgid "Fit instructions for a client." -msgstr "为客户提供安装说明。" +#: ../../source/how-to-use-built-in-mods.rst:78 +#, fuzzy +msgid "Order of execution" +msgstr "停用" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:80 #, fuzzy msgid "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" -msgstr "" -":py:obj:`FitRes `\\ \\(status\\, parameters\\, " -"num\\_examples\\, metrics\\)" +"When the ``ClientApp`` runs, the mods are executed in the order they are " +"provided in the list:" +msgstr "当运行 ``ClientApp`` 时,会按照列表中提供的顺序执行模块:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.FitRes:1 of -msgid "Fit response from a client." -msgstr "来自客户端的合适回复。" +#: ../../source/how-to-use-built-in-mods.rst:83 +#, fuzzy +msgid "``example_mod_1`` (outermost mod)" +msgstr "``example_mod_1`` (最外层模块)" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:84 #, fuzzy -msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" -msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgid "``example_mod_2`` (next mod)" +msgstr "示例模式 2(下一个模式)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Error:1 of +#: ../../source/how-to-use-built-in-mods.rst:85 #, fuzzy -msgid "A dataclass that stores information about an error that occurred." 
-msgstr "数据类,用于存储所发生错误的相关信息。" +msgid "" +"Message handler (core function that handles the incoming ``Message`` and " +"returns the outgoing ``Message``)" +msgstr "消息处理程序(处理传入的 \"消息 \"并返回传出的 \"消息 \"的核心函数)" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:87 #, fuzzy -msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" -msgstr ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgid "``example_mod_2`` (on the way back)" +msgstr "``example_mod_2`` (返回途中)" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersIns:1 of -msgid "Parameters request for a client." -msgstr "客户端的参数请求。" +#: ../../source/how-to-use-built-in-mods.rst:88 +#, fuzzy +msgid "``example_mod_1`` (outermost mod on the way back)" +msgstr "``example_mod_1`` (返回途中最外层的模式)" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:90 #, fuzzy msgid "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" -msgstr "" -":py:obj:`GetParametersRes `\\ \\(status\\, " -"parameters\\)" +"Each mod has a chance to inspect and modify the incoming ``Message`` " +"before passing it to the next mod, and likewise with the outgoing " +"``Message`` before returning it up the stack." +msgstr "每个模块都有机会检查和修改传入的 \"信息\",然后再将其传递给下一个模块,同样,也有机会检查和修改传出的 \"信息\",然后再将其返回堆栈。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetParametersRes:1 of -msgid "Response when asked to return parameters." -msgstr "要求返回参数时的响应。" +#: ../../source/how-to-use-built-in-mods.rst:97 +#, fuzzy +msgid "" +"By following this guide, you have learned how to effectively use mods to " +"enhance your ``ClientApp``'s functionality. Remember that the order of " +"mods is crucial and affects how the input and output are processed." 
+msgstr "" +"通过本指南,您已学会如何有效地使用 mod 来增强您的 ``ClientApp`` 的功能。请记住,mod " +"的顺序至关重要,它会影响输入和输出的处理方式。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-built-in-mods.rst:101 #, fuzzy -msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" -msgstr ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +msgid "Enjoy building a more robust and flexible ``ClientApp`` with mods!" +msgstr "使用 mods 构建更强大、更灵活的 \"客户端应用程序\"!" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesIns:1 of -msgid "Properties request for a client." -msgstr "客户端的属性请求。" +#: ../../source/how-to-use-differential-privacy.rst:2 +#, fuzzy +msgid "Use Differential Privacy" +msgstr "差分隐私" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:4 #, fuzzy msgid "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" +"This guide explains how you can utilize differential privacy in the " +"Flower framework. If you are not yet familiar with differential privacy, " +"you can refer to :doc:`explanation-differential-privacy`." msgstr "" -":py:obj:`GetPropertiesRes `\\ \\(status\\, " -"properties\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.GetPropertiesRes:1 of -msgid "Properties response from a client." -msgstr "来自客户端的属性响应。" +"本指南解释了如何在 Flower 框架中使用差分隐私。如果您还不熟悉差分隐私,可以参考 :doc:`explanation-" +"differential-privacy` 。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:10 #, fuzzy msgid "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" +"Differential Privacy in Flower is in a preview phase. If you plan to use " +"these features in a production environment with sensitive data, feel free" +" contact us to discuss your requirements and to receive guidance on how " +"to best use these features." 
msgstr "" -":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " -"error\\]\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Message:1 of -#, fuzzy -msgid "State of your application from the viewpoint of the entity using it." -msgstr "从使用实体的角度看应用程序的状态。" +"Flower " +"中的差异隐私处于预览阶段。如果您计划在生产环境中使用这些敏感数据功能,请随时联系我们,讨论您的需求,并获得如何最好地使用这些功能的指导。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:17 #, fuzzy -msgid ":py:obj:`MessageType `\\ \\(\\)" -msgstr ":py:obj:`MessageType `\\ \\(\\)" +msgid "" +"This approach consists of two separate phases: clipping of the updates " +"and adding noise to the aggregated model. For the clipping phase, Flower " +"framework has made it possible to decide whether to perform clipping on " +"the server side or the client side." +msgstr "这种方法包括两个独立的阶段:对更新进行剪切和在聚合模型中添加噪声。在剪切阶段,Flower 框架可以决定是在服务器端还是在客户端执行剪切。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageType:1 of +#: ../../source/how-to-use-differential-privacy.rst:21 #, fuzzy -msgid "Message type." -msgstr "信息类型。" +msgid "" +"**Server-side Clipping**: This approach has the advantage of the server " +"enforcing uniform clipping across all clients' updates and reducing the " +"communication overhead for clipping values. However, it also has the " +"disadvantage of increasing the computational load on the server due to " +"the need to perform the clipping operation for all clients." +msgstr "" +"** 服务器端剪切**: " +"这种方法的优点是服务器可对所有客户端的更新执行统一的剪切,并减少剪切值的通信开销。不过,这种方法也有缺点,那就是需要为所有客户端执行剪切操作,从而增加了服务器的计算负荷。" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:26 #, fuzzy -msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgid "" +"**Client-side Clipping**: This approach has the advantage of reducing the" +" computational overhead on the server. 
However, it also has the " +"disadvantage of lacking centralized control, as the server has less " +"control over the clipping process." +msgstr "**客户端剪切**: 这种方法的优点是可以减少服务器的计算开销。不过,它也有缺乏集中控制的缺点,因为服务器对剪切过程的控制较少。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.constant.MessageTypeLegacy:1 of +#: ../../source/how-to-use-differential-privacy.rst:31 #, fuzzy -msgid "Legacy message type." -msgstr "传统信息类型。" +msgid "Server-side Clipping" +msgstr "服务器端逻辑" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:33 #, fuzzy msgid "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"For central DP with server-side clipping, there are two ``Strategy`` " +"classes that act as wrappers around the actual ``Strategy`` instance (for" +" example, ``FedAvg``). The two wrapper classes are " +"``DifferentialPrivacyServerSideFixedClipping`` and " +"``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive " +"clipping." msgstr "" -":py:obj:`Metadata `\\ \\(run\\_id\\, " -"message\\_id\\, src\\_node\\_id\\, ...\\)" +"对于具有服务器端剪裁功能的中央 DP,有两个 :code:`Strategy` 类作为实际 :code:`Strategy` 实例(例如 " +":code:`FedAvg`)的包装器。这两个封装类分别是 " +":code:`DifferentialPrivacyServerSideFixedClipping` 和 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping` ,用于固定剪辑和自适应剪辑。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.message.Metadata:1 of +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid "A dataclass holding metadata associated with the current message." 
-msgstr "数据类型,包含与当前报文相关的元数据。" +msgid "server side clipping" +msgstr "服务器端逻辑" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:43 #, fuzzy msgid "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +"The code sample below enables the ``FedAvg`` strategy to use server-side " +"fixed clipping using the ``DifferentialPrivacyServerSideFixedClipping`` " +"wrapper class. The same approach can be used with " +"``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting the " +"corresponding input parameters." msgstr "" -":py:obj:`MetricsRecord `\\ " -"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +"下面的代码示例使用 :code:`DifferentialPrivacyServerSideFixedClipping` 封装类使 " +":code:`FedAvg` 策略使用服务器端固定剪辑。通过调整相应的输入参数,同样的方法也可用于 " +":code:`DifferentialPrivacyServerSideAdaptiveClipping`。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: ../../source/how-to-use-differential-privacy.rst:64 #, fuzzy -msgid "Metrics record." -msgstr "指标记录。" +msgid "Client-side Clipping" +msgstr "客户端逻辑" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:66 #, fuzzy -msgid ":py:obj:`NDArray `\\" -msgstr ":py:obj:`NDArray `\\" +msgid "" +"For central DP with client-side clipping, the server sends the clipping " +"value to selected clients on each round. Clients can use existing Flower " +"``Mods`` to perform the clipping. Two mods are available for fixed and " +"adaptive client-side clipping: ``fixedclipping_mod`` and " +"``adaptiveclipping_mod`` with corresponding server-side wrappers " +"``DifferentialPrivacyClientSideFixedClipping`` and " +"``DifferentialPrivacyClientSideAdaptiveClipping``." 
+msgstr "" +"对于带有客户端剪裁功能的中央 DP,服务器会在每一轮向选定的客户端发送剪裁值。客户端可以使用现有的 Flower " +":code:`Mods`来执行剪裁。有两种模式可用于固定和自适应客户端剪辑::code:`fixedclipping_mod` 和 " +":code:`adaptiveclipping_mod`,以及相应的服务器端封装 " +":code:`DifferentialPrivacyClientSideFixedClipping` 和 " +":code:`DifferentialPrivacyClientSideAdaptiveClipping`。" + +#: ../../source/how-to-use-differential-privacy.rst:-1 +#, fuzzy +msgid "client side clipping" +msgstr "客户端逻辑" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:78 #, fuzzy msgid "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +"The code sample below enables the ``FedAvg`` strategy to use differential" +" privacy with client-side fixed clipping using both the " +"``DifferentialPrivacyClientSideFixedClipping`` wrapper class and, on the " +"client, ``fixedclipping_mod``:" msgstr "" -"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " -":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +"下面的代码示例使用 :code:`DifferentialPrivacyClientSideFixedClipping` 封装类和客户端的 " +":code:`fixedclipping_mod` 使 :code:`FedAvg` 策略在客户端固定剪辑的情况下使用差分隐私:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:97 #, fuzzy msgid "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" +"In addition to the server-side strategy wrapper, the ``ClientApp`` needs " +"to configure the matching ``fixedclipping_mod`` to perform the client-" +"side clipping:" msgstr "" -":py:obj:`Parameters `\\ \\(tensors\\, " -"tensor\\_type\\)" - -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Parameters:1 of -msgid "Model parameters." 
-msgstr "模型参数。" +"除了服务器端策略包装器外,:code:`ClientApp` 还需要配置匹配的 :code:`fixedclipping_mod` " +"以执行客户端剪切:" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:115 #, fuzzy msgid "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"To utilize local differential privacy (DP) and add noise to the client " +"model parameters before transmitting them to the server in Flower, you " +"can use the `LocalDpMod`. The following hyperparameters need to be set: " +"clipping norm value, sensitivity, epsilon, and delta." msgstr "" -":py:obj:`ParametersRecord `\\ " -"\\(\\[array\\_dict\\, keep\\_input\\]\\)" +"要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower 服务器之前为其添加噪声,可以使用 " +"`LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 delta。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy -msgid "Parameters record." -msgstr "参数" +msgid "local DP mod" +msgstr "本地 DP 模式" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:125 #, fuzzy -msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" -msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +msgid "Below is a code example that shows how to use ``LocalDpMod``:" +msgstr "下面的代码示例展示了如何使用 :code:`LocalDpMod`:" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ReconnectIns:1 of -msgid "ReconnectIns message from server to client." -msgstr "服务器发送给客户端的重新连接信息。" - -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:140 #, fuzzy msgid "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" -msgstr "" -":py:obj:`RecordSet `\\ " -"\\(\\[parameters\\_records\\, ...\\]\\)" +"Please note that the order of mods, especially those that modify " +"parameters, is important when using multiple modifiers. 
Typically, " +"differential privacy (DP) modifiers should be the last to operate on " +"parameters." +msgstr "请注意,在使用多个修改器时,修改器(尤其是修改参数的修改器)的顺序非常重要。通常情况下,差分隐私 (DP) 修改器应最后对参数进行操作。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.record.recordset.RecordSet:1 of +#: ../../source/how-to-use-differential-privacy.rst:145 #, fuzzy -msgid "RecordSet stores groups of parameters, metrics and configs." -msgstr "RecordSet 可存储参数、指标和配置组。" +msgid "Local Training using Privacy Engines" +msgstr "使用隐私引擎进行本地培训" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-differential-privacy.rst:147 #, fuzzy msgid "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +"For ensuring data instance-level privacy during local model training on " +"the client side, consider leveraging privacy engines such as Opacus and " +"TensorFlow Privacy. For examples of using Flower with these engines, " +"please refer to the Flower examples directory (`Opacus " +"`_, `Tensorflow" +" Privacy `_)." msgstr "" -":py:obj:`ServerMessage `\\ " -"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +"要在客户端本地模型训练期间确保数据实例级隐私,可考虑利用 Opacus 和 TensorFlow Privacy 等隐私引擎。有关将 Flower" +" 与这些引擎结合使用的示例,请参阅 Flower 示例目录(`Opacus " +"`_, `Tensorflow" +" Privacy `_)。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.ServerMessage:1 of -msgid "ServerMessage is a container used to hold one instruction message." -msgstr "ServerMessage 是用于容纳一条指令信息的容器。" +#: ../../source/how-to-use-strategies.rst:2 +msgid "Use strategies" +msgstr "使用策略" -#: ../../source/ref-api/flwr.common.rst:64::1 +#: ../../source/how-to-use-strategies.rst:4 #, fuzzy -msgid ":py:obj:`Status `\\ \\(code\\, message\\)" -msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" +msgid "" +"Flower allows full customization of the learning process through the " +"``Strategy`` abstraction. A number of built-in strategies are provided in" +" the core framework." 
+msgstr "Flower 允许通过 :code:`Strategy` 抽象类对学习过程进行完全定制。核心框架中提供了许多内置策略。" -#: ../../source/ref-api/flwr.common.rst:64::1 -#: flwr.common.typing.Status:1 of -msgid "Client status." -msgstr "客户端状态。" +#: ../../source/how-to-use-strategies.rst:7 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "有三种方法可以自定义 Flower 在服务器端协调学习过程的方式:" -#: ../../source/ref-api/flwr.common.Array.rst:2 +#: ../../source/how-to-use-strategies.rst:10 #, fuzzy -msgid "Array" -msgstr "数组" +msgid "Use an existing strategy, for example, ``FedAvg``" +msgstr "使用现有策略,例如 :code:`FedAvg`" -#: flwr.common.record.parametersrecord.Array:3 of -#, fuzzy +#: ../../source/how-to-use-strategies.rst:11 +#: ../../source/how-to-use-strategies.rst:43 +msgid "Customize an existing strategy with callback functions" +msgstr "使用回调函数定制现有策略" + +#: ../../source/how-to-use-strategies.rst:12 +#: ../../source/how-to-use-strategies.rst:99 +msgid "Implement a novel strategy" +msgstr "实施新策略" + +#: ../../source/how-to-use-strategies.rst:15 +msgid "Use an existing strategy" +msgstr "使用现有策略" + +#: ../../source/how-to-use-strategies.rst:17 msgid "" -"A dataclass containing serialized data from an array-like or tensor-like " -"object along with some metadata about it." -msgstr "数据类,包含数组类或张量类对象的序列化数据以及相关元数据。" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" +msgstr "Flower 内置了许多流行的联邦学习策略。内置策略的实例化方法如下:" -#: flwr.common.record.parametersrecord.Array:6 of +#: ../../source/how-to-use-strategies.rst:27 #, fuzzy msgid "" -"A string representing the data type of the serialised object (e.g. " -"`np.float32`)" -msgstr "表示序列化对象数据类型的字符串(例如 `np.float32`)" +"This creates a strategy with all parameters left at their default values " +"and passes it to the ``start_server`` function. 
It is usually recommended" +" to adjust a few parameters during instantiation:" +msgstr "这会创建一个所有参数都保持默认值的策略,并将其传递给 :code:`start_server` 函数。通常建议在实例化过程中调整一些参数:" -#: flwr.common.record.parametersrecord.Array:8 of -#, fuzzy +#: ../../source/how-to-use-strategies.rst:45 msgid "" -"A list representing the shape of the unserialized array-like object. This" -" is used to deserialize the data (depending on the serialization method) " -"or simply as a metadata field." -msgstr "代表未序列化数组对象形状的列表。它可用于反序列化数据(取决于序列化方法),或仅作为元数据字段使用。" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." +msgstr "现有的策略提供了多种自定义行为的方法。回调函数允许策略在执行过程中调用用户提供的代码。" -#: flwr.common.record.parametersrecord.Array:12 of +#: ../../source/how-to-use-strategies.rst:49 +msgid "Configuring client fit and client evaluate" +msgstr "配置客户匹配和客户评估" + +#: ../../source/how-to-use-strategies.rst:51 #, fuzzy msgid "" -"A string indicating the type of serialisation mechanism used to generate " -"the bytes in `data` from an array-like or tensor-like object." -msgstr "表示序列化机制类型的字符串,用于从类似数组或类似张量的对象中生成 `data` 中的字节。" +"The server can pass new configuration values to the client each round by " +"providing a function to ``on_fit_config_fn``. The provided function will " +"be called by the strategy and must return a dictionary of configuration " +"key values pairs that will be sent to the client. It must return a " +"dictionary of arbitrary configuration values ``client.fit`` and " +"``client.evaluate`` functions during each round of federated learning." +msgstr "" +"服务器可以通过向 :code:`on_fit_config_fn` " +"提供一个函数,在每一轮向客户端传递新的配置值。提供的函数将被策略调用,并且必须返回一个配置键值对的字典,该字典将被发送到客户端。在每一轮联邦学习期间,它必须返回一个任意配置值" +" dictionary :code:`client.fit`和 :code:`client.evaluate`函数。" -#: flwr.common.record.parametersrecord.Array:15 of +#: ../../source/how-to-use-strategies.rst:84 #, fuzzy -msgid "A buffer of bytes containing the data." 
-msgstr "包含数据的字节缓冲区。" +msgid "" +"The ``on_fit_config_fn`` can be used to pass arbitrary configuration " +"values from server to client, and potentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the ``on_fit_config_fn`` in its own " +"``client.fit()`` function." +msgstr "" +":code:`on_fit_config_fn`可用于将任意配置值从服务器传递到客户端,并在每一轮改变这些值,例如,调整学习率。客户端将在自己的 " +":code:`client.fit()` 函数中接收 :code:`on_fit_config_fn` 返回的字典。" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: ../../source/how-to-use-strategies.rst:89 #, fuzzy -msgid ":py:obj:`numpy `\\ \\(\\)" -msgstr "server.strategy.Strategy" +msgid "" +"Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` " +"to customize the configuration sent to ``client.evaluate()``" +msgstr "" +"与 :code:`on_fit_config_fn` 类似,还有 :code:`on_evaluate_config_fn` 用于定制发送到 " +":code:`client.evaluate()` 的配置" -#: ../../source/ref-api/flwr.common.Array.rst:26::1 -#: flwr.common.record.parametersrecord.Array.numpy:1 of -#, fuzzy -msgid "Return the array as a NumPy array." -msgstr "以 NumPy ndarrays 列表形式返回模型参数" +#: ../../source/how-to-use-strategies.rst:93 +msgid "Configuring server-side evaluation" +msgstr "配置服务器端评估" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of +#: ../../source/how-to-use-strategies.rst:95 #, fuzzy -msgid ":py:obj:`dtype `\\" -msgstr ":py:obj:`dtype `\\" +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to ``evaluate_fn``." +msgstr "服务器端评估可通过向 :code:`evaluate_fn` 传递评估函数来启用。" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -#, fuzzy -msgid ":py:obj:`shape `\\" -msgstr "server.strategy.Strategy" +#: ../../source/how-to-use-strategies.rst:101 +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." 
+msgstr "" +"编写完全自定义的策略涉及的内容较多,但灵活性最高。阅读 `实施策略 _ " +"指南,了解更多信息。" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -#, fuzzy -msgid ":py:obj:`stype `\\" -msgstr "server.strategy.Strategy" +#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "教程" -#: flwr.common.record.parametersrecord.Array.numpy:1::1 of -#, fuzzy -msgid ":py:obj:`data `\\" -msgstr ":py:obj:`data `\\" +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "快速入门教程" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 -#, fuzzy -msgid "ClientMessage" -msgstr "客户端" +#: ../../source/index.rst:81 ../../source/index.rst:85 +msgid "How-to guides" +msgstr "操作指南" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -#, fuzzy -msgid ":py:obj:`evaluate_res `\\" -msgstr ":py:obj:`evaluate_res `\\" +#: ../../source/index.rst:106 +msgid "Legacy example guides" +msgstr "旧版指南范例" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -#, fuzzy -msgid ":py:obj:`fit_res `\\" -msgstr ":py:obj:`fit_res `\\" +#: ../../source/index.rst:114 ../../source/index.rst:119 +msgid "Explanations" +msgstr "说明" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -#, fuzzy -msgid "" -":py:obj:`get_parameters_res " -"`\\" -msgstr "" -":py:obj:`get_parameters_res " -"`\\" +#: None:-1 +msgid "API reference" +msgstr "应用程序接口参考" -#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 -#, fuzzy -msgid "" -":py:obj:`get_properties_res " -"`\\" -msgstr "" -":py:obj:`get_properties_res " -"`\\" +#: ../../source/index.rst:145 +msgid "Reference docs" +msgstr "参考文档" -#: ../../source/ref-api/flwr.common.Code.rst:2 -#, fuzzy -msgid "Code" -msgstr "代码" +#: ../../source/index.rst:160 +msgid "Contributor tutorials" +msgstr "贡献者教程" -#: flwr.common.typing.Code:1 of -#, fuzzy -msgid "Bases: :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`~enum.Enum`" +#: ../../source/index.rst:167 +msgid "Contributor how-to guides" +msgstr "投稿指南" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -#, fuzzy 
-msgid ":py:obj:`OK `\\" -msgstr ":py:obj:`OK `\\" +#: ../../source/index.rst:179 +msgid "Contributor explanations" +msgstr "贡献者解释" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -#, fuzzy -msgid "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" -msgstr "" -":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " -"`\\" +#: ../../source/index.rst:185 +msgid "Contributor references" +msgstr "贡献者参考资料" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -#, fuzzy +#: ../../source/index.rst:-1 msgid "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" -msgstr "" -":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " -"`\\" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." +msgstr "查看主 Flower Framework 的文档,轻松实现联邦学习的 Python 开发。" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -#, fuzzy -msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" +msgstr "Flower 框架文档" -#: ../../source/ref-api/flwr.common.Code.rst:26::1 -#, fuzzy +#: ../../source/index.rst:7 msgid "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" -msgstr "" -":py:obj:`EVALUATE_NOT_IMPLEMENTED " -"`\\" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." 
+msgstr "欢迎访问 Flower 文档。`Flower `_ 是一个友好的联邦学习框架。" -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 -#, fuzzy -msgid "ConfigsRecord" -msgstr "配置日志记录" +#: ../../source/index.rst:11 +msgid "Join the Flower Community" +msgstr "加入 Flower 社区" -#: flwr.common.record.configsrecord.ConfigsRecord:1 of -#, fuzzy +#: ../../source/index.rst:13 msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" -" :py:class:`bytes` | :py:class:`bool` | :py:class:`~typing.List`\\ " -"[:py:class:`int`] | :py:class:`~typing.List`\\ [:py:class:`float`] | " -":py:class:`~typing.List`\\ [:py:class:`str`] | :py:class:`~typing.List`\\" -" [:py:class:`bytes`] | :py:class:`~typing.List`\\ [:py:class:`bool`]]" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." +msgstr "Flower 社区发展迅速--我们是一个由研究人员、工程师、学生、专业人士、学者和其他爱好者组成的友好团体。" + +#: ../../source/index.rst:16 +msgid "Join us on Slack" +msgstr "在 Slack 上加入我们" + +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "Flower 框架" + +#: ../../source/index.rst:25 +msgid "" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." 
msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " -":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " -":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " -"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " -":py:class:`~typing.List`\\ [:py:class:`bytes`], " -":py:class:`~typing.List`\\ [:py:class:`bool`]]]" +"该用户指南面向希望使用 Flower 将现有机器学习工作负载引入联邦环境的研究人员和开发人员。Flower " +"的设计目标之一就是让这一切变得简单。请继续阅读,了解更多信息。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +#: ../../source/index.rst:30 +msgid "Tutorials" +msgstr "教程" -#: flwr.common.record.typeddict.TypedDict.clear:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid "Remove all items from R." -msgstr "从 R 中删除所有项目。" +#: ../../source/index.rst:32 +msgid "" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." 
+msgstr "以学习为导向的联邦学习教程系列,最好的起点。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/index.rst:62 #, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas " +"` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | " +":doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS `" +msgstr "" +"快速入门教程: :doc:`PyTorch ` | :doc:`TensorFlow " +"` | :doc:`🤗 Transformers ` | :doc:`JAX ` | " +":doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`MXNet ` | :doc" +":`scikit-learn ` | :doc:`XGBoost " +"` | :doc:`Android ` | :doc:`iOS `" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 -#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid "Return number of Bytes stored in this object." -msgstr "返回存储在此对象中的字节数。" +#: ../../source/index.rst:70 +msgid "We also made video tutorials for PyTorch:" +msgstr "我们还为 PyTorch 制作了视频教程:" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +#: ../../source/index.rst:75 +msgid "And TensorFlow:" +msgstr "还有 TensorFlow:" + +#: ../../source/index.rst:83 +msgid "" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." +msgstr "以问题为导向的 \"如何做 \"指南逐步展示如何实现特定目标。" + +#: ../../source/index.rst:116 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." 
+msgstr "以理解为导向的概念指南解释并讨论了Flower和协作式人工智能背后的关键主题和基本思想。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.get:1 of +#: ../../source/index.rst:128 +msgid "References" +msgstr "参考资料" + +#: ../../source/index.rst:130 +msgid "Information-oriented API reference and other reference material." +msgstr "以信息为导向的 API 参考资料和其他参考资料。" + +#: ../../source/index.rst:139::1 #, fuzzy -msgid "d defaults to None." -msgstr "d 默认为 \"无\"。" +msgid ":py:obj:`flwr `\\" +msgstr ":py:obj:`flwr `\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/index.rst:139::1 flwr:1 of #, fuzzy -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +msgid "Flower main package." +msgstr "Flower 主包装。" + +#: ../../source/index.rst:155 +msgid "Contributor docs" +msgstr "贡献者文档" + +#: ../../source/index.rst:157 +msgid "" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." +msgstr "Flower 社区欢迎您的贡献。以下文档旨在为您提供帮助。" + +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" +msgstr "Flower CLI 参考" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api-cli.rst:7 #, fuzzy -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +msgid "flwr CLI" +msgstr "Flower 客户端" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../flwr:1 #, fuzzy -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgid "flwr is the Flower command line interface." +msgstr "注册 Flower ClientProxy 实例。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.pop:1 of +#: ../../source/ref-api-cli.rst #, fuzzy -msgid "If key is not found, d is returned if given, otherwise KeyError is raised." 
-msgstr "如果未找到 key,则返回 d(如果给定),否则引发 KeyError。" +msgid "Options" +msgstr "解决方案" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../flwr:1 #, fuzzy +msgid "Install completion for the current shell." +msgstr "当前运行的标识符。" + +#: ../../flwr:1 msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +"Show completion for the current shell, to copy it or customize the " +"installation." msgstr "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 -#: flwr.common.record.typeddict.TypedDict.update:1 of -#, fuzzy -msgid "Update R from dict/iterable E and F." -msgstr "根据二进制/可迭代 E 和 F 更新 R。" +#: ../../flwr build:1 +msgid "Build a Flower App into a Flower App Bundle (FAB)." +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +#: ../../flwr build:1 +msgid "" +"You can run ``flwr build`` without any arguments to bundle the app " +"located in the current directory. Alternatively, you can you can specify " +"a path using the ``--app`` option to bundle an app located at the " +"provided path. For example:" +msgstr "" -#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of -#, fuzzy -msgid "This function counts booleans as occupying 1 Byte." -msgstr "该函数将布尔值计算为占用 1 个字节。" +#: ../../flwr build:1 +msgid "``flwr build --app ./apps/flower-hello-world``." +msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:2 -#, fuzzy -msgid "Context" -msgstr "背景" +#: ../../flwr build:1 +msgid "Path of the Flower App to bundle into a FAB" +msgstr "" -#: flwr.common.context.Context:3 of +#: ../../flwr install:1 #, fuzzy +msgid "Install a Flower App Bundle." 
+msgstr "安装Flower" + +#: ../../flwr install:1 +msgid "It can be ran with a single FAB file argument:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab``" +msgstr "" + +#: ../../flwr install:1 +msgid "The target install directory can be specified with ``--flwr-dir``:" +msgstr "" + +#: ../../flwr install:1 +msgid "``flwr install ./target_project.fab --flwr-dir ./docs/flwr``" +msgstr "" + +#: ../../flwr install:1 msgid "" -"Holds records added by the entity in a given run and that will stay " -"local. This means that the data it holds will never leave the system it's" -" running from. This can be used as an intermediate storage or scratchpad " -"when executing mods. It can also be used as a memory to access at " -"different points during the lifecycle of this entity (e.g. across " -"multiple rounds)" -msgstr "保存实体在给定运行中添加的记录,这些记录将保留在本地。这意味着它保存的数据永远不会离开运行的系统。在执行模式时,它可用作中间存储或抓取板。它还可以作为存储器,在实体生命周期的不同阶段(如多轮)进行访问。" +"This will install ``target_project`` to ``./docs/flwr/``. By default, " +"``flwr-dir`` is equal to:" +msgstr "" -#: ../../source/ref-api/flwr.common.Context.rst:28::1 -#, fuzzy -msgid ":py:obj:`state `\\" -msgstr "server.strategy.Strategy" +#: ../../flwr install:1 +msgid "``$FLWR_HOME/`` if ``$FLWR_HOME`` is defined" +msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 -#, fuzzy -msgid "DisconnectRes" -msgstr "断开Res" +#: ../../flwr install:1 +msgid "``$XDG_DATA_HOME/.flwr/`` if ``$XDG_DATA_HOME`` is defined" +msgstr "" -#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 -#, fuzzy -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +#: ../../flwr install:1 +msgid "``$HOME/.flwr/`` in all other cases" +msgstr "" -#: ../../source/ref-api/flwr.common.Error.rst:2 -#, fuzzy -msgid "Error" -msgstr "错误" +#: ../../flwr install:1 +msgid "The desired install path." +msgstr "" -#: flwr.common.message.Error:3 of +#: ../../source/ref-api-cli.rst #, fuzzy -msgid "An identifier for the error." 
-msgstr "错误的标识符。" +msgid "Arguments" +msgstr "参数解析器" -#: flwr.common.message.Error:5 of +#: ../../flwr install:1 log:1 new:1 run:1 #, fuzzy -msgid "A reason for why the error arose (e.g. an exception stack-trace)" -msgstr "出错原因(如异常堆栈跟踪)" +msgid "Optional argument" +msgstr "可选的改进措施" -#: flwr.common.Error.code:1::1 of -#, fuzzy -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" +#: ../../flwr install:1 +msgid "The source FAB file to install." +msgstr "" -#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of -#, fuzzy -msgid "Error code." -msgstr "错误代码。" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." +msgstr "" -#: flwr.common.Error.code:1::1 of -#, fuzzy -msgid ":py:obj:`reason `\\" -msgstr ":py:obj:`reason `\\" +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" -#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of +#: ../../flwr log run #, fuzzy -msgid "Reason reported about the error." -msgstr "报告的错误原因。" +msgid "default" +msgstr "工作流程" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 -#, fuzzy -msgid "EvaluateIns" -msgstr "说明" +#: ../../flwr log:1 +msgid "``True``" +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +#: ../../flwr log:1 #, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +msgid "Required argument" +msgstr "构建文档" -#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +#: ../../flwr log:1 #, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +msgid "The Flower run ID to query" +msgstr "加入 Flower 社区" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 -#, fuzzy -msgid "EvaluateRes" -msgstr "评估Res" +#: ../../flwr log:1 +msgid "Path of the Flower project to run" +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: ../../flwr log:1 +msgid "Name of the federation to run the app on" +msgstr "" -#: 
../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +#: ../../flwr new:1 #, fuzzy -msgid ":py:obj:`loss `\\" -msgstr ":py:obj:`loss `\\" +msgid "Create new Flower App." +msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" +#: ../../flwr new:1 +msgid "The ML framework to use" +msgstr "" -#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +#: ../../flwr new #, fuzzy -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" +msgid "options" +msgstr "解决方案" -#: ../../source/ref-api/flwr.common.EventType.rst:2 +#: ../../flwr new:1 +msgid "" +"PyTorch | TensorFlow | sklearn | HuggingFace | JAX | MLX | NumPy | " +"FlowerTune | Flower Baseline" +msgstr "" + +#: ../../flwr new:1 +msgid "The Flower username of the author" +msgstr "" + +#: ../../flwr new:1 #, fuzzy -msgid "EventType" -msgstr "返回类型" +msgid "The name of the Flower App" +msgstr "基础镜像的存储库名称。" -#: flwr.common.telemetry.EventType:1 of +#: ../../flwr run:1 #, fuzzy -msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" -msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgid "Run Flower App." +msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../flwr run:1 +msgid "Override configuration key-value pairs, should be of the format:" +msgstr "" + +#: ../../flwr run:1 msgid "" -":py:obj:`encode `\\ \\(\\[encoding\\, " -"errors\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +"`--run-config 'key1=\"value1\" key2=\"value2\"' --run-config " +"'key3=\"value3\"'`" +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.encode:1 of -#, fuzzy -msgid "Encode the string using the codec registered for encoding." -msgstr "使用注册的编码解码器对字符串进行编码。" +#: ../../flwr run:1 +msgid "" +"Note that `key1`, `key2`, and `key3` in this example need to exist inside" +" the `pyproject.toml` in order to be properly overriden." 
+msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: ../../flwr run:1 msgid "" -":py:obj:`replace `\\ \\(old\\, new\\[\\, " -"count\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +"Use `--stream` with `flwr run` to display logs; logs are not streamed by " +"default." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.replace:1 of +#: ../../flwr run:1 #, fuzzy -msgid "Return a copy with all occurrences of substring old replaced by new." -msgstr "返回用 new 替换子串 old 的所有出现次数的副本。" +msgid "``False``" +msgstr "``FLWR_VERSION``" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../flwr run:1 #, fuzzy -msgid "" -":py:obj:`split `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" -msgstr ":py:obj:`PING `\\" +msgid "Path of the Flower App to run." +msgstr "基础镜像的存储库名称。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of -#, fuzzy -msgid "" -"Return a list of the substrings in the string, using sep as the separator" -" string." -msgstr "使用 sep 作为分隔符,返回字符串中的子字符串列表。" +#: ../../flwr run:1 +msgid "Name of the federation to run the app on." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:16 #, fuzzy -msgid "" -":py:obj:`rsplit `\\ \\(\\[sep\\, " -"maxsplit\\]\\)" -msgstr ":py:obj:`PING `\\" +msgid "flower-simulation" +msgstr "运行模拟" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/ref-api-cli.rst:26 +msgid "flower-superlink" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.join:1 of +#: ../../source/ref-api-cli.rst:36 #, fuzzy -msgid "Concatenate any number of strings." 
-msgstr "连接任意数量的字符串。" +msgid "flower-supernode" +msgstr "Flower 服务器" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:46 #, fuzzy -msgid ":py:obj:`capitalize `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid "flower-server-app" +msgstr "flower-driver-api" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.capitalize:1 of -#, fuzzy -msgid "Return a capitalized version of the string." -msgstr "返回字符串的大写版本。" +#: ../../source/ref-api-cli.rst:50 +msgid "" +"Note that since version ``1.11.0``, ``flower-server-app`` no longer " +"supports passing a reference to a `ServerApp` attribute. Instead, you " +"need to pass the path to Flower app via the argument ``--app``. This is " +"the path to a directory containing a `pyproject.toml`. You can create a " +"valid Flower app by executing ``flwr new`` and following the prompt." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api-cli.rst:64 #, fuzzy -msgid ":py:obj:`casefold `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "flower-superexec" +msgstr "flower-superlink" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.casefold:1 of +#: ../../source/ref-api/flwr.rst:2 #, fuzzy -msgid "Return a version of the string suitable for caseless comparisons." -msgstr "返回适合无例比较的字符串版本。" +msgid "flwr" +msgstr "Flower" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:43 ../../source/ref-api/flwr.rst:25 +#: ../../source/ref-api/flwr.server.rst:48 #, fuzzy -msgid ":py:obj:`title `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "Modules" +msgstr "模块" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.title:1 of +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid "Return a version of the string where each word is titlecased." 
-msgstr "返回字符串的版本,其中每个单词都使用了标题大小写。" +msgid ":py:obj:`flwr.client `\\" +msgstr ":py:obj:`flwr.client `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`center `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of +msgid "Flower client." +msgstr "Flower 客户端。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.center:1 of +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid "Return a centered string of length width." -msgstr "返回客户端的属性集。" +msgid ":py:obj:`flwr.common `\\" +msgstr ":py:obj:`flwr.common `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "服务器和客户端共享的通用组件。" + +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid "" -":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +msgid ":py:obj:`flwr.server `\\" +msgstr ":py:obj:`flwr.server `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 +#: ../../source/ref-api/flwr.server.rst:37::1 flwr.server:1 +#: flwr.server.server.Server:1 of +msgid "Flower server." +msgstr "Flower 服务器。" + +#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]." -msgstr "返回子字符串 sub 在字符串 S[start:end] 中非重叠出现的次数。" +msgid ":py:obj:`flwr.simulation `\\" +msgstr ":py:obj:`flwr.simulation `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of #, fuzzy -msgid "" -":py:obj:`expandtabs `\\ " -"\\(\\[tabsize\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "Flower simulation." 
+msgstr "运行模拟" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.expandtabs:1 of +#: ../../source/ref-api/flwr.client.rst:2 +msgid "client" +msgstr "客户端" + +#: ../../source/ref-api/flwr.client.mod.rst:13 +#: ../../source/ref-api/flwr.client.rst:13 +#: ../../source/ref-api/flwr.common.rst:13 +#: ../../source/ref-api/flwr.server.rst:13 +#: ../../source/ref-api/flwr.simulation.rst:13 #, fuzzy -msgid "Return a copy where all tab characters are expanded using spaces." -msgstr "返回使用空格扩展所有制表符的副本。" +msgid "Functions" +msgstr "四种函数:" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:23::1 #, fuzzy msgid "" -":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`PING `\\" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" +msgstr "" +":py:obj:`start_client `\\ \\(\\*\\, " +"server\\_address\\[\\, client\\_fn\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." +msgstr "启动一个 Flower 客户节点,连接到 Flower 服务器。" + +#: ../../source/ref-api/flwr.client.rst:23::1 #, fuzzy msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]." -msgstr "返回在 S 中找到子串 sub 的最低索引,且 sub 包含在 S[start:end] 中。" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" +msgstr "" +":py:obj:`start_numpy_client `\\ \\(\\*\\," +" server\\_address\\, client\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" -msgstr ":py:obj:`partition_id `\\" +#: ../../source/ref-api/flwr.client.rst:23::1 +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." 
+msgstr "启动 Flower NumPyClient,连接到 gRPC 服务器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of +#: ../../source/ref-api/flwr.client.mod.rst:30 +#: ../../source/ref-api/flwr.client.rst:25 +#: ../../source/ref-api/flwr.common.rst:32 +#: ../../source/ref-api/flwr.server.rst:24 +#: ../../source/ref-api/flwr.server.strategy.rst:17 +#: ../../source/ref-api/flwr.server.workflow.rst:17 #, fuzzy -msgid "Partition the string into three parts using the given separator." -msgstr "使用给定的分隔符将字符串分为三部分。" +msgid "Classes" +msgstr "类别" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:32::1 #, fuzzy -msgid "" -":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +msgid ":py:obj:`Client `\\ \\(\\)" +msgstr ":py:obj:`Client `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." +msgstr "Flower 客户端的抽象基类。" + +#: ../../source/ref-api/flwr.client.rst:32::1 #, fuzzy msgid "" -":py:obj:`ljust `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" +msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.ljust:1 of +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.client_app.ClientApp:1 of #, fuzzy -msgid "Return a left-justified string of length width." -msgstr "返回长度为 width 的左对齐字符串。" +msgid "Flower ClientApp." 
+msgstr "Flower 客户端。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:32::1 #, fuzzy -msgid ":py:obj:`lower `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" +msgid ":py:obj:`NumPyClient `\\ \\(\\)" +msgstr ":py:obj:`NumPyClient `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lower:1 of -#, fuzzy -msgid "Return a copy of the string converted to lowercase." -msgstr "返回转换为小写的字符串副本。" +#: ../../source/ref-api/flwr.client.rst:32::1 +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." +msgstr "使用 NumPy 的 Flower 客户端的抽象基类。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.rst:50::1 #, fuzzy -msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid ":py:obj:`flwr.client.mod `\\" +msgstr ":py:obj:`flwr.client `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.lstrip:1 of +#: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of #, fuzzy -msgid "Return a copy of the string with leading whitespace removed." -msgstr "返回去掉前导空白的字符串副本。" +msgid "Flower Built-in Mods." +msgstr "使用内置调制器" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client.Client:1 flwr.client.numpy_client.NumPyClient:1 +#: flwr.server.client_manager.ClientManager:1 +#: flwr.server.driver.driver.Driver:1 flwr.server.strategy.strategy.Strategy:1 +#: of #, fuzzy -msgid "" -":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " -"end\\]\\]\\)" -msgstr ":py:obj:`PING `\\" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]." 
-msgstr "返回在 S 中找到子串 sub 的最高索引,且 sub 包含在 S[start:end] 中。" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`rindex `\\ \\(sub\\[\\, " -"start\\[\\, end\\]\\]\\)" -msgstr ":py:obj:`Context `\\ \\(state\\)" +msgid "Bases: :py:class:`~abc.ABC`" +msgstr "Bases: :py:class:`~abc.ABC`" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.Client.rst:15 +#: ../../source/ref-api/flwr.client.ClientApp.rst:15 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:15 +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:15 +#: ../../source/ref-api/flwr.common.Array.rst:15 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:15 +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Context.rst:15 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:15 +#: ../../source/ref-api/flwr.common.Error.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:15 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:15 +#: ../../source/ref-api/flwr.common.EventType.rst:15 +#: ../../source/ref-api/flwr.common.FitIns.rst:15 +#: ../../source/ref-api/flwr.common.FitRes.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:15 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:15 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:15 +#: ../../source/ref-api/flwr.common.Message.rst:15 +#: ../../source/ref-api/flwr.common.MessageType.rst:15 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:15 +#: ../../source/ref-api/flwr.common.Metadata.rst:15 +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:15 +#: ../../source/ref-api/flwr.common.Parameters.rst:15 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:15 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:15 +#: ../../source/ref-api/flwr.common.RecordSet.rst:15 +#: 
../../source/ref-api/flwr.common.ServerMessage.rst:15 +#: ../../source/ref-api/flwr.common.Status.rst:15 +#: ../../source/ref-api/flwr.server.ClientManager.rst:15 +#: ../../source/ref-api/flwr.server.Driver.rst:15 +#: ../../source/ref-api/flwr.server.History.rst:15 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:15 +#: ../../source/ref-api/flwr.server.Server.rst:15 +#: ../../source/ref-api/flwr.server.ServerApp.rst:15 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:15 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:15 +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:15 +#: 
../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:15 +#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:15 +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:15 +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:15 +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:15 #, fuzzy -msgid "" -":py:obj:`rjust `\\ \\(width\\[\\, " -"fillchar\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "Methods" +msgstr "方法" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rjust:1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "Return a right-justified string of length width." -msgstr "返回长度为 width 的右对齐字符串。" +msgid ":py:obj:`evaluate `\\ \\(ins\\)" +msgstr ":py:obj:`evaluate `\\ \\(ins\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "使用本地数据集评估所提供的参数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.rstrip:1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "Return a copy of the string with trailing whitespace removed." 
-msgstr "返回去掉尾部空白的字符串副本。" +msgid ":py:obj:`fit `\\ \\(ins\\)" +msgstr ":py:obj:`fit `\\ \\(ins\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" -msgstr ":py:obj:`partition_id `\\" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." +msgstr "利用本地数据集完善所提供的参数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid "" -":py:obj:`splitlines `\\ " -"\\(\\[keepends\\]\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.splitlines:1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_context:1 +#: flwr.client.numpy_client.NumPyClient.get_context:1 of #, fuzzy -msgid "Return a list of the lines in the string, breaking at line boundaries." -msgstr "返回字符串中的行列表,以行为分界线。" +msgid "Get the run context from this client." +msgstr "评估客户端的反应。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid ":py:obj:`get_parameters `\\ \\(ins\\)" +msgstr ":py:obj:`get_parameters `\\ \\(ins\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.strip:1 of -#, fuzzy -msgid "Return a copy of the string with leading and trailing whitespace removed." 
-msgstr "返回去掉前导和尾部空白的字符串副本。" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." +msgstr "返回当前本地模型参数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`swapcase `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid ":py:obj:`get_properties `\\ \\(ins\\)" +msgstr ":py:obj:`get_properties `\\ \\(ins\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.swapcase:1 of -#, fuzzy -msgid "" -"Convert uppercase characters to lowercase and lowercase characters to " -"uppercase." -msgstr "将大写字母转换为小写字母,将小写字母转换为大写字母。" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." +msgstr "返回客户端的属性集。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid ":py:obj:`set_context `\\ \\(context\\)" +msgstr ":py:obj:`set_context `\\ \\(context\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.translate:1 of +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.client.Client.set_context:1 +#: flwr.client.numpy_client.NumPyClient.set_context:1 of #, fuzzy -msgid "Replace each character in the string using the given translation table." -msgstr "使用给定的翻译表替换字符串中的每个字符。" +msgid "Apply a run context to this client." 
+msgstr "将运行上下文应用于该客户端。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.Client.rst:44::1 #, fuzzy -msgid ":py:obj:`upper `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.upper:1 of -#, fuzzy -msgid "Return a copy of the string converted to uppercase." -msgstr "返回转换为大写字符串的副本。" +#: ../../source/ref-api/flwr.client.Client.rst:44::1 +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." +msgstr "返回客户端(本身)。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.Client.rst:46 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:46 +#: ../../source/ref-api/flwr.common.Array.rst:28 +#: ../../source/ref-api/flwr.common.ClientMessage.rst:25 +#: ../../source/ref-api/flwr.common.Code.rst:19 +#: ../../source/ref-api/flwr.common.Context.rst:25 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:25 +#: ../../source/ref-api/flwr.common.Error.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:25 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:25 +#: ../../source/ref-api/flwr.common.EventType.rst:165 +#: ../../source/ref-api/flwr.common.FitIns.rst:25 +#: ../../source/ref-api/flwr.common.FitRes.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:25 +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:25 +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:25 +#: ../../source/ref-api/flwr.common.Message.rst:37 +#: ../../source/ref-api/flwr.common.MessageType.rst:25 +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:25 +#: ../../source/ref-api/flwr.common.Metadata.rst:25 +#: ../../source/ref-api/flwr.common.Parameters.rst:25 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:25 +#: 
../../source/ref-api/flwr.common.RecordSet.rst:25 +#: ../../source/ref-api/flwr.common.ServerMessage.rst:25 +#: ../../source/ref-api/flwr.common.Status.rst:25 +#: ../../source/ref-api/flwr.server.Driver.rst:40 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:25 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:25 +#: ../../source/ref-api/flwr.server.ServerConfig.rst:25 #, fuzzy -msgid "" -":py:obj:`startswith `\\ \\(prefix\\[\\," -" start\\[\\, end\\]\\]\\)" -msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" +msgid "Attributes" +msgstr "属性" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.Client.context:1::1 of #, fuzzy -msgid "Return True if S starts with the specified prefix, False otherwise." -msgstr "如果 S 以指定前缀开头,则返回 True,否则返回 False。" +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "" -":py:obj:`endswith `\\ \\(suffix\\[\\, " -"start\\[\\, end\\]\\]\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.client.Client.context:1 flwr.client.Client.context:1::1 +#: flwr.client.NumPyClient.context:1 +#: flwr.client.NumPyClient.context:1::1 of +msgid "Getter for `Context` client attribute." +msgstr "" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "Return True if S ends with the specified suffix, False otherwise." 
-msgstr "如果 S 以指定后缀结束,则返回 True,否则返回 False。" +#: ../../source/ref-api/flwr.common.Parameters.rst:2 +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of +msgid "Parameters" +msgstr "参数" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: flwr.client.client.Client.evaluate:3 of msgid "" -":py:obj:`removeprefix `\\ " -"\\(prefix\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." +msgstr "评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值字典。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removeprefix:1 of -#, fuzzy -msgid "Return a str with the given prefix string removed if present." 
-msgstr "返回一个字符串,如果存在,则去掉给定的前缀字符串。" +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Returns" +msgstr "返回" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy +#: flwr.client.client.Client.evaluate:8 of msgid "" -":py:obj:`removesuffix `\\ " -"\\(suffix\\, \\/\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." 
+msgstr "评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.removesuffix:1 of -#, fuzzy -msgid "Return a str with the given suffix string removed if present." -msgstr "返回一个字符串,如果存在给定的后缀字符串,则将其删除。" +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of +msgid "Return type" +msgstr "返回类型" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isascii `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +#: flwr.client.client.Client.fit:3 of +msgid "" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the 
local training process." +msgstr "训练指令,包含从服务器接收的(全局)模型参数,以及用于定制本地训练过程的配置值字典。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isascii:1 of -#, fuzzy -msgid "Return True if all characters in the string are ASCII, False otherwise." -msgstr "如果字符串中的所有字符都是 ASCII 码,则返回 True,否则返回 False。" +#: flwr.client.client.Client.fit:8 of +msgid "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." +msgstr "训练结果包含更新的参数和其他详细信息,如用于训练的本地训练示例的数量。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`islower `\\ \\(\\)" -msgstr ":py:obj:`now `\\ \\(\\)" +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." +msgstr "从服务器接收的获取参数指令包含配置值字典。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.islower:1 of -#, fuzzy -msgid "Return True if the string is a lowercase string, False otherwise." -msgstr "如果字符串是小写字符串,则返回 True,否则返回 False。" +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." +msgstr "当前的本地模型参数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isupper `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." +msgstr "从服务器接收的获取属性指令包含配置值字典。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isupper:1 of -#, fuzzy -msgid "Return True if the string is an uppercase string, False otherwise." -msgstr "如果字符串是大写字符串,则返回 True,否则返回 False。" +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." 
+msgstr "当前客户端属性。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.ClientApp.rst:2 #, fuzzy -msgid ":py:obj:`istitle `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "ClientApp" +msgstr "客户端" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.istitle:1 of +#: flwr.client.client_app.ClientApp:1 flwr.client.mod.localdp_mod.LocalDpMod:1 +#: flwr.common.constant.MessageType:1 flwr.common.constant.MessageTypeLegacy:1 +#: flwr.common.context.Context:1 flwr.common.message.Error:1 +#: flwr.common.message.Message:1 flwr.common.message.Metadata:1 +#: flwr.common.record.parametersrecord.Array:1 +#: flwr.common.record.recordset.RecordSet:1 flwr.common.typing.ClientMessage:1 +#: flwr.common.typing.DisconnectRes:1 flwr.common.typing.EvaluateIns:1 +#: flwr.common.typing.EvaluateRes:1 flwr.common.typing.FitIns:1 +#: flwr.common.typing.FitRes:1 flwr.common.typing.GetParametersIns:1 +#: flwr.common.typing.GetParametersRes:1 flwr.common.typing.GetPropertiesIns:1 +#: flwr.common.typing.GetPropertiesRes:1 flwr.common.typing.Parameters:1 +#: flwr.common.typing.ReconnectIns:1 flwr.common.typing.ServerMessage:1 +#: flwr.common.typing.Status:1 flwr.server.history.History:1 +#: flwr.server.server.Server:1 flwr.server.server_app.ServerApp:1 +#: flwr.server.server_config.ServerConfig:1 +#: flwr.server.serverapp_components.ServerAppComponents:1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of #, fuzzy -msgid "Return True if the string is a title-cased string, False otherwise." 
-msgstr "如果字符串是带标题的字符串,则返回 True,否则返回 False。" +msgid "Bases: :py:class:`object`" +msgstr "Bases: :py:class:`object`" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.app.start_client:41 flwr.client.app.start_numpy_client:36 +#: flwr.client.client_app.ClientApp:4 +#: flwr.client.client_app.ClientApp.evaluate:4 +#: flwr.client.client_app.ClientApp.query:4 +#: flwr.client.client_app.ClientApp.train:4 +#: flwr.client.mod.localdp_mod.LocalDpMod:22 +#: flwr.common.record.configsrecord.ConfigsRecord:20 +#: flwr.common.record.metricsrecord.MetricsRecord:19 +#: flwr.common.record.parametersrecord.ParametersRecord:22 +#: flwr.common.record.recordset.RecordSet:23 flwr.server.app.start_server:41 +#: flwr.server.server_app.ServerApp:4 flwr.server.server_app.ServerApp.main:4 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:29 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:21 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:14 +#: of +msgid "Examples" +msgstr "实例" + +#: flwr.client.client_app.ClientApp:5 of #, fuzzy -msgid ":py:obj:`isspace `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +msgid "" +"Assuming a typical `Client` implementation named `FlowerClient`, you can " +"wrap it in a `ClientApp` as follows:" +msgstr "假定有一个名为 `FlowerClient` 的典型 `Client` 实现,可以将其封装在一个 `ClientApp` 中,如下所示:" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isspace:1 of +#: flwr.client.client_app.ClientApp:16 of #, fuzzy -msgid "Return True if the string is a whitespace string, False otherwise." 
-msgstr "如果字符串是空白字符串,则返回 True,否则返回 False。" +msgid "" +"If the above code is in a Python module called `client`, it can be " +"started as follows:" +msgstr "如果上述代码位于一个名为 \"客户端 \"的 Python 模块中,则可以按如下方式启动它:" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp:21 of #, fuzzy -msgid ":py:obj:`isdecimal `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid "" +"In this `client:app` example, `client` refers to the Python module " +"`client.py` in which the previous code lives in and `app` refers to the " +"global attribute `app` that points to an object of type `ClientApp`." +msgstr "" +"在这个 `client:app` 例子中,`client` 指的是前面代码所在的 Python 模块 `client.py`,而 `app` " +"指的是指向 `ClientApp` 类型对象的全局属性 `app` 。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdecimal:1 of +#: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy -msgid "Return True if the string is a decimal string, False otherwise." -msgstr "如果字符串是十进制字符串,则返回 True,否则返回 False。" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isdigit `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isdigit:1 of -#, fuzzy -msgid "Return True if the string is a digit string, False otherwise." -msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isnumeric `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`evaluate `\\ \\(\\)" +msgstr ":py:obj:`evaluate `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isnumeric:1 of +#: flwr.client.client_app.ClientApp.evaluate:1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy -msgid "Return True if the string is a numeric string, False otherwise." -msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" +msgid "Return a decorator that registers the evaluate fn with the client app." 
+msgstr "返回一个装饰器,用于向客户端程序注册评估 fn。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy -msgid ":py:obj:`isalpha `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`query `\\ \\(\\)" +msgstr ":py:obj:`query `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalpha:1 of +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.query:1 of #, fuzzy -msgid "Return True if the string is an alphabetic string, False otherwise." -msgstr "如果字符串是字母字符串,则返回 True,否则返回 False。" +msgid "Return a decorator that registers the query fn with the client app." +msgstr "返回一个向客户端应用程序注册查询 fn 的装饰器。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.client.client_app.ClientApp.evaluate:1::1 of #, fuzzy -msgid ":py:obj:`isalnum `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`train `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isalnum:1 of +#: flwr.client.client_app.ClientApp.evaluate:1::1 +#: flwr.client.client_app.ClientApp.train:1 of #, fuzzy -msgid "Return True if the string is an alpha-numeric string, False otherwise." -msgstr "如果字符串是字母数字字符串,则返回 True,否则返回 False。" +msgid "Return a decorator that registers the train fn with the client app." +msgstr "返回一个装饰器,用于在客户端应用程序中注册火车 fn。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`isidentifier `\\ \\(\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:2 +msgid "NumPyClient" +msgstr "NumPyClient" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isidentifier:1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid "Return True if the string is a valid Python identifier, False otherwise." 
-msgstr "如果字符串是有效的 Python 标识符,则返回 True,否则返回 False。" +msgid "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" +msgstr "" +":py:obj:`evaluate `\\ \\(parameters\\, " +"config\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid ":py:obj:`isprintable `\\ \\(\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`fit `\\ \\(parameters\\, config\\)" +msgstr ":py:obj:`fit `\\ \\(parameters\\, config\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.isprintable:1 of -#, fuzzy -msgid "Return True if the string is printable, False otherwise." -msgstr "如果字符串可打印,则返回 True,否则返回 False。" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "使用本地数据集训练所提供的参数。" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" -msgstr ":py:obj:`PING `\\" +msgid ":py:obj:`get_context `\\ \\(\\)" +msgstr ":py:obj:`get_context `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.zfill:1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy msgid "" -"Pad a numeric string with zeros on the left, to fill a field of the given" -" width." 
-msgstr "在数字字符串左侧填充零,以填满给定宽度的字段。" +":py:obj:`get_parameters `\\ " +"\\(config\\)" +msgstr "" +":py:obj:`get_parameters `\\ " +"\\(config\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy msgid "" -":py:obj:`format `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +":py:obj:`get_properties `\\ " +"\\(config\\)" msgstr "" -":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " -"\\*\\*kwargs\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "Return a formatted version of S, using substitutions from args and kwargs." -msgstr "使用 args 和 kwargs 的替换,返回 S 的格式化版本。" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`format_map `\\ \\(mapping\\)" -msgstr ":py:obj:`EventType `\\ \\(value\\)" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid "Return a formatted version of S, using substitutions from mapping." -msgstr "使用映射中的替换,返回 S 的格式化版本。" - -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#, fuzzy -msgid ":py:obj:`maketrans `\\" -msgstr ":py:obj:`TRAIN `\\" +":py:obj:`get_properties `\\ " +"\\(config\\)" -#: ../../source/ref-api/flwr.common.EventType.rst:163::1 -#: flwr.common.EventType.maketrans:1 of -#, fuzzy -msgid "Return a translation table usable for str.translate()." -msgstr "返回可用于 str.translate() 的翻译表。" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." 
+msgstr "返回客户端的属性集。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid ":py:obj:`PING `\\" -msgstr ":py:obj:`PING `\\" +msgid "" +":py:obj:`set_context `\\ " +"\\(context\\)" +msgstr "" +":py:obj:`set_context `\\ " +"\\(context\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 #, fuzzy -msgid ":py:obj:`START_CLIENT_ENTER `\\" -msgstr ":py:obj:`START_CLIENT_ENTER `\\" +msgid ":py:obj:`to_client `\\ \\(\\)" +msgstr ":py:obj:`to_client `\\ \\(\\)" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid ":py:obj:`START_CLIENT_LEAVE `\\" -msgstr ":py:obj:`START_CLIENT_LEAVE `\\" +#: ../../source/ref-api/flwr.client.NumPyClient.rst:44::1 +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." +msgstr "将对象转换为客户类型并返回。" -#: flwr.common.EventType.capitalize:1::1 of +#: flwr.client.NumPyClient.context:1::1 of #, fuzzy -msgid ":py:obj:`START_SERVER_ENTER `\\" -msgstr ":py:obj:`START_SERVER_ENTER `\\" +msgid ":py:obj:`context `\\" +msgstr ":py:obj:`context `\\" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid ":py:obj:`START_SERVER_LEAVE `\\" -msgstr ":py:obj:`START_SERVER_LEAVE `\\" +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." 
+msgstr "当前(全局)模型参数。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of msgid "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_DRIVER_API_ENTER " -"`\\" +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." +msgstr "允许服务器影响客户端评估的配置参数。它可用于将任意值从服务器传送到客户端,例如,影响用于评估的示例数量。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. * **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." msgstr "" -":py:obj:`RUN_DRIVER_API_LEAVE " -"`\\" +"**loss** (*float*) -- 模型在本地数据集上的评估损失值。**num_examples** (*int*) -- " +"用于评估的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " +"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of msgid "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" -msgstr "" -":py:obj:`RUN_FLEET_API_ENTER " -"`\\" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." +msgstr "**loss** (*float*) -- 模型在本地数据集上的评估损失值。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." 
+msgstr "**num_examples** (*int*) -- 用于评估的示例数量。" + +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of msgid "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." msgstr "" -":py:obj:`RUN_FLEET_API_LEAVE " -"`\\" +"**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 bool、bytes、float、int 或 " +"str 类型值的字典。它可用于将任意值传回服务器。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of msgid "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +"自 Flower 0.19 起,之前的返回类型格式(int、float、float)和扩展格式(int、float、float、Dict[str," +" Scalar])已被弃用和移除。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.fit:5 of msgid "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" -msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." +msgstr "允许服务器影响客户端训练的配置参数。它可用于将任意值从服务器传送到客户端,例如设置(本地)训练遍历数。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.fit:11 of msgid "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. 
* " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." msgstr "" -":py:obj:`START_SIMULATION_ENTER " -"`\\" +"**parameters** (*NDArrays*) -- 本地更新的模型参数。**num_examples** (*int*) -- " +"用于训练的示例数量。**metrics** (*Dict[str, Scalar]*) -- 将任意字符串键映射到 " +"bool、bytes、float、int 或 str 类型值的字典。它可用于将任意值传回服务器。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "**parameters** (*NDArrays*) -- 本地更新的模型参数。" + +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." +msgstr "**num_examples** (*int*) -- 用于训练的数据数量。" + +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of msgid "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" -msgstr "" -":py:obj:`START_SIMULATION_LEAVE " -"`\\" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." +msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些参数以及一些标量属性。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid ":py:obj:`DRIVER_CONNECT `\\" -msgstr ":py:obj:`DRIVER_CONNECT `\\" +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "**parameters** -- NumPy ndarrays 的本地模型参数列表。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid ":py:obj:`DRIVER_DISCONNECT `\\" -msgstr ":py:obj:`DRIVER_DISCONNECT `\\" +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." 
+msgstr "服务器请求的配置参数。这可以用来告诉客户端需要哪些属性以及一些标量属性。" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy -msgid ":py:obj:`START_DRIVER_ENTER `\\" -msgstr ":py:obj:`START_DRIVER_ENTER `\\" +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." +msgstr "" +"**properties** -- 将任意字符串键映射到 bool、bytes、float、int 或 str " +"类型值的字典。它可用于将任意属性值传回服务器。" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:2 #, fuzzy -msgid ":py:obj:`START_DRIVER_LEAVE `\\" -msgstr ":py:obj:`START_DRIVER_LEAVE `\\" +msgid "mod" +msgstr "模块" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" +":py:obj:`adaptiveclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -":py:obj:`RUN_CLIENT_APP_ENTER " -"`\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:1 of #, fuzzy +msgid "Client-side adaptive clipping modifier." +msgstr "客户端逻辑" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" +":py:obj:`fixedclipping_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -":py:obj:`RUN_CLIENT_APP_LEAVE " -"`\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:1 of #, fuzzy -msgid "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" +msgid "Client-side fixed clipping modifier." 
+msgstr "客户端逻辑" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#, fuzzy +msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +msgstr ":py:obj:`Client `\\ \\(\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." msgstr "" -":py:obj:`RUN_SERVER_APP_ENTER " -"`\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy msgid "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +":py:obj:`message_size_mod `\\ \\(msg\\," +" ctxt\\, call\\_next\\)" msgstr "" -":py:obj:`RUN_SERVER_APP_LEAVE " -"`\\" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.message_size_mod:1 of +#, fuzzy +msgid "Message size mod." +msgstr "信息类型。" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy msgid "" -":py:obj:`RUN_SUPERNODE_ENTER " -"`\\" +":py:obj:`parameters_size_mod `\\ " +"\\(msg\\, ctxt\\, call\\_next\\)" msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.comms_mods.parameters_size_mod:1 of +#, fuzzy +msgid "Parameters size mod." +msgstr "参数" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy msgid "" -":py:obj:`RUN_SUPERNODE_LEAVE " -"`\\" +":py:obj:`secagg_mod `\\ \\(msg\\, ctxt\\, " +"call\\_next\\)" +msgstr ":py:obj:`set_context `\\ \\(context\\)" + +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secagg_mod.secagg_mod:1 of +msgid "Handle incoming message and return results, following the SecAgg protocol." 
msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" -#: flwr.common.EventType.capitalize:1::1 of +#: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy msgid "" -":py:obj:`RUN_SUPEREXEC_ENTER " -"`\\" +":py:obj:`secaggplus_mod `\\ \\(msg\\, " +"ctxt\\, call\\_next\\)" msgstr "" -":py:obj:`RUN_SUPERLINK_ENTER " -"`\\" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" -#: flwr.common.EventType.capitalize:1::1 of -#, fuzzy +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.secure_aggregation.secaggplus_mod.secaggplus_mod:1 of msgid "" -":py:obj:`RUN_SUPEREXEC_LEAVE " -"`\\" +"Handle incoming message and return results, following the SecAgg+ " +"protocol." msgstr "" -":py:obj:`RUN_SUPERLINK_LEAVE " -"`\\" -#: flwr.common.EventType.capitalize:3 of +#: ../../source/ref-api/flwr.client.mod.rst:35::1 #, fuzzy msgid "" -"More specifically, make the first character have upper case and the rest " -"lower case." -msgstr "更具体地说,让第一个字符大写,其余字符小写。" +":py:obj:`LocalDpMod `\\ \\(clipping\\_norm\\," +" sensitivity\\, ...\\)" +msgstr "" +":py:obj:`ClientApp `\\ \\(\\[client\\_fn\\, " +"mods\\]\\)" -#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 -#: flwr.common.EventType.rjust:3 of +#: ../../source/ref-api/flwr.client.mod.rst:35::1 +#: flwr.client.mod.localdp_mod.LocalDpMod:1 of #, fuzzy -msgid "Padding is done using the specified fill character (default is a space)." -msgstr "使用指定的填充字符(默认为空格)进行填充。" +msgid "Modifier for local differential privacy." +msgstr "差分隐私" -#: flwr.common.EventType.count:1 of +#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst:2 #, fuzzy +msgid "LocalDpMod" +msgstr "本地 DP 模式" + +#: flwr.client.mod.localdp_mod.LocalDpMod:3 of msgid "" -"Return the number of non-overlapping occurrences of substring sub in " -"string S[start:end]. Optional arguments start and end are interpreted as" -" in slice notation." 
-msgstr "返回子串 sub 在字符串 S[start:end] 中非重叠出现的次数。 可选参数 start 和 end 按切分符号解释。" +"This mod clips the client model updates and adds noise to the params " +"before sending them to the server." +msgstr "" -#: flwr.common.EventType.encode:3 of -#, fuzzy -msgid "encoding" -msgstr "编码" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:12 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:10 +#: flwr.client.mod.localdp_mod.LocalDpMod:6 of +msgid "It operates on messages of type `MessageType.TRAIN`." +msgstr "" -#: flwr.common.EventType.encode:4 of +#: flwr.client.mod.localdp_mod.LocalDpMod:8 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:15 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:8 +#: of #, fuzzy -msgid "The encoding in which to encode the string." -msgstr "字符串的编码。" +msgid "The value of the clipping norm." +msgstr "削波法线的值。" -#: flwr.common.EventType.encode:9 of -#, fuzzy -msgid "errors" -msgstr "错误" +#: flwr.client.mod.localdp_mod.LocalDpMod:10 of +msgid "The sensitivity of the client model." +msgstr "" -#: flwr.common.EventType.encode:6 of -#, fuzzy +#: flwr.client.mod.localdp_mod.LocalDpMod:12 of msgid "" -"The error handling scheme to use for encoding errors. The default is " -"'strict' meaning that encoding errors raise a UnicodeEncodeError. Other " -"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " -"as any other name registered with codecs.register_error that can handle " -"UnicodeEncodeErrors." +"The privacy budget. Smaller value of epsilon indicates a higher level of " +"privacy protection." 
msgstr "" -"编码错误的错误处理方案。默认值为 \"strict\",即编码错误会引发 UnicodeEncodeError。 其他可能的值包括 " -"\"ignore\"、\"replace \"和 \"xmlcharrefreplace\",以及通过 codecs.register_error" -" 注册的、可处理 UnicodeEncodeErrror 的其他名称。" -#: flwr.common.EventType.endswith:1 of -#, fuzzy +#: flwr.client.mod.localdp_mod.LocalDpMod:15 of msgid "" -"Return True if S ends with the specified suffix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. suffix can also be a tuple of strings " -"to try." +"The failure probability. The probability that the privacy mechanism fails" +" to provide the desired level of privacy. A smaller value of delta " +"indicates a stricter privacy guarantee." msgstr "" -"如果 S 以指定后缀结束,则返回 True,否则返回 False。如果起始位置可选,则从该位置开始测试 S。如果使用可选的 " -"end,则在该位置停止比较 S。后缀也可以是要尝试的字符串元组。" -#: flwr.common.EventType.expandtabs:3 of -#, fuzzy -msgid "If tabsize is not given, a tab size of 8 characters is assumed." -msgstr "如果未给出制表符大小,则假定制表符大小为 8 个字符。" +#: flwr.client.mod.localdp_mod.LocalDpMod:23 of +msgid "Create an instance of the local DP mod and add it to the client-side mods:" +msgstr "" -#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of -#, fuzzy -msgid "" -"Return the lowest index in S where substring sub is found, such that sub " -"is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." -msgstr "返回在 S 中找到子串 sub 的最低索引,即 sub 包含在 S[start:end] 中。 可选参数 start 和 end 按切分符号解释。" - -#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of -#, fuzzy -msgid "Return -1 on failure." -msgstr "失败时返回-1。" +#: ../../source/ref-api/flwr.client.mod.adaptiveclipping_mod.rst:2 +msgid "adaptiveclipping\\_mod" +msgstr "" -#: flwr.common.EventType.format:1 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:3 of #, fuzzy msgid "" -"Return a formatted version of S, using substitutions from args and " -"kwargs. 
The substitutions are identified by braces ('{' and '}')." -msgstr "使用来自 args 和 kwargs 的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideAdaptiveClipping server-side strategy " +"wrapper." +msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" -#: flwr.common.EventType.format_map:1 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:6 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:6 of #, fuzzy +msgid "The wrapper sends the clipping_norm value to the client." +msgstr "向客户发送近端因子mu" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:8 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:8 of +msgid "This mod clips the client model updates before sending them to the server." +msgstr "" + +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:10 of msgid "" -"Return a formatted version of S, using substitutions from mapping. The " -"substitutions are identified by braces ('{' and '}')." -msgstr "使用映射中的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" +"It also sends KEY_NORM_BIT to the server for computing the new clipping " +"value." +msgstr "" -#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:15 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:13 +#: flwr.server.driver.driver.Driver.send_and_receive:18 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 +#: of #, fuzzy -msgid "Raises ValueError when the substring is not found." -msgstr "如果未找到子串,则引发 ValueError。" +msgid "Notes" +msgstr "无" -#: flwr.common.EventType.isalnum:3 of -#, fuzzy -msgid "" -"A string is alpha-numeric if all characters in the string are alpha-" -"numeric and there is at least one character in the string." 
-msgstr "如果字符串中的所有字符都是字母数字,且字符串中至少有一个字符,则该字符串为字母数字字符串。" +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:16 +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:14 of +msgid "Consider the order of mods when using multiple." +msgstr "" -#: flwr.common.EventType.isalpha:3 of +#: flwr.client.mod.centraldp_mods.adaptiveclipping_mod:18 of +msgid "Typically, adaptiveclipping_mod should be the last to operate on params." +msgstr "" + +#: ../../source/ref-api/flwr.client.mod.fixedclipping_mod.rst:2 #, fuzzy -msgid "" -"A string is alphabetic if all characters in the string are alphabetic and" -" there is at least one character in the string." -msgstr "如果字符串中的所有字符都是字母,并且字符串中至少有一个字符,那么该字符串就是字母字符串。" +msgid "fixedclipping\\_mod" +msgstr "剪贴" -#: flwr.common.EventType.isascii:3 of +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:3 of #, fuzzy msgid "" -"ASCII characters have code points in the range U+0000-U+007F. Empty " -"string is ASCII too." -msgstr "ASCII 字符的码位范围为 U+0000-U+007F。空字符串也是 ASCII 字符。" +"This mod needs to be used with the " +"DifferentialPrivacyClientSideFixedClipping server-side strategy wrapper." +msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" -#: flwr.common.EventType.isdecimal:3 of +#: flwr.client.mod.centraldp_mods.fixedclipping_mod:16 of +msgid "Typically, fixedclipping_mod should be the last to operate on params." +msgstr "" + +#: ../../source/ref-api/flwr.client.mod.make_ffn.rst:2 +msgid "make\\_ffn" +msgstr "" + +#: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 +msgid "message\\_size\\_mod" +msgstr "" + +#: flwr.client.mod.comms_mods.message_size_mod:3 of +msgid "This mod logs the size in bytes of the message being transmited." 
+msgstr "" + +#: ../../source/ref-api/flwr.client.mod.parameters_size_mod.rst:2 #, fuzzy +msgid "parameters\\_size\\_mod" +msgstr "参数" + +#: flwr.client.mod.comms_mods.parameters_size_mod:3 of msgid "" -"A string is a decimal string if all characters in the string are decimal " -"and there is at least one character in the string." -msgstr "如果字符串中的所有字符都是十进制,并且字符串中至少有一个字符是十进制,那么该字符串就是十进制字符串。" +"This mod logs the number of parameters transmitted in the message as well" +" as their size in bytes." +msgstr "" -#: flwr.common.EventType.isdigit:3 of +#: ../../source/ref-api/flwr.client.mod.secagg_mod.rst:2 +msgid "secagg\\_mod" +msgstr "" + +#: ../../source/ref-api/flwr.client.mod.secaggplus_mod.rst:2 #, fuzzy -msgid "" -"A string is a digit string if all characters in the string are digits and" -" there is at least one character in the string." -msgstr "如果字符串中的所有字符都是数字,并且字符串中至少有一个字符,那么该字符串就是数字字符串。" +msgid "secaggplus\\_mod" +msgstr "工作流程" -#: flwr.common.EventType.isidentifier:3 of +#: ../../source/ref-api/flwr.client.start_client.rst:2 #, fuzzy +msgid "start\\_client" +msgstr "启动客户端" + +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:9 of msgid "" -"Call keyword.iskeyword(s) to test whether string s is a reserved " -"identifier, such as \"def\" or \"class\"." -msgstr "调用 keyword.iskeyword(s) 测试字符串 s 是否为保留标识符,如 \"def \"或 \"class\"。" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." +msgstr "" +"服务器的 IPv4 或 IPv6 地址:如果 Flower 服务器在同一台机器上运行,端口为 " +"8080,则`server_address`应为`\"[::]:8080\"`。" -#: flwr.common.EventType.islower:3 of -#, fuzzy +#: flwr.client.app.start_client:7 of +msgid "A callable that instantiates a Client. 
(default: None)" +msgstr "用于实例化客户端的可调用程序。(默认值:无)" + +#: flwr.client.app.start_client:9 of msgid "" -"A string is lowercase if all cased characters in the string are lowercase" -" and there is at least one cased character in the string." -msgstr "如果字符串中的所有大小写字符都是小写,且字符串中至少有一个大小写字符,则该字符串为小写字符串。" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "抽象基类 `flwr.client.Client` 的实现(默认值:无)" -#: flwr.common.EventType.isnumeric:3 of -#, fuzzy +#: flwr.client.app.start_client:12 flwr.client.app.start_numpy_client:15 of msgid "" -"A string is numeric if all characters in the string are numeric and there" -" is at least one character in the string." -msgstr "如果字符串中的所有字符都是数字,且字符串中至少有一个字符,则该字符串为数字字符串。" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." +msgstr "" +"可与 Flower 服务器交换的 gRPC 信息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " +"服务器需要以相同的值启动(请参阅 `flwr.server.start_server`),否则它将不知道增加的限制并阻止更大的消息。" -#: flwr.common.EventType.isprintable:3 of -#, fuzzy +#: flwr.client.app.start_client:19 flwr.client.app.start_numpy_client:22 of msgid "" -"A string is printable if all of its characters are considered printable " -"in repr() or if it is empty." -msgstr "如果字符串的所有字符在 repr() 中都被认为是可打印的,或者字符串为空,那么该字符串就是可打印的。" +"The PEM-encoded root certificates as a byte string or a path string. If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." 
+msgstr "字节字符串或路径字符串形式的 PEM 编码根证书。如果提供,将使用这些证书与启用 SSL 的 Flower 服务器建立安全连接。" -#: flwr.common.EventType.isspace:3 of +#: flwr.client.app.start_client:23 flwr.client.app.start_numpy_client:26 of #, fuzzy msgid "" -"A string is whitespace if all characters in the string are whitespace and" -" there is at least one character in the string." -msgstr "如果字符串中的所有字符都是空格,且字符串中至少有一个字符,则该字符串为空格。" +"Starts an insecure gRPC connection when True. Enables HTTPS connection " +"when False, using system certificates if `root_certificates` is None." +msgstr "" +"为 True 时启动不安全的 gRPC 连接。False 时启用 HTTPS 连接,如果 `root_certificates` 为 " +"None,则使用系统证书。" -#: flwr.common.EventType.istitle:3 of -#, fuzzy +#: flwr.client.app.start_client:26 flwr.client.app.start_numpy_client:29 of msgid "" -"In a title-cased string, upper- and title-case characters may only follow" -" uncased characters and lowercase characters only cased ones." -msgstr "在标题大小写字符串中,大写和标题大小写字符只能跟在无大小写字符之后,小写字符只能跟在有大小写字符之后。" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" +msgstr "" +"配置传输层:允许的值包括 - 'grpc-bidi': gRPC,双向流 - 'grpc-rere': gRPC,请求-响应(实验性) - " +"'rest': HTTP(实验性)" -#: flwr.common.EventType.isupper:3 of +#: flwr.client.app.start_client:31 of #, fuzzy msgid "" -"A string is uppercase if all cased characters in the string are uppercase" -" and there is at least one cased character in the string." -msgstr "如果字符串中所有带大小写的字符都是大写,并且字符串中至少有一个带大小写的字符,则该字符串为大写字符串。" +"The maximum number of times the client will try to connect to the server " +"before giving up in case of a connection error. If set to None, there is " +"no limit to the number of tries." +msgstr "客户端在出现连接错误时放弃连接服务器的最大尝试次数。如果设置为 \"无\",则不限制尝试次数。" -#: flwr.common.EventType.join:3 of +#: flwr.client.app.start_client:35 of #, fuzzy msgid "" -"The string whose method is called is inserted in between each given " -"string. 
The result is returned as a new string." -msgstr "方法被调用的字符串会被插入每个给定的字符串之间。结果将以新字符串的形式返回。" +"The maximum duration before the client stops trying to connect to the " +"server in case of connection error. If set to None, there is no limit to " +"the total time." +msgstr "在出现连接错误时,客户端停止尝试连接服务器之前的最长持续时间。如果设置为 \"无\",则总时间没有限制。" -#: flwr.common.EventType.join:6 of -#, fuzzy -msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" -msgstr "示例:'.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +#: flwr.client.app.start_client:42 flwr.client.app.start_numpy_client:37 of +msgid "Starting a gRPC client with an insecure server connection:" +msgstr "使用不安全的服务器连接启动 gRPC 客户端:" -#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 -#: flwr.common.EventType.strip:3 of +#: flwr.client.app.start_client:49 flwr.client.app.start_numpy_client:44 of #, fuzzy -msgid "If chars is given and not None, remove characters in chars instead." -msgstr "如果给定的是 chars 而不是 None,则删除 chars 中的字符。" +msgid "Starting an SSL-enabled gRPC client using system certificates:" +msgstr "启动支持 SSL 的 gRPC 客户端:" -#: flwr.common.EventType.maketrans:3 of +#: flwr.client.app.start_client:60 flwr.client.app.start_numpy_client:52 of #, fuzzy -msgid "" -"If there is only one argument, it must be a dictionary mapping Unicode " -"ordinals (integers) or characters to Unicode ordinals, strings or None. " -"Character keys will be then converted to ordinals. If there are two " -"arguments, they must be strings of equal length, and in the resulting " -"dictionary, each character in x will be mapped to the character at the " -"same position in y. If there is a third argument, it must be a string, " -"whose characters will be mapped to None in the result." 
-msgstr "" -"如果只有一个参数,则必须是一个将 Unicode 序号(整数)或字符映射到 Unicode 序号、字符串或 None " -"的字典。字符键将被转换为序号。如果有两个参数,它们必须是长度相等的字符串,在生成的字典中,x 中的每个字符将被映射到 y 中相同位置的字符。" +msgid "Starting an SSL-enabled gRPC client using provided certificates:" +msgstr "启动支持 SSL 的 gRPC 客户端:" -#: flwr.common.EventType.partition:3 of +#: ../../source/ref-api/flwr.client.start_numpy_client.rst:2 #, fuzzy -msgid "" -"This will search for the separator in the string. If the separator is " -"found, returns a 3-tuple containing the part before the separator, the " -"separator itself, and the part after it." -msgstr "它会在字符串中搜索分隔符。 如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" +msgid "start\\_numpy\\_client" +msgstr "start_numpy_client" -#: flwr.common.EventType.partition:7 of +#: flwr.client.app.start_numpy_client:5 of #, fuzzy msgid "" -"If the separator is not found, returns a 3-tuple containing the original " -"string and two empty strings." -msgstr "如果找不到分隔符,则返回一个包含原始字符串和两个空字符串的 3 元组。" - -#: flwr.common.EventType.removeprefix:3 of -#, fuzzy -msgid "" -"If the string starts with the prefix string, return string[len(prefix):]." -" Otherwise, return a copy of the original string." -msgstr "如果字符串以前缀字符串开始,则返回 string[len(prefix):]。否则,返回原始字符串的副本。" +"This function is deprecated since 1.7.0. Use " +":code:`flwr.client.start_client` instead and first convert your " +":code:`NumPyClient` to type :code:`flwr.client.Client` by executing its " +":code:`to_client()` method." +msgstr "" +"自 1.7.0 起该函数已被弃用。请使用 :code:`flwr.client.start_client`,并首先通过执行 " +":code:`to_client()`方法将 :code:`NumPyClient`转换为 :code:`flwr.client.Client`。" -#: flwr.common.EventType.removesuffix:3 of +#: flwr.client.app.start_numpy_client:13 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." 
+msgstr "抽象基类 `flwr.client.NumPyClient` 的实现。" + +#: ../../source/ref-api/flwr.common.rst:2 +msgid "common" +msgstr "常见" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "" -"If the string ends with the suffix string and that suffix is not empty, " -"return string[:-len(suffix)]. Otherwise, return a copy of the original " -"string." -msgstr "如果字符串以后缀字符串结尾,且后缀不为空,则返回 string[:-len(suffix)]。否则,返回原始字符串的副本。" +msgid ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" +msgstr ":py:obj:`array_from_numpy `\\ \\(ndarray\\)" -#: flwr.common.EventType.replace:5 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.record.conversion_utils.array_from_numpy:1 of #, fuzzy -msgid "count" -msgstr "背景" +msgid "Create Array from NumPy ndarray." +msgstr "将参数对象转换为 NumPy ndarrays。" -#: flwr.common.EventType.replace:4 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "" -"Maximum number of occurrences to replace. -1 (the default value) means " -"replace all occurrences." -msgstr "要替换的最大出现次数。-1(默认值)表示替换所有出现次数。" +msgid ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" +msgstr ":py:obj:`bytes_to_ndarray `\\ \\(tensor\\)" -#: flwr.common.EventType.replace:7 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." +msgstr "从字节反序列化 NumPy ndarray。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -"If the optional argument count is given, only the first count occurrences" -" are replaced." -msgstr "如果给出可选参数 count,则只替换第一个计数出现的次数。" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" +msgstr "" +":py:obj:`configure `\\ \\(identifier\\[\\, " +"filename\\, host\\]\\)" -#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." 
+msgstr "配置将日志记录到文件和/或远程日志服务器。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -"Return the highest index in S where substring sub is found, such that sub" -" is contained within S[start:end]. Optional arguments start and end are " -"interpreted as in slice notation." -msgstr "返回在 S 中找到子串 sub 且 sub 包含在 S[start:end] 中的最高索引。 可选参数 start 和 end 按切分符号解释。" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" +msgstr "" +":py:obj:`event `\\ \\(event\\_type\\[\\, " +"event\\_details\\]\\)" -#: flwr.common.EventType.rpartition:3 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.telemetry.event:1 of #, fuzzy -msgid "" -"This will search for the separator in the string, starting at the end. If" -" the separator is found, returns a 3-tuple containing the part before the" -" separator, the separator itself, and the part after it." -msgstr "它会从字符串的末尾开始搜索分隔符。如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" +msgid "Submit create_event to ThreadPoolExecutor to avoid blocking." +msgstr "将 create_event 提交给 ThreadPoolExecutor 以避免阻塞。" -#: flwr.common.EventType.rpartition:7 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -"If the separator is not found, returns a 3-tuple containing two empty " -"strings and the original string." -msgstr "如果找不到分隔符,则返回一个包含两个空字符串和原始字符串的 3 元组。" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" +msgstr "" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" -#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of -#, fuzzy -msgid "sep" -msgstr "sep" +#: ../../source/ref-api/flwr.common.rst:30::1 logging.Logger.log:1 +#: of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "以整数严重性 \"级别 \"记录 \"msg % args\"。" -#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "The separator used to split the string." 
-msgstr "用于分割字符串的分隔符。" +msgid ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" +msgstr ":py:obj:`ndarray_to_bytes `\\ \\(ndarray\\)" -#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." +msgstr "将 NumPy ndarray 序列化为字节。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -"When set to None (the default value), will split on any whitespace " -"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" -" empty strings from the result." -msgstr "当设置为 \"无\"(默认值)时,将对任何空白字符(包括 \\n \\r \\t \\f 和空格)进行分割,并从结果中剔除空字符串。" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" +msgstr "" +":py:obj:`ndarrays_to_parameters `\\ " +"\\(ndarrays\\)" -#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.ndarrays_to_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarrays_to_parameters:1 +#: of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "将 NumPy ndarrays 转换为参数对象。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy -msgid "maxsplit" -msgstr "最大分割" +msgid ":py:obj:`now `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" -#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "从 time.time() 生成日期时间,时区设置为 UTC。" + +#: ../../source/ref-api/flwr.common.rst:30::1 #, fuzzy msgid "" -"Maximum number of splits (starting from the left). -1 (the default value)" -" means no limit." 
-msgstr "最大分割次数(从左边开始)。-1(默认值)表示没有限制。" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" +msgstr "" +":py:obj:`parameters_to_ndarrays `\\ " +"\\(parameters\\)" -#: flwr.common.EventType.rsplit:13 of -#, fuzzy -msgid "Splitting starts at the end of the string and works to the front." -msgstr "从琴弦末端开始分弦,一直到琴弦前端。" +#: ../../source/ref-api/flwr.common.rst:30::1 +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "将参数对象转换为 NumPy ndarrays。" -#: flwr.common.EventType.split:13 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Note, str.split() is mainly useful for data that has been intentionally " -"delimited. With natural text that includes punctuation, consider using " -"the regular expression module." -msgstr "注意,str.split() 主要适用于有意分隔的数据。 对于包含标点符号的自然文本,可以考虑使用正则表达式模块。" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" +msgstr "" +":py:obj:`Array `\\ \\(dtype\\, shape\\, stype\\, " +"data\\)" -#: flwr.common.EventType.splitlines:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.Array:1 of #, fuzzy -msgid "" -"Line breaks are not included in the resulting list unless keepends is " -"given and true." -msgstr "除非指定 keepends 为 true,否则换行符不会包含在生成的列表中。" +msgid "Array type." +msgstr "返回类型" -#: flwr.common.EventType.startswith:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Return True if S starts with the specified prefix, False otherwise. With " -"optional start, test S beginning at that position. With optional end, " -"stop comparing S at that position. prefix can also be a tuple of strings " -"to try." 
+":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" msgstr "" -"如果 S 以指定的前缀开始,则返回 True,否则返回 False。如果选择 start,则从该位置开始测试 S。如果使用可选的 " -"end,则在该位置停止比较 S。" +":py:obj:`ClientMessage `\\ " +"\\(\\[get\\_properties\\_res\\, ...\\]\\)" -#: flwr.common.EventType.title:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." +msgstr "ClientMessage 是用于容纳一条结果信息的容器。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "" -"More specifically, words start with uppercased characters and all " -"remaining cased characters have lower case." -msgstr "更具体地说,单词以大写字母开头,其余所有大小写字符均为小写。" +msgid ":py:obj:`Code `\\ \\(value\\)" +msgstr ":py:obj:`Code `\\ \\(value\\)" -#: flwr.common.EventType.translate:5 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "客户端状态代码。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "table" -msgstr "数据库" +msgid ":py:obj:`Config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.common.EventType.translate:4 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Translation table, which must be a mapping of Unicode ordinals to Unicode" -" ordinals, strings, or None." -msgstr "翻译表,必须是 Unicode 序号到 Unicode 序号、字符串或无的映射。" +"alias of :py:class:`dict`\\ [:py:class:`str`, :py:class:`bool` | " +":py:class:`bytes` | :py:class:`float` | :py:class:`int` | " +":py:class:`str`]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" -#: flwr.common.EventType.translate:7 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"The table must implement lookup/indexing via __getitem__, for instance a " -"dictionary or list. 
If this operation raises LookupError, the character " -"is left untouched. Characters mapped to None are deleted." +":py:obj:`ConfigsRecord `\\ " +"\\(\\[configs\\_dict\\, keep\\_input\\]\\)" msgstr "" -"表必须通过 __getitem__ 实现查找/索引,例如字典或列表。 如果该操作引发 LookupError,该字符将保持不变。 映射为 None" -" 的字符将被删除。" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.common.EventType.zfill:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.configsrecord.ConfigsRecord:1 of #, fuzzy -msgid "The string is never truncated." -msgstr "字符串不会被截断。" +msgid "Configs record." +msgstr "配置日志记录" -#: ../../source/ref-api/flwr.common.FitIns.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "FitIns" -msgstr "FitIns" +msgid "" +":py:obj:`Context `\\ \\(node\\_id\\, " +"node\\_config\\, state\\, run\\_config\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.context.Context:1 of #, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +msgid "Context of your run." +msgstr "您的运行状态。" -#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +msgid ":py:obj:`DisconnectRes `\\ \\(reason\\)" +msgstr ":py:obj:`DisconnectRes `\\ \\(reason\\)" -#: ../../source/ref-api/flwr.common.FitRes.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." 
+msgstr "客户端向服务器发送 DisconnectRes 信息。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "FitRes" -msgstr "FitRes" +msgid ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" +msgstr ":py:obj:`Error `\\ \\(code\\[\\, reason\\]\\)" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Error:1 of #, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +msgid "A dataclass that stores information about an error that occurred." +msgstr "数据类,用于存储所发生错误的相关信息。" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +msgid "" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" +msgstr "" +":py:obj:`EvaluateIns `\\ \\(parameters\\, " +"config\\)" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 -#, fuzzy -msgid ":py:obj:`num_examples `\\" -msgstr ":py:obj:`num_examples `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "评估客户端的指示。" -#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`metrics `\\" -msgstr ":py:obj:`metrics `\\" +msgid "" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" +msgstr "" +":py:obj:`EvaluateRes `\\ \\(status\\, loss\\, " +"num\\_examples\\, metrics\\)" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 -#, fuzzy -msgid "GetParametersIns" -msgstr "参数" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." 
+msgstr "评估客户端的反应。" -#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +msgid ":py:obj:`EventType `\\ \\(value\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 -#, fuzzy -msgid "GetParametersRes" -msgstr "参数" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "遥测事件类型。" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +msgid ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" +msgstr ":py:obj:`FitIns `\\ \\(parameters\\, config\\)" -#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 -#, fuzzy -msgid ":py:obj:`parameters `\\" -msgstr ":py:obj:`parameters `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." +msgstr "为客户提供安装说明。" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "GetPropertiesIns" -msgstr "GetPropertiesIns" +msgid "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" +msgstr "" +":py:obj:`FitRes `\\ \\(status\\, parameters\\, " +"num\\_examples\\, metrics\\)" -#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 -#, fuzzy -msgid ":py:obj:`config `\\" -msgstr ":py:obj:`config `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." 
+msgstr "来自客户端的合适回复。" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "GetPropertiesRes" -msgstr "GetPropertiesRes" +msgid ":py:obj:`GetParametersIns `\\ \\(config\\)" +msgstr ":py:obj:`GetParametersIns `\\ \\(config\\)" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 -#, fuzzy -msgid ":py:obj:`status `\\" -msgstr ":py:obj:`status `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "客户端的参数请求。" -#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`properties `\\" -msgstr ":py:obj:`properties `\\" +msgid "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" +msgstr "" +":py:obj:`GetParametersRes `\\ \\(status\\, " +"parameters\\)" -#: ../../source/ref-api/flwr.common.Message.rst:2 -#, fuzzy -msgid "Message" -msgstr "服务器端" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "要求返回参数时的响应。" -#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 -#: flwr.common.message.Message:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "A dataclass including information about the message to be executed." -msgstr "数据类型,包括要执行的信息的相关信息。" +msgid ":py:obj:`GetPropertiesIns `\\ \\(config\\)" +msgstr ":py:obj:`GetPropertiesIns `\\ \\(config\\)" -#: flwr.common.message.Message:5 of -#, fuzzy -msgid "" -"Holds records either sent by another entity (e.g. sent by the server-side" -" logic to a client, or vice-versa) or that will be sent to it." -msgstr "保存由其他实体发送的记录(如由服务器端逻辑发送到客户端,反之亦然)或将发送到该实体的记录。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." 
+msgstr "客户端的属性请求。" -#: flwr.common.message.Message:8 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"A dataclass that captures information about an error that took place when" -" processing another message." -msgstr "数据类,用于捕捉处理其他报文时发生的错误信息。" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" +msgstr "" +":py:obj:`GetPropertiesRes `\\ \\(status\\, " +"properties\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "来自客户端的属性响应。" + +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -":py:obj:`create_error_reply `\\ " -"\\(error\\[\\, ttl\\]\\)" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" msgstr "" -":py:obj:`create_error_reply `\\ " -"\\(error\\, ttl\\)" +":py:obj:`Message `\\ \\(metadata\\[\\, content\\, " +"error\\]\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_error_reply:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Message:1 of #, fuzzy -msgid "Construct a reply message indicating an error happened." -msgstr "构建一条回复信息,说明发生了错误。" +msgid "State of your application from the viewpoint of the entity using it." +msgstr "从使用实体的角度看应用程序的状态。" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "" -":py:obj:`create_reply `\\ " -"\\(content\\[\\, ttl\\]\\)" -msgstr "" -":py:obj:`create_reply `\\ \\(content\\," -" ttl\\)" +msgid ":py:obj:`MessageType `\\ \\(\\)" +msgstr ":py:obj:`MessageType `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.create_reply:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageType:1 of #, fuzzy -msgid "Create a reply to this message with specified content and TTL." 
-msgstr "以指定的内容和 TTL 创建对该信息的回复。" +msgid "Message type." +msgstr "信息类型。" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`has_content `\\ \\(\\)" -msgstr ":py:obj:`has_content `\\ \\(\\)" +msgid ":py:obj:`MessageTypeLegacy `\\ \\(\\)" +msgstr ":py:obj:`MessageTypeLegacy `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_content:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.constant.MessageTypeLegacy:1 of #, fuzzy -msgid "Return True if message has content, else False." -msgstr "如果信息有内容,则返回 True,否则返回 False。" +msgid "Legacy message type." +msgstr "传统信息类型。" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`has_error `\\ \\(\\)" -msgstr ":py:obj:`has_error `\\ \\(\\)" +msgid "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" +msgstr "" +":py:obj:`Metadata `\\ \\(run\\_id\\, " +"message\\_id\\, src\\_node\\_id\\, ...\\)" -#: ../../source/ref-api/flwr.common.Message.rst:35::1 -#: flwr.common.message.Message.has_error:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.message.Metadata:1 of #, fuzzy -msgid "Return True if message has an error, else False." -msgstr "如果信息有错误,则返回 True,否则返回 False。" +msgid "A dataclass holding metadata associated with the current message." +msgstr "数据类型,包含与当前报文相关的元数据。" -#: flwr.common.Message.content:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`content `\\" -msgstr ":py:obj:`content `\\" +msgid ":py:obj:`Metrics `\\" +msgstr ":py:obj:`metrics `\\" -#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 -#: of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "The content of this message." 
-msgstr "评估客户端的反应。" +msgid "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" +msgstr "" +":py:obj:`MetricsRecord `\\ " +"\\(\\[metrics\\_dict\\, keep\\_input\\]\\)" -#: flwr.common.Message.content:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.metricsrecord.MetricsRecord:1 of #, fuzzy -msgid ":py:obj:`error `\\" -msgstr ":py:obj:`error `\\" +msgid "Metrics recod." +msgstr "指标记录。" -#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "Error captured by this message." -msgstr "该信息捕捉到的错误。" +msgid ":py:obj:`NDArray `\\" +msgstr ":py:obj:`NDArray `\\" -#: flwr.common.Message.content:1::1 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`metadata `\\" -msgstr ":py:obj:`metadata `\\" +msgid "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" +msgstr "" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -#: flwr.common.message.Message.create_error_reply:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "The error that was encountered." -msgstr "遇到的错误。" +msgid ":py:obj:`NDArrays `\\" +msgstr ":py:obj:`NDArray `\\" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. 
This " -"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " -"msg.meta.created_at)" +"alias of :py:class:`list`\\ [:py:class:`~numpy.ndarray`\\ " +"[:py:obj:`~typing.Any`, :py:class:`~numpy.dtype`\\ " +"[:py:obj:`~typing.Any`]]]" msgstr "" -"该信息的有效时间(秒)。如果未设置,则将根据收到的信息过期前的剩余时间来设置。其计算公式为:ttl = msg.meta.ttl - " -"(reply.meta.created_at - msg.meta.created_at)" +"alias of :py:class:`~numpy.ndarray`\\ [:py:obj:`~typing.Any`, " +":py:class:`~numpy.dtype`\\ [:py:obj:`~typing.Any`]]" -#: flwr.common.message.Message.create_error_reply:5 -#: flwr.common.message.Message.create_reply:9 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"Time-to-live for this message in seconds. If unset, it will be set based " -"on the remaining time for the received message before it expires. This " -"follows the equation:" -msgstr "该信息的有效时间(秒)。如果未设置,则将根据接收到的信息过期前的剩余时间来设置。其计算公式如下" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" +msgstr "" +":py:obj:`Parameters `\\ \\(tensors\\, " +"tensor\\_type\\)" -#: flwr.common.message.Message.create_error_reply:9 -#: flwr.common.message.Message.create_reply:13 of -#, fuzzy -msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" -msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "模型参数。" -#: flwr.common.message.Message.create_reply:3 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy msgid "" -"The method generates a new `Message` as a reply to this message. It " -"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " -"this message and sets 'reply_to_message' to the ID of this message." 
+":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" msgstr "" -"该方法会生成一条新的 \"信息\",作为对该信息的回复。该方法继承了该消息的 " -"\"run_id\"、\"src_node_id\"、\"dst_node_id \"和 \"message_type\",并将 " -"\"reply_to_message \"设置为该消息的 ID。" +":py:obj:`ParametersRecord `\\ " +"\\(\\[array\\_dict\\, keep\\_input\\]\\)" -#: flwr.common.message.Message.create_reply:7 of +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.parametersrecord.ParametersRecord:1 of #, fuzzy -msgid "The content for the reply message." -msgstr "回复信息的内容。" +msgid "Parameters record." +msgstr "参数" -#: flwr.common.message.Message.create_reply:16 of +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "A new `Message` instance representing the reply." -msgstr "代表回复的新的 `Message` 实例。" +msgid ":py:obj:`Properties `\\" +msgstr ":py:obj:`properties `\\" -#: ../../source/ref-api/flwr.common.MessageType.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "MessageType" -msgstr "返回类型" +msgid ":py:obj:`ReconnectIns `\\ \\(seconds\\)" +msgstr ":py:obj:`ReconnectIns `\\ \\(seconds\\)" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 -#, fuzzy -msgid ":py:obj:`EVALUATE `\\" -msgstr ":py:obj:`EVALUATE `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." 
+msgstr "服务器发送给客户端的重新连接信息。" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`QUERY `\\" -msgstr ":py:obj:`QUERY `\\" +msgid "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" +msgstr "" +":py:obj:`RecordSet `\\ " +"\\(\\[parameters\\_records\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.record.recordset.RecordSet:1 of #, fuzzy -msgid ":py:obj:`TRAIN `\\" -msgstr ":py:obj:`TRAIN `\\" +msgid "RecordSet stores groups of parameters, metrics and configs." +msgstr "RecordSet 可存储参数、指标和配置组。" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid "MessageTypeLegacy" -msgstr "MessageTypeLegacy" +msgid "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" +msgstr "" +":py:obj:`ServerMessage `\\ " +"\\(\\[get\\_properties\\_ins\\, ...\\]\\)" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 -#, fuzzy -msgid ":py:obj:`GET_PARAMETERS `\\" -msgstr ":py:obj:`GET_PARAMETERS `\\" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." +msgstr "ServerMessage 是用于容纳一条指令信息的容器。" -#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#: ../../source/ref-api/flwr.common.rst:68::1 #, fuzzy -msgid ":py:obj:`GET_PROPERTIES `\\" -msgstr ":py:obj:`GET_PROPERTIES `\\" +msgid ":py:obj:`Status `\\ \\(code\\, message\\)" +msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of -#, fuzzy -msgid "An identifier for the current run." -msgstr "当前运行的标识符。" +#: ../../source/ref-api/flwr.common.rst:68::1 +#: flwr.common.typing.Status:1 of +msgid "Client status." 
+msgstr "客户端状态。" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of +#: ../../source/ref-api/flwr.common.Array.rst:2 #, fuzzy -msgid "An identifier for the current message." -msgstr "当前信息的标识符。" +msgid "Array" +msgstr "数组" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of +#: flwr.common.record.parametersrecord.Array:3 of #, fuzzy -msgid "An identifier for the node sending this message." -msgstr "发送此信息的节点的标识符。" +msgid "" +"A dataclass containing serialized data from an array-like or tensor-like " +"object along with some metadata about it." +msgstr "数据类,包含数组类或张量类对象的序列化数据以及相关元数据。" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of +#: flwr.common.record.parametersrecord.Array:6 of #, fuzzy -msgid "An identifier for the node receiving this message." -msgstr "接收此信息的节点的标识符。" +msgid "" +"A string representing the data type of the serialised object (e.g. " +"`np.float32`)" +msgstr "表示序列化对象数据类型的字符串(例如 `np.float32`)" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of +#: flwr.common.record.parametersrecord.Array:8 of #, fuzzy -msgid "An identifier for the message this message replies to." -msgstr "该信息回复的信息的标识符。" +msgid "" +"A list representing the shape of the unserialized array-like object. This" +" is used to deserialize the data (depending on the serialization method) " +"or simply as a metadata field." +msgstr "代表未序列化数组对象形状的列表。它可用于反序列化数据(取决于序列化方法),或仅作为元数据字段使用。" -#: flwr.common.message.Metadata:13 of +#: flwr.common.record.parametersrecord.Array:12 of #, fuzzy msgid "" -"An identifier for grouping messages. In some settings, this is used as " -"the FL round." 
-msgstr "用于分组报文的标识符。在某些设置中,它被用作 FL 轮。" +"A string indicating the type of serialisation mechanism used to generate " +"the bytes in `data` from an array-like or tensor-like object." +msgstr "表示序列化机制类型的字符串,用于从类似数组或类似张量的对象中生成 `data` 中的字节。" -#: flwr.common.message.Metadata:16 of +#: flwr.common.record.parametersrecord.Array:15 of #, fuzzy -msgid "Time-to-live for this message in seconds." -msgstr "该信息的有效时间。" +msgid "A buffer of bytes containing the data." +msgstr "包含数据的字节缓冲区。" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of +#: ../../source/ref-api/flwr.common.Array.rst:26::1 #, fuzzy -msgid "A string that encodes the action to be executed on the receiving end." -msgstr "编码接收端要执行的操作的字符串。" +msgid ":py:obj:`numpy `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: flwr.common.message.Metadata:21 of +#: ../../source/ref-api/flwr.common.Array.rst:26::1 +#: flwr.common.record.parametersrecord.Array.numpy:1 of #, fuzzy -msgid "" -"An identifier that can be used when loading a particular data partition " -"for a ClientApp. Making use of this identifier is more relevant when " -"conducting simulations." -msgstr "为 ClientApp 加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" +msgid "Return the array as a NumPy array." +msgstr "以 NumPy ndarrays 列表形式返回模型参数" -#: flwr.common.Metadata.created_at:1::1 of +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid ":py:obj:`created_at `\\" -msgstr ":py:obj:`ttl `\\" +msgid ":py:obj:`dtype `\\" +msgstr ":py:obj:`dtype `\\" -#: flwr.common.Metadata.created_at:1 -#: flwr.common.Metadata.created_at:1::1 of +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid "Unix timestamp when the message was created." 
-msgstr "创建信息时的 Unix 时间戳。" +msgid ":py:obj:`shape `\\" +msgstr "server.strategy.Strategy" -#: flwr.common.Metadata.created_at:1::1 of +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid ":py:obj:`dst_node_id `\\" -msgstr ":py:obj:`dst_node_id `\\" +msgid ":py:obj:`stype `\\" +msgstr "server.strategy.Strategy" -#: flwr.common.Metadata.created_at:1::1 of +#: flwr.common.record.parametersrecord.Array.numpy:1::1 of #, fuzzy -msgid ":py:obj:`group_id `\\" -msgstr ":py:obj:`group_id `\\" +msgid ":py:obj:`data `\\" +msgstr ":py:obj:`data `\\" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.group_id:1 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:2 #, fuzzy -msgid "An identifier for grouping messages." -msgstr "用于分组信息的标识符。" +msgid "ClientMessage" +msgstr "客户端" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid ":py:obj:`message_id `\\" -msgstr ":py:obj:`message_id `\\" +msgid ":py:obj:`evaluate_res `\\" +msgstr ":py:obj:`evaluate_res `\\" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid ":py:obj:`message_type `\\" -msgstr ":py:obj:`message_type `\\" +msgid ":py:obj:`fit_res `\\" +msgstr ":py:obj:`fit_res `\\" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid ":py:obj:`partition_id `\\" -msgstr ":py:obj:`partition_id `\\" +msgid "" +":py:obj:`get_parameters_res " +"`\\" +msgstr "" +":py:obj:`get_parameters_res " +"`\\" -#: flwr.common.Metadata.created_at:1::1 -#: flwr.common.Metadata.partition_id:1 of +#: ../../source/ref-api/flwr.common.ClientMessage.rst:31::1 #, fuzzy -msgid "An identifier telling which data partition a ClientApp should use." 
-msgstr "告诉 ClientApp 应使用哪个数据分区的标识符。" +msgid "" +":py:obj:`get_properties_res " +"`\\" +msgstr "" +":py:obj:`get_properties_res " +"`\\" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.Code.rst:2 #, fuzzy -msgid ":py:obj:`reply_to_message `\\" -msgstr ":py:obj:`reply_to_message `\\" +msgid "Code" +msgstr "代码" -#: flwr.common.Metadata.created_at:1::1 of +#: flwr.common.typing.Code:1 of #, fuzzy -msgid ":py:obj:`run_id `\\" -msgstr ":py:obj:`run_id `\\" +msgid "Bases: :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`~enum.Enum`" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid ":py:obj:`src_node_id `\\" -msgstr ":py:obj:`src_node_id `\\" +msgid ":py:obj:`OK `\\" +msgstr ":py:obj:`OK `\\" -#: flwr.common.Metadata.created_at:1::1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid ":py:obj:`ttl `\\" -msgstr ":py:obj:`ttl `\\" +msgid "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PROPERTIES_NOT_IMPLEMENTED " +"`\\" -#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 -#: of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid "Time-to-live for this message." 
-msgstr "该信息的有效时间。" +msgid "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" +msgstr "" +":py:obj:`GET_PARAMETERS_NOT_IMPLEMENTED " +"`\\" -#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy -msgid "MetricsRecord" -msgstr "MetricsRecord" +msgid ":py:obj:`FIT_NOT_IMPLEMENTED `\\" +msgstr ":py:obj:`FIT_NOT_IMPLEMENTED `\\" -#: flwr.common.record.metricsrecord.MetricsRecord:1 of +#: ../../source/ref-api/flwr.common.Code.rst:26::1 #, fuzzy msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " -":py:class:`~typing.List`\\ [:py:class:`int`] | :py:class:`~typing.List`\\" -" [:py:class:`float`]]" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " -":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " -":py:class:`~typing.List`\\ [:py:class:`float`]]]" +":py:obj:`EVALUATE_NOT_IMPLEMENTED " +"`\\" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.Config.rst:2 #, fuzzy -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +msgid "Config" +msgstr "配置日志记录" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: ../../source/ref-api/flwr.common.ConfigsRecord.rst:2 #, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +msgid "ConfigsRecord" +msgstr "配置日志记录" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: flwr.common.record.configsrecord.ConfigsRecord:1 of #, fuzzy -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgid "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | :py:class:`str` |" +" :py:class:`bytes` | :py:class:`bool` | :py:class:`list`\\ " 
+"[:py:class:`int`] | :py:class:`list`\\ [:py:class:`float`] | " +":py:class:`list`\\ [:py:class:`str`] | :py:class:`list`\\ " +"[:py:class:`bytes`] | :py:class:`list`\\ [:py:class:`bool`]]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`str`, :py:class:`bytes`, :py:class:`bool`, " +":py:class:`~typing.List`\\ [:py:class:`int`], :py:class:`~typing.List`\\ " +"[:py:class:`float`], :py:class:`~typing.List`\\ [:py:class:`str`], " +":py:class:`~typing.List`\\ [:py:class:`bytes`], " +":py:class:`~typing.List`\\ [:py:class:`bool`]]]" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +#: flwr.common.record.configsrecord.ConfigsRecord:3 of +msgid "" +"A :code:`ConfigsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`ConfigsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +#: flwr.common.record.configsrecord.ConfigsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `str`, `int`, `float`, `bytes`" +" as defined in `ConfigsScalar`) and lists of such types (see " +"`ConfigsScalarList`)." +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +#: flwr.common.record.configsrecord.ConfigsRecord:13 of +msgid "" +"A boolean indicating whether config passed should be deleted from the " +"input dictionary immediately after adding them to the record. When set to" +" True, the data is duplicated in memory. 
If memory is a concern, set it " +"to False." +msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy +#: flwr.common.record.configsrecord.ConfigsRecord:21 of msgid "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" +"The usage of a :code:`ConfigsRecord` is envisioned for sending " +"configuration values telling the target node how to perform a certain " +"action (e.g. train/evaluate a model ). You can use standard Python built-" +"in types such as :code:`float`, :code:`str` , :code:`bytes`. All types " +"allowed are defined in :code:`flwr.common.ConfigsRecordValues`. While " +"lists are supported, we encourage you to use a :code:`ParametersRecord` " +"instead if these are of high dimensionality." msgstr "" -":py:obj:`update `\\ \\(\\[E\\, " -"\\]\\*\\*F\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" +#: flwr.common.record.configsrecord.ConfigsRecord:29 of +msgid "" +"Let's see some examples of how to construct a :code:`ConfigsRecord` from " +"scratch:" +msgstr "" -#: ../../source/ref-api/flwr.common.NDArray.rst:2 -#, fuzzy -msgid "NDArray" -msgstr "NDArray" +#: flwr.common.record.configsrecord.ConfigsRecord:42 of +msgid "" +"Just like the other types of records in a :code:`flwr.common.RecordSet`, " +"types are enforced. 
If you need to add a custom data structure or object," +" we recommend to serialise it into bytes and save it as such (bytes are " +"allowed in a :code:`ConfigsRecord`)" +msgstr "" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`tensors `\\" -msgstr ":py:obj:`tensors `\\" +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`tensor_type `\\" -msgstr ":py:obj:`tensor_type `\\" +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 +#: collections.abc.MutableMapping.clear:1::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:1 +#: flwr.common.record.metricsrecord.MetricsRecord.count_bytes:1 +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:1 of #, fuzzy -msgid "ParametersRecord" -msgstr "参数" +msgid "Return number of Bytes stored in this object." +msgstr "返回存储在此对象中的字节数。" -#: flwr.common.record.parametersrecord.ParametersRecord:1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" -msgstr "" -"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " -"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.common.record.parametersrecord.ParametersRecord:3 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -"A dataclass storing named Arrays in order. This means that it holds " -"entries as an OrderedDict[str, Array]. 
ParametersRecord objects can be " -"viewed as an equivalent to PyTorch's state_dict, but holding serialised " -"tensors instead." -msgstr "" -"按顺序存储命名数组的数据类。这意味着它以 OrderedDict[str, Array] 的形式保存条目。ParametersRecord " -"对象相当于 PyTorch 的 state_dict,但它保存的是序列化的张量。" +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`clear `\\ \\(\\)" -msgstr ":py:obj:`clear `\\ \\(\\)" +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`count_bytes `\\ \\(\\)" -msgstr ":py:obj:`count_bytes `\\ \\(\\)" +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.pop:1 of #, fuzzy -msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgid "If key is not found, d is returned if given, otherwise KeyError is raised." +msgstr "如果未找到 key,则返回 d(如果给定),否则引发 KeyError。" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`items `\\ \\(\\)" -msgstr ":py:obj:`items `\\ \\(\\)" +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`keys `\\ \\(\\)" -msgstr ":py:obj:`keys `\\ \\(\\)" +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.popitem:1 of +msgid "as a 2-tuple; but raise KeyError if D is empty." 
+msgstr "" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgid "" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`update `\\ \\(\\[E\\, " +":py:obj:`update `\\ \\(\\[E\\, " "\\]\\*\\*F\\)" msgstr "" -":py:obj:`update `\\ \\(\\[E\\, " +":py:obj:`update `\\ \\(\\[E\\, " "\\]\\*\\*F\\)" -#: flwr.common.record.typeddict.TypedDict.clear:1::1 of -#, fuzzy -msgid ":py:obj:`values `\\ \\(\\)" -msgstr ":py:obj:`values `\\ \\(\\)" - -#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of -#, fuzzy +#: collections.abc.MutableMapping.clear:1::1 +#: collections.abc.MutableMapping.update:1 of msgid "" -"Note that a small amount of Bytes might also be included in this counting" -" that correspond to metadata of the serialized object (e.g. of NumPy " -"array) needed for deseralization." -msgstr "请注意,该计数中还可能包含少量字节,这些字节与序列化对象(如 NumPy 数组)的元数据相对应,需要进行去eralization。" +"If E present and has a .keys() method, does: for k in E: D[k] = E[k] " +"If E present and lacks .keys() method, does: for (k, v) in E: D[k] = " +"v In either case, this is followed by: for k, v in F.items(): D[k] = v" +msgstr "" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "ReconnectIns" -msgstr "启用 SSL 连接" +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" -#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 +#: flwr.common.record.configsrecord.ConfigsRecord.count_bytes:3 of #, fuzzy -msgid ":py:obj:`seconds `\\" -msgstr ":py:obj:`seconds `\\" +msgid "This function counts booleans as occupying 1 Byte." 
+msgstr "该函数将布尔值计算为占用 1 个字节。" -#: ../../source/ref-api/flwr.common.RecordSet.rst:2 +#: ../../source/ref-api/flwr.common.Context.rst:2 #, fuzzy -msgid "RecordSet" -msgstr "RecordSet" +msgid "Context" +msgstr "背景" -#: flwr.common.RecordSet.configs_records:1::1 of +#: flwr.common.context.Context:3 of #, fuzzy -msgid ":py:obj:`configs_records `\\" -msgstr ":py:obj:`configs_records `\\" +msgid "The ID that identifies the node." +msgstr "错误的标识符。" -#: flwr.common.RecordSet.configs_records:1 -#: flwr.common.RecordSet.configs_records:1::1 of -#, fuzzy -msgid "Dictionary holding ConfigsRecord instances." -msgstr "包含 ConfigsRecord 实例的字典。" +#: flwr.common.context.Context:5 of +msgid "" +"A config (key/value mapping) unique to the node and independent of the " +"`run_config`. This config persists across all runs this node participates" +" in." +msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of +#: flwr.common.context.Context:8 of #, fuzzy -msgid ":py:obj:`metrics_records `\\" -msgstr ":py:obj:`metrics_records `\\" +msgid "" +"Holds records added by the entity in a given run and that will stay " +"local. This means that the data it holds will never leave the system it's" +" running from. This can be used as an intermediate storage or scratchpad " +"when executing mods. It can also be used as a memory to access at " +"different points during the lifecycle of this entity (e.g. across " +"multiple rounds)" +msgstr "保存实体在给定运行中添加的记录,这些记录将保留在本地。这意味着它保存的数据永远不会离开运行的系统。在执行模式时,它可用作中间存储或抓取板。它还可以作为存储器,在实体生命周期的不同阶段(如多轮)进行访问。" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.metrics_records:1 of -#, fuzzy -msgid "Dictionary holding MetricsRecord instances." -msgstr "保存 MetricsRecord 实例的字典。" +#: flwr.common.context.Context:15 of +msgid "" +"A config (key/value mapping) held by the entity in a given run and that " +"will stay local. It can be used at any point during the lifecycle of this" +" entity (e.g. 
across multiple rounds)" +msgstr "" -#: flwr.common.RecordSet.configs_records:1::1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid ":py:obj:`parameters_records `\\" -msgstr ":py:obj:`parameters_records `\\" +msgid ":py:obj:`node_id `\\" +msgstr ":py:obj:`src_node_id `\\" -#: flwr.common.RecordSet.configs_records:1::1 -#: flwr.common.RecordSet.parameters_records:1 of +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid "Dictionary holding ParametersRecord instances." -msgstr "存放 ParametersRecord 实例的字典。" +msgid ":py:obj:`node_config `\\" +msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid "ServerMessage" -msgstr "服务器端" +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.Context.rst:31::1 #, fuzzy -msgid ":py:obj:`evaluate_ins `\\" -msgstr ":py:obj:`evaluate_ins `\\" +msgid ":py:obj:`run_config `\\" +msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:2 #, fuzzy -msgid ":py:obj:`fit_ins `\\" -msgstr ":py:obj:`fit_ins `\\" +msgid "DisconnectRes" +msgstr "断开Res" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.DisconnectRes.rst:28::1 #, fuzzy -msgid "" -":py:obj:`get_parameters_ins " -"`\\" -msgstr "" -":py:obj:`get_parameters_ins " -"`\\" +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" -#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 +#: ../../source/ref-api/flwr.common.Error.rst:2 #, fuzzy -msgid "" -":py:obj:`get_properties_ins " -"`\\" -msgstr "" -":py:obj:`get_properties_ins " -"`\\" +msgid "Error" +msgstr "错误" -#: ../../source/ref-api/flwr.common.Status.rst:2 +#: flwr.common.message.Error:3 of #, fuzzy -msgid "Status" -msgstr "客户端状态。" +msgid "An 
identifier for the error." +msgstr "错误的标识符。" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 +#: flwr.common.message.Error:5 of #, fuzzy -msgid ":py:obj:`code `\\" -msgstr ":py:obj:`code `\\" +msgid "A reason for why the error arose (e.g. an exception stack-trace)" +msgstr "出错原因(如异常堆栈跟踪)" -#: ../../source/ref-api/flwr.common.Status.rst:29::1 +#: flwr.common.Error.code:1::1 of #, fuzzy -msgid ":py:obj:`message `\\" -msgstr ":py:obj:`message `\\" +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" -#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 +#: flwr.common.Error.code:1 flwr.common.Error.code:1::1 of #, fuzzy -msgid "array\\_from\\_numpy" -msgstr "array\\_from\\_numpy" +msgid "Error code." +msgstr "错误代码。" -#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 +#: flwr.common.Error.code:1::1 of #, fuzzy -msgid "bytes\\_to\\_ndarray" -msgstr "bytes\\_to\\_ndarray" +msgid ":py:obj:`reason `\\" +msgstr ":py:obj:`reason `\\" -#: ../../source/ref-api/flwr.common.configure.rst:2 +#: flwr.common.Error.code:1::1 flwr.common.Error.reason:1 of #, fuzzy -msgid "configure" -msgstr "配置日志记录" +msgid "Reason reported about the error." +msgstr "报告的错误原因。" -#: ../../source/ref-api/flwr.common.event.rst:2 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:2 #, fuzzy -msgid "event" -msgstr "事件" +msgid "EvaluateIns" +msgstr "说明" -#: ../../source/ref-api/flwr.common.log.rst:2 +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 #, fuzzy -msgid "log" -msgstr "登录" - -#: logging.Logger.log:3 of -msgid "" -"To pass exception information, use the keyword argument exc_info with a " -"true value, e.g." 
-msgstr "要传递异常信息,请使用带 true 值的关键字参数 exc_info,例如。" +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: logging.Logger.log:6 of -#, python-format -msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" -msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +#: ../../source/ref-api/flwr.common.EvaluateIns.rst:29::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:2 #, fuzzy -msgid "ndarray\\_to\\_bytes" -msgstr "ndarray\\_to\\_bytes" +msgid "EvaluateRes" +msgstr "评估Res" -#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid "ndarrays\\_to\\_parameters" -msgstr "ndarrays\\_to\\_parameters" +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: ../../source/ref-api/flwr.common.now.rst:2 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid "now" -msgstr "现在" +msgid ":py:obj:`loss `\\" +msgstr ":py:obj:`loss `\\" -#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 #, fuzzy -msgid "parameters\\_to\\_ndarrays" -msgstr "parameters\\_to\\_ndarrays" +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" -#: ../../source/ref-api/flwr.server.rst:2 -msgid "server" -msgstr "服务器" +#: ../../source/ref-api/flwr.common.EvaluateRes.rst:31::1 +#, fuzzy +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: ../../source/ref-api/flwr.common.EventType.rst:2 #, fuzzy -msgid ":py:obj:`run_server_app `\\ \\(\\)" -msgstr ":py:obj:`run_server_app `\\ \\(\\)" +msgid "EventType" +msgstr "返回类型" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.run_serverapp.run_server_app:1 of +#: flwr.common.telemetry.EventType:1 of #, fuzzy 
-msgid "Run Flower server app." -msgstr "Flower 服务器。" +msgid "Bases: :py:class:`str`, :py:class:`~enum.Enum`" +msgstr "Bases: :py:class:`str`, :py:class:`~enum.Enum`" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`run_superlink `\\ \\(\\)" -msgstr ":py:obj:`run_superlink `\\ \\(\\)" +msgid "" +":py:obj:`encode `\\ \\(\\[encoding\\, " +"errors\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.run_superlink:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.encode:1 of #, fuzzy -msgid "Run Flower SuperLink (Driver API and Fleet API)." -msgstr "运行 Flower 服务器(Driver API 和 Fleet API)。" +msgid "Encode the string using the codec registered for encoding." +msgstr "使用注册的编码解码器对字符串进行编码。" -#: ../../source/ref-api/flwr.server.rst:24::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" -msgstr "" -":py:obj:`start_server `\\ \\(\\*\\[\\, " -"server\\_address\\, server\\, ...\\]\\)" +":py:obj:`replace `\\ \\(old\\, new\\[\\, " +"count\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:24::1 -#: flwr.server.app.start_server:1 of -msgid "Start a Flower server using the gRPC transport layer." -msgstr "使用 gRPC 传输层启动 Flower 服务器。" +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.replace:1 of +#, fuzzy +msgid "Return a copy with all occurrences of substring old replaced by new." 
+msgstr "返回用 new 替换子串 old 的所有出现次数的副本。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`ClientManager `\\ \\(\\)" -msgstr ":py:obj:`ClientManager `\\ \\(\\)" +msgid "" +":py:obj:`split `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.ClientManager:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rsplit:1 flwr.common.EventType.split:1 of #, fuzzy -msgid "Abstract base class for managing Flower clients." -msgstr "Flower 客户端的抽象基类。" +msgid "" +"Return a list of the substrings in the string, using sep as the separator" +" string." +msgstr "使用 sep 作为分隔符,返回字符串中的子字符串列表。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`Driver `\\ \\(\\)" -msgstr ":py:obj:`run_driver_api `\\ \\(\\)" +msgid "" +":py:obj:`rsplit `\\ \\(\\[sep\\, " +"maxsplit\\]\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.driver.driver.Driver:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Abstract base Driver class for the Driver API." -msgstr "Flower 客户端的抽象基类。" +msgid ":py:obj:`join `\\ \\(iterable\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.join:1 of #, fuzzy -msgid ":py:obj:`History `\\ \\(\\)" -msgstr ":py:obj:`History `\\ \\(\\)" +msgid "Concatenate any number of strings." +msgstr "连接任意数量的字符串。" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.history.History:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "History class for training and/or evaluation metrics collection." 
-msgstr "**hist** -- 包含训练和评估指标的对象。" +msgid ":py:obj:`capitalize `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.capitalize:1 of #, fuzzy -msgid "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" -msgstr "" -":py:obj:`LegacyContext `\\ \\(state\\[\\, " -"config\\, strategy\\, ...\\]\\)" +msgid "Return a capitalized version of the string." +msgstr "返回字符串的大写版本。" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.compat.legacy_context.LegacyContext:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Legacy Context." -msgstr "传承背景。" +msgid ":py:obj:`casefold `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.casefold:1 of #, fuzzy -msgid "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" -msgstr "" -":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " -"strategy\\]\\)" +msgid "Return a version of the string suitable for caseless comparisons." +msgstr "返回适合无例比较的字符串版本。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " -"strategy\\, ...\\]\\)" -msgstr "server.strategy.Strategy" +msgid ":py:obj:`title `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_app.ServerApp:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.title:1 of #, fuzzy -msgid "Flower ServerApp." -msgstr "Flower 服务器。" +msgid "Return a version of the string where each word is titlecased." 
+msgstr "返回字符串的版本,其中每个单词都使用了标题大小写。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," -" round\\_timeout\\]\\)" -msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +":py:obj:`center `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.server_config.ServerConfig:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.center:1 of #, fuzzy -msgid "Flower server config." -msgstr "Flower 服务器。" +msgid "Return a centered string of length width." +msgstr "返回客户端的属性集。" -#: ../../source/ref-api/flwr.server.rst:38::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" -msgstr ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgid "" +":py:obj:`count `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: ../../source/ref-api/flwr.server.rst:38::1 -#: flwr.server.client_manager.SimpleClientManager:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Provides a pool of available clients." -msgstr "使用部分可用客户进行评估。" +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]." +msgstr "返回子字符串 sub 在字符串 S[start:end] 中非重叠出现的次数。" -#: ../../source/ref-api/flwr.server.rst:57::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`flwr.server.strategy `\\" -msgstr "server.strategy.Strategy" - -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.strategy:1 of -msgid "Contains the strategy abstraction and different implementations." 
-msgstr "包含策略抽象和不同的实现方法。" +msgid "" +":py:obj:`expandtabs `\\ " +"\\(\\[tabsize\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.rst:57::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.expandtabs:1 of #, fuzzy -msgid ":py:obj:`flwr.server.workflow `\\" -msgstr "server.strategy.Strategy" +msgid "Return a copy where all tab characters are expanded using spaces." +msgstr "返回使用空格扩展所有制表符的副本。" -#: ../../source/ref-api/flwr.server.rst:57::1 -#: flwr.server.workflow:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Workflows." -msgstr "工作流程" +msgid "" +":py:obj:`find `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.ClientManager.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "ClientManager" -msgstr "客户端" +msgid "" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]." +msgstr "返回在 S 中找到子串 sub 的最低索引,且 sub 包含在 S[start:end] 中。" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`all `\\ \\(\\)" -msgstr ":py:obj:`all `\\ \\(\\)" +msgid ":py:obj:`partition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`partition_id `\\" -#: flwr.server.client_manager.ClientManager.all:1 -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.all:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.partition:1 flwr.common.EventType.rpartition:1 of #, fuzzy -msgid "Return all available clients." -msgstr "返回所有可用客户。" +msgid "Partition the string into three parts using the given separator." 
+msgstr "使用给定的分隔符将字符串分为三部分。" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`num_available `\\ \\(\\)" -msgstr ":py:obj:`num_available `\\ \\(\\)" +msgid "" +":py:obj:`index `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.num_available:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.num_available:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Return the number of available clients." -msgstr "返回样本大小和所需的可用客户数量。" +msgid "" +":py:obj:`ljust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.ljust:1 of #, fuzzy -msgid ":py:obj:`register `\\ \\(client\\)" -msgstr ":py:obj:`register `\\ \\(client\\)" +msgid "Return a left-justified string of length width." +msgstr "返回长度为 width 的左对齐字符串。" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.register:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.register:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Register Flower ClientProxy instance." 
-msgstr "注册 Flower ClientProxy 实例。" +msgid ":py:obj:`lower `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lower:1 of #, fuzzy -msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -msgstr "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +msgid "Return a copy of the string converted to lowercase." +msgstr "返回转换为小写的字符串副本。" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.sample:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.sample:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Sample a number of Flower ClientProxy instances." -msgstr "取样若干 Flower ClientProxy 实例。" +msgid ":py:obj:`lstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.lstrip:1 of #, fuzzy -msgid ":py:obj:`unregister `\\ \\(client\\)" -msgstr ":py:obj:`unregister `\\ \\(client\\)" +msgid "Return a copy of the string with leading whitespace removed." +msgstr "返回去掉前导空白的字符串副本。" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.unregister:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Unregister Flower ClientProxy instance." 
-msgstr "取消注册 Flower ClientProxy 实例。" +msgid "" +":py:obj:`rfind `\\ \\(sub\\[\\, start\\[\\, " +"end\\]\\]\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.client_manager.ClientManager.all:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" -msgstr "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\, timeout\\)" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]." +msgstr "返回在 S 中找到子串 sub 的最高索引,且 sub 包含在 S[start:end] 中。" -#: flwr.server.client_manager.ClientManager.all:1::1 -#: flwr.server.client_manager.ClientManager.wait_for:1 -#: flwr.server.client_manager.SimpleClientManager.all:1::1 -#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Wait until at least `num_clients` are available." -msgstr "等待至少 `num_clients` 可用。" +msgid "" +":py:obj:`rindex `\\ \\(sub\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr ":py:obj:`Context `\\ \\(state\\)" -#: flwr.server.client_manager.ClientManager.num_available:3 -#: flwr.server.client_manager.SimpleClientManager.num_available:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "**num_available** -- The number of currently available clients." -msgstr "**num_available** -- 当前可用客户端的数量。" +msgid "" +":py:obj:`rjust `\\ \\(width\\[\\, " +"fillchar\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.client_manager.ClientManager.register:6 -#: flwr.server.client_manager.SimpleClientManager.register:6 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rjust:1 of #, fuzzy -msgid "" -"**success** -- Indicating if registration was successful. False if " -"ClientProxy is already registered or can not be registered for any " -"reason." 
-msgstr "**success** -- 表示注册是否成功。如果 ClientProxy 已注册或因故无法注册,则为 False。" +msgid "Return a right-justified string of length width." +msgstr "返回长度为 width 的右对齐字符串。" -#: flwr.server.client_manager.ClientManager.unregister:3 -#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "This method is idempotent." -msgstr "这种方法是幂等的。" +msgid ":py:obj:`rstrip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.Driver.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.rstrip:1 of #, fuzzy -msgid "Driver" -msgstr "服务器" +msgid "Return a copy of the string with trailing whitespace removed." +msgstr "返回去掉尾部空白的字符串副本。" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" -msgstr "" -":py:obj:`create_message `\\ " -"\\(content\\, message\\_type\\, ...\\)" +msgid ":py:obj:`rpartition `\\ \\(sep\\, \\/\\)" +msgstr ":py:obj:`partition_id `\\" -#: flwr.server.driver.driver.Driver.create_message:1 -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Create a new message with specified parameters." -msgstr "使用指定参数创建新信息。" +msgid "" +":py:obj:`splitlines `\\ " +"\\(\\[keepends\\]\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.splitlines:1 of #, fuzzy -msgid ":py:obj:`get_node_ids `\\ \\(\\)" -msgstr ":py:obj:`get_node_ids `\\ \\(\\)" +msgid "Return a list of the lines in the string, breaking at line boundaries." 
+msgstr "返回字符串中的行列表,以行为分界线。" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.get_node_ids:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Get node IDs." -msgstr "获取节点 ID。" +msgid ":py:obj:`strip `\\ \\(\\[chars\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.strip:1 of #, fuzzy -msgid "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" -msgstr "" -":py:obj:`pull_messages `\\ " -"\\(message\\_ids\\)" +msgid "Return a copy of the string with leading and trailing whitespace removed." +msgstr "返回去掉前导和尾部空白的字符串副本。" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.pull_messages:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Pull messages based on message IDs." -msgstr "根据信息 ID 提取信息。" +msgid ":py:obj:`swapcase `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.swapcase:1 of #, fuzzy msgid "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" -msgstr "" -":py:obj:`push_messages `\\ " -"\\(messages\\)" +"Convert uppercase characters to lowercase and lowercase characters to " +"uppercase." +msgstr "将大写字母转换为小写字母,将小写字母转换为大写字母。" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.push_messages:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Push messages to specified node IDs." 
-msgstr "向指定的节点 ID 推送信息。" +msgid ":py:obj:`translate `\\ \\(table\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.create_message:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.translate:1 of #, fuzzy -msgid "" -":py:obj:`send_and_receive `\\ " -"\\(messages\\, \\*\\[\\, timeout\\]\\)" -msgstr "" -"Flower 1.0: ``start_server(..., " -"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " -"...)``" +msgid "Replace each character in the string using the given translation table." +msgstr "使用给定的翻译表替换字符串中的每个字符。" -#: flwr.server.driver.driver.Driver.create_message:1::1 -#: flwr.server.driver.driver.Driver.send_and_receive:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Push messages to specified node IDs and pull the reply messages." -msgstr "向指定的节点 ID 推送信息并提取回复信息。" +msgid ":py:obj:`upper `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.driver.driver.Driver.create_message:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.upper:1 of #, fuzzy -msgid "" -"This method constructs a new `Message` with given content and metadata. " -"The `run_id` and `src_node_id` will be set automatically." -msgstr "本方法使用给定的内容和元数据构建新的 `Message` 。run_id \"和 \"src_node_id \"将自动设置。" +msgid "Return a copy of the string converted to uppercase." +msgstr "返回转换为大写字符串的副本。" -#: flwr.server.driver.driver.Driver.create_message:6 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -"The content for the new message. This holds records that are to be sent " -"to the destination node." 
-msgstr "新信息的内容。其中包含要发送到目的节点的记录。" +":py:obj:`startswith `\\ \\(prefix\\[\\," +" start\\[\\, end\\]\\]\\)" +msgstr ":py:obj:`Status `\\ \\(code\\, message\\)" -#: flwr.server.driver.driver.Driver.create_message:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#, fuzzy +msgid "Return True if S starts with the specified prefix, False otherwise." +msgstr "如果 S 以指定前缀开头,则返回 True,否则返回 False。" + +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -"The type of the message, defining the action to be executed on the " -"receiving end." -msgstr "信息类型,定义接收端要执行的操作。" +":py:obj:`endswith `\\ \\(suffix\\[\\, " +"start\\[\\, end\\]\\]\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.create_message:12 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "The ID of the destination node to which the message is being sent." -msgstr "信息发送目的地节点的 ID。" +msgid "Return True if S ends with the specified suffix, False otherwise." +msgstr "如果 S 以指定后缀结束,则返回 True,否则返回 False。" -#: flwr.server.driver.driver.Driver.create_message:14 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -"The ID of the group to which this message is associated. In some " -"settings, this is used as the FL round." -msgstr "与该信息相关联的组的 ID。在某些设置中,它被用作 FL 轮。" - -#: flwr.server.driver.driver.Driver.create_message:17 of -#, fuzzy -msgid "" -"Time-to-live for the round trip of this message, i.e., the time from " -"sending this message to receiving a reply. It specifies in seconds the " -"duration for which the message and its potential reply are considered " -"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " -"used." 
-msgstr "此报文往返的有效时间,即从发送此报文到收到回复的时间。它规定了信息及其潜在回复被视为有效的持续时间。" +":py:obj:`removeprefix `\\ " +"\\(prefix\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.create_message:23 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removeprefix:1 of #, fuzzy -msgid "" -"**message** -- A new `Message` instance with the specified content and " -"metadata." -msgstr "**message** -- 具有指定内容和元数据的新 \"信息 \"实例。" +msgid "Return a str with the given prefix string removed if present." +msgstr "返回一个字符串,如果存在,则去掉给定的前缀字符串。" -#: flwr.server.driver.driver.Driver.pull_messages:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -"This method is used to collect messages from the SuperLink that " -"correspond to a set of given message IDs." -msgstr "该方法用于从超级链接中收集与一组给定消息 ID 相对应的消息。" +":py:obj:`removesuffix `\\ " +"\\(suffix\\, \\/\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.pull_messages:6 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.removesuffix:1 of #, fuzzy -msgid "An iterable of message IDs for which reply messages are to be retrieved." -msgstr "要检索回复信息的信息 ID 的可迭代项。" +msgid "Return a str with the given suffix string removed if present." +msgstr "返回一个字符串,如果存在给定的后缀字符串,则将其删除。" -#: flwr.server.driver.driver.Driver.pull_messages:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "**messages** -- An iterable of messages received." -msgstr "**messages** -- 收到的信息迭代。" +msgid ":py:obj:`isascii `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.driver.driver.Driver.push_messages:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isascii:1 of #, fuzzy -msgid "" -"This method takes an iterable of messages and sends each message to the " -"node specified in `dst_node_id`." 
-msgstr "该方法接收一个可迭代的消息,并将每条消息发送到 `dst_node_id` 中指定的节点。" +msgid "Return True if all characters in the string are ASCII, False otherwise." +msgstr "如果字符串中的所有字符都是 ASCII 码,则返回 True,否则返回 False。" -#: flwr.server.driver.driver.Driver.push_messages:6 -#: flwr.server.driver.driver.Driver.send_and_receive:7 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "An iterable of messages to be sent." -msgstr "要发送的信息迭代。" +msgid ":py:obj:`islower `\\ \\(\\)" +msgstr ":py:obj:`now `\\ \\(\\)" -#: flwr.server.driver.driver.Driver.push_messages:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.islower:1 of #, fuzzy -msgid "" -"**message_ids** -- An iterable of IDs for the messages that were sent, " -"which can be used to pull replies." -msgstr "**message_ids** -- 已发送信息的可迭代 ID,可用于提取回复信息。" +msgid "Return True if the string is a lowercase string, False otherwise." +msgstr "如果字符串是小写字符串,则返回 True,否则返回 False。" -#: flwr.server.driver.driver.Driver.send_and_receive:3 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -"This method sends a list of messages to their destination node IDs and " -"then waits for the replies. It continues to pull replies until either all" -" replies are received or the specified timeout duration is exceeded." -msgstr "该方法会向目标节点 ID 发送信息列表,然后等待回复。它会继续提取回复,直到收到所有回复或超过指定的超时时间。" +msgid ":py:obj:`isupper `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.send_and_receive:9 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isupper:1 of #, fuzzy -msgid "" -"The timeout duration in seconds. If specified, the method will wait for " -"replies for this duration. If `None`, there is no time limit and the " -"method will wait until replies for all messages are received." 
-msgstr "超时时间(秒)。如果指定,该方法将在此期限内等待回复。如果指定为 \"无\",则没有时间限制,该方法将等待直到收到所有信息的回复。" +msgid "Return True if the string is an uppercase string, False otherwise." +msgstr "如果字符串是大写字符串,则返回 True,否则返回 False。" -#: flwr.server.driver.driver.Driver.send_and_receive:14 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "**replies** -- An iterable of reply messages received from the SuperLink." -msgstr "**replies** -- 从超级链接收到的回复信息的迭代。" +msgid ":py:obj:`istitle `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.driver.driver.Driver.send_and_receive:19 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.istitle:1 of #, fuzzy -msgid "" -"This method uses `push_messages` to send the messages and `pull_messages`" -" to collect the replies. If `timeout` is set, the method may not return " -"replies for all sent messages. A message remains valid until its TTL, " -"which is not affected by `timeout`." -msgstr "" -"该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。如果设置了 " -"`timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL 之前一直有效,不受 `timeout` 影响。" +msgid "Return True if the string is a title-cased string, False otherwise." +msgstr "如果字符串是带标题的字符串,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.History.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "History" -msgstr "历史" +msgid ":py:obj:`isspace `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isspace:1 of #, fuzzy -msgid "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" -":py:obj:`add_loss_centralized " -"`\\ \\(server\\_round\\, " -"loss\\)" +msgid "Return True if the string is a whitespace string, False otherwise." 
+msgstr "如果字符串是空白字符串,则返回 True,否则返回 False。" -#: flwr.server.history.History.add_loss_centralized:1 -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add one loss entry (from centralized evaluation)." -msgstr "集中评估" +msgid ":py:obj:`isdecimal `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdecimal:1 of #, fuzzy -msgid "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" -msgstr "" -":py:obj:`add_loss_distributed " -"`\\ \\(server\\_round\\, " -"loss\\)" +msgid "Return True if the string is a decimal string, False otherwise." +msgstr "如果字符串是十进制字符串,则返回 True,否则返回 False。" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_loss_distributed:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add one loss entry (from distributed evaluation)." -msgstr "增加一个损失条目(来自分布式评估)。" +msgid ":py:obj:`isdigit `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isdigit:1 of #, fuzzy -msgid "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" -msgstr "" -":py:obj:`add_metrics_centralized " -"`\\ \\(server\\_round\\, " -"metrics\\)" +msgid "Return True if the string is a digit string, False otherwise." +msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_centralized:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add metrics entries (from centralized evaluation)." 
-msgstr "集中评估" +msgid ":py:obj:`isnumeric `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isnumeric:1 of #, fuzzy -msgid "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" -msgstr "" -":py:obj:`add_metrics_distributed " -"`\\ \\(server\\_round\\, " -"metrics\\)" +msgid "Return True if the string is a numeric string, False otherwise." +msgstr "如果字符串是数字字符串,则返回 True,否则返回 False。" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add metrics entries (from distributed evaluation)." -msgstr "定制的集中/分布式评估" +msgid ":py:obj:`isalpha `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: flwr.server.history.History.add_loss_centralized:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalpha:1 of #, fuzzy -msgid "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" -msgstr "" -":py:obj:`add_metrics_distributed_fit " -"`\\ \\(server\\_round\\," -" ...\\)" +msgid "Return True if the string is an alphabetic string, False otherwise." +msgstr "如果字符串是字母字符串,则返回 True,否则返回 False。" -#: flwr.server.history.History.add_loss_centralized:1::1 -#: flwr.server.history.History.add_metrics_distributed_fit:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Add metrics entries (from distributed fit)." -msgstr "添加度量条目(来自分布式拟合)。" +msgid ":py:obj:`isalnum `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isalnum:1 of #, fuzzy -msgid "LegacyContext" -msgstr "遗留上下文" +msgid "Return True if the string is an alpha-numeric string, False otherwise." 
+msgstr "如果字符串是字母数字字符串,则返回 True,否则返回 False。" -#: flwr.server.compat.legacy_context.LegacyContext:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Bases: :py:class:`~flwr.common.context.Context`" -msgstr "Bases: :py:class:`~flwr.common.context.Context`" +msgid ":py:obj:`isidentifier `\\ \\(\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isidentifier:1 of #, fuzzy -msgid ":py:obj:`config `\\" -msgstr "server.strategy.Strategy" +msgid "Return True if the string is a valid Python identifier, False otherwise." +msgstr "如果字符串是有效的 Python 标识符,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`strategy `\\" -msgstr "server.strategy.Strategy" +msgid ":py:obj:`isprintable `\\ \\(\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.isprintable:1 of #, fuzzy -msgid ":py:obj:`client_manager `\\" -msgstr ":py:obj:`client_manager `\\" +msgid "Return True if the string is printable, False otherwise." +msgstr "如果字符串可打印,则返回 True,否则返回 False。" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid ":py:obj:`history `\\" -msgstr "server.strategy.Strategy" +msgid ":py:obj:`zfill `\\ \\(width\\, \\/\\)" +msgstr ":py:obj:`PING `\\" -#: ../../source/ref-api/flwr.server.LegacyContext.rst:32::1 +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.zfill:1 of #, fuzzy -msgid ":py:obj:`state `\\" -msgstr "server.strategy.Strategy" +msgid "" +"Pad a numeric string with zeros on the left, to fill a field of the given" +" width." 
+msgstr "在数字字符串左侧填充零,以填满给定宽度的字段。" -#: ../../source/ref-api/flwr.server.Server.rst:2 -msgid "Server" -msgstr "服务器" - -#: flwr.server.server.Server.client_manager:1::1 of -#, fuzzy -msgid ":py:obj:`client_manager `\\ \\(\\)" -msgstr ":py:obj:`client_manager `\\ \\(\\)" - -#: flwr.server.server.Server.client_manager:1 -#: flwr.server.server.Server.client_manager:1::1 of -#, fuzzy -msgid "Return ClientManager." -msgstr "返回客户端(本身)。" - -#: flwr.server.server.Server.client_manager:1::1 of -#, fuzzy -msgid "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" -msgstr "" -":py:obj:`disconnect_all_clients " -"`\\ \\(timeout\\)" - -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.disconnect_all_clients:1 of -#, fuzzy -msgid "Send shutdown signal to all clients." -msgstr "向所有客户端发送关闭信号。" - -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy msgid "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" +":py:obj:`format `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -":py:obj:`evaluate_round `\\ " -"\\(server\\_round\\, timeout\\)" - -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.evaluate_round:1 of -#, fuzzy -msgid "Validate current global model on a number of clients." -msgstr "当前(全局)模型参数。" - -#: flwr.server.server.Server.client_manager:1::1 of -#, fuzzy -msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" -msgstr ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +":py:obj:`log `\\ \\(level\\, msg\\, \\*args\\, " +"\\*\\*kwargs\\)" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Run federated averaging for a number of rounds." -msgstr "联邦平均动量策略。" +msgid "Return a formatted version of S, using substitutions from args and kwargs." 
+msgstr "使用 args 和 kwargs 的替换,返回 S 的格式化版本。" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" -msgstr "" -":py:obj:`fit_round `\\ \\(server\\_round\\," -" timeout\\)" +msgid ":py:obj:`format_map `\\ \\(mapping\\)" +msgstr ":py:obj:`EventType `\\ \\(value\\)" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.fit_round:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "Perform a single round of federated averaging." -msgstr "本轮联邦学习。" +msgid "Return a formatted version of S, using substitutions from mapping." +msgstr "使用映射中的替换,返回 S 的格式化版本。" -#: flwr.server.server.Server.client_manager:1::1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 #, fuzzy -msgid "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" -msgstr "" -":py:obj:`set_max_workers `\\ " -"\\(max\\_workers\\)" +msgid ":py:obj:`maketrans `\\" +msgstr ":py:obj:`TRAIN `\\" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_max_workers:1 of +#: ../../source/ref-api/flwr.common.EventType.rst:163::1 +#: flwr.common.EventType.maketrans:1 of #, fuzzy -msgid "Set the max_workers used by ThreadPoolExecutor." -msgstr "设置 ThreadPoolExecutor 使用的最大工作器数。" +msgid "Return a translation table usable for str.translate()." +msgstr "返回可用于 str.translate() 的翻译表。" -#: flwr.server.server.Server.client_manager:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" -msgstr ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgid ":py:obj:`PING `\\" +msgstr ":py:obj:`PING `\\" -#: flwr.server.server.Server.client_manager:1::1 -#: flwr.server.server.Server.set_strategy:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Replace server strategy." 
-msgstr "server.strategy" +msgid ":py:obj:`START_CLIENT_ENTER `\\" +msgstr ":py:obj:`START_CLIENT_ENTER `\\" -#: ../../source/ref-api/flwr.server.ServerApp.rst:2 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "ServerApp" -msgstr "服务器" +msgid ":py:obj:`START_CLIENT_LEAVE `\\" +msgstr ":py:obj:`START_CLIENT_LEAVE `\\" -#: flwr.server.server_app.ServerApp:5 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Use the `ServerApp` with an existing `Strategy`:" -msgstr "使用现有策略" +msgid ":py:obj:`START_SERVER_ENTER `\\" +msgstr ":py:obj:`START_SERVER_ENTER `\\" -#: flwr.server.server_app.ServerApp:15 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Use the `ServerApp` with a custom main function:" -msgstr "使用带有自定义主函数的 `ServerApp`:" +msgid ":py:obj:`START_SERVER_LEAVE `\\" +msgstr ":py:obj:`START_SERVER_LEAVE `\\" -#: flwr.server.server_app.ServerApp.main:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid ":py:obj:`main `\\ \\(\\)" -msgstr "server.strategy.Strategy" +msgid "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" -#: flwr.server.server_app.ServerApp.main:1 -#: flwr.server.server_app.ServerApp.main:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Return a decorator that registers the main fn with the server app." -msgstr "返回向服务器应用程序注册 main fn 的装饰器。" +msgid "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "ServerConfig" -msgstr "服务器" +msgid "" +":py:obj:`RUN_SUPEREXEC_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" -#: flwr.server.server_config.ServerConfig:3 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -"All attributes have default values which allows users to configure just " -"the ones they care about." 
-msgstr "所有属性都有默认值,用户只需配置自己关心的属性即可。" +":py:obj:`RUN_SUPEREXEC_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid ":py:obj:`num_rounds `\\" -msgstr ":py:obj:`num_rounds `\\" +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" -#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid ":py:obj:`round_timeout `\\" -msgstr ":py:obj:`round_timeout `\\" +msgid "" +":py:obj:`CLI_FLOWER_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "SimpleClientManager" -msgstr "SimpleClientManager" +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_ENTER " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_ENTER " +"`\\" -#: flwr.server.client_manager.SimpleClientManager:1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" -msgstr "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +msgid "" +":py:obj:`PYTHON_API_RUN_SIMULATION_LEAVE " +"`\\" +msgstr "" +":py:obj:`START_SIMULATION_LEAVE " +"`\\" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid ":py:obj:`all `\\ \\(\\)" -msgstr ":py:obj:`all `\\ \\(\\)" +msgid "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`num_available `\\" -" \\(\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" msgstr "" -":py:obj:`num_available `\\" -" \\(\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" -#: 
flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`register `\\ " -"\\(client\\)" +":py:obj:`RUN_SUPERNODE_ENTER " +"`\\" msgstr "" -":py:obj:`register `\\ " -"\\(client\\)" +":py:obj:`RUN_SUPERLINK_ENTER " +"`\\" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`RUN_SUPERNODE_LEAVE " +"`\\" msgstr "" -":py:obj:`sample `\\ " -"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" +":py:obj:`RUN_SUPERLINK_LEAVE " +"`\\" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`unregister `\\ " -"\\(client\\)" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" msgstr "" -":py:obj:`unregister `\\ " -"\\(client\\)" +":py:obj:`RUN_SERVER_APP_ENTER " +"`\\" -#: flwr.server.client_manager.SimpleClientManager.all:1::1 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" msgstr "" -":py:obj:`wait_for `\\ " -"\\(num\\_clients\\[\\, timeout\\]\\)" +":py:obj:`RUN_SERVER_APP_LEAVE " +"`\\" -#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy msgid "" -"Blocks until the requested number of clients is available or until a " -"timeout is reached. Current timeout default: 1 day." -msgstr "阻塞,直到请求的客户端数量可用或达到超时为止。当前超时默认值:1 天。" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_ENTER " +"`\\" -#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of +#: flwr.common.EventType.capitalize:1::1 of #, fuzzy -msgid "The number of clients to wait for." 
-msgstr "需要等待的客户数量。" +msgid "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" +msgstr "" +":py:obj:`RUN_CLIENT_APP_LEAVE " +"`\\" -#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of +#: flwr.common.EventType.capitalize:3 of #, fuzzy -msgid "The time in seconds to wait for, defaults to 86400 (24h)." -msgstr "以秒为单位的等待时间,默认为 86400(24 小时)。" +msgid "" +"More specifically, make the first character have upper case and the rest " +"lower case." +msgstr "更具体地说,让第一个字符大写,其余字符小写。" -#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of +#: flwr.common.EventType.center:3 flwr.common.EventType.ljust:3 +#: flwr.common.EventType.rjust:3 of #, fuzzy -msgid "**success**" -msgstr "**success**" +msgid "Padding is done using the specified fill character (default is a space)." +msgstr "使用指定的填充字符(默认为空格)进行填充。" -#: ../../source/ref-api/flwr.server.run_driver_api.rst:2 +#: flwr.common.EventType.count:1 of #, fuzzy -msgid "run\\_driver\\_api" -msgstr "flower-driver-api" +msgid "" +"Return the number of non-overlapping occurrences of substring sub in " +"string S[start:end]. Optional arguments start and end are interpreted as" +" in slice notation." +msgstr "返回子串 sub 在字符串 S[start:end] 中非重叠出现的次数。 可选参数 start 和 end 按切分符号解释。" -#: ../../source/ref-api/flwr.server.run_fleet_api.rst:2 +#: flwr.common.EventType.encode:3 of #, fuzzy -msgid "run\\_fleet\\_api" -msgstr "run\\_fleet\\_api" +msgid "encoding" +msgstr "编码" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 +#: flwr.common.EventType.encode:4 of #, fuzzy -msgid "run\\_server\\_app" -msgstr "run\\_server\\_app" +msgid "The encoding in which to encode the string." 
+msgstr "字符串的编码。" -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 +#: flwr.common.EventType.encode:9 of #, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" +msgid "errors" +msgstr "错误" -#: ../../source/ref-api/flwr.server.start_server.rst:2 +#: flwr.common.EventType.encode:6 of #, fuzzy -msgid "start\\_server" -msgstr "server.start_server" - -#: flwr.server.app.start_server:3 of -msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." -msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" - -#: flwr.server.app.start_server:5 of -msgid "" -"A server implementation, either `flwr.server.Server` or a subclass " -"thereof. If no instance is provided, then `start_server` will create one." -msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" - -#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of -msgid "" -"Currently supported values are `num_rounds` (int, default: 1) and " -"`round_timeout` in seconds (float, default: None)." -msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" - -#: flwr.server.app.start_server:12 of -msgid "" -"An implementation of the abstract base class " -"`flwr.server.strategy.Strategy`. If no strategy is provided, then " -"`start_server` will use `flwr.server.strategy.FedAvg`." -msgstr "" -"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" - -#: flwr.server.app.start_server:16 of msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_server` will use " -"`flwr.server.client_manager.SimpleClientManager`." +"The error handling scheme to use for encoding errors. The default is " +"'strict' meaning that encoding errors raise a UnicodeEncodeError. 
Other " +"possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well " +"as any other name registered with codecs.register_error that can handle " +"UnicodeEncodeErrors." msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" +"编码错误的错误处理方案。默认值为 \"strict\",即编码错误会引发 UnicodeEncodeError。 其他可能的值包括 " +"\"ignore\"、\"replace \"和 \"xmlcharrefreplace\",以及通过 codecs.register_error" +" 注册的、可处理 UnicodeEncodeErrror 的其他名称。" -#: flwr.server.app.start_server:21 of +#: flwr.common.EventType.endswith:1 of +#, fuzzy msgid "" -"The maximum length of gRPC messages that can be exchanged with the Flower" -" clients. The default should be sufficient for most models. Users who " -"train very large models might need to increase this value. Note that the " -"Flower clients need to be started with the same value (see " -"`flwr.client.start_client`), otherwise clients will not know about the " -"increased limit and block larger messages." +"Return True if S ends with the specified suffix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. suffix can also be a tuple of strings " +"to try." msgstr "" -"可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " -"客户端需要以相同的值启动(请参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的消息。" +"如果 S 以指定后缀结束,则返回 True,否则返回 False。如果起始位置可选,则从该位置开始测试 S。如果使用可选的 " +"end,则在该位置停止比较 S。后缀也可以是要尝试的字符串元组。" -#: flwr.server.app.start_server:28 of -msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order: * CA certificate. * " -"server certificate. * server private key." 
-msgstr "" -"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " -"服务器私钥。" +#: flwr.common.EventType.expandtabs:3 of +#, fuzzy +msgid "If tabsize is not given, a tab size of 8 characters is assumed." +msgstr "如果未给出制表符大小,则假定制表符大小为 8 个字符。" -#: flwr.server.app.start_server:28 of +#: flwr.common.EventType.find:1 flwr.common.EventType.index:1 of +#, fuzzy msgid "" -"Tuple containing root certificate, server certificate, and private key to" -" start a secure SSL-enabled server. The tuple is expected to have three " -"bytes elements in the following order:" -msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" - -#: flwr.server.app.start_server:32 of -msgid "CA certificate." -msgstr "CA 证书。" - -#: flwr.server.app.start_server:33 of -msgid "server certificate." -msgstr "服务器证书。" - -#: flwr.server.app.start_server:34 of -msgid "server private key." -msgstr "服务器私人密钥。" +"Return the lowest index in S where substring sub is found, such that sub " +"is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." +msgstr "返回在 S 中找到子串 sub 的最低索引,即 sub 包含在 S[start:end] 中。 可选参数 start 和 end 按切分符号解释。" -#: flwr.server.app.start_server:37 of -msgid "**hist** -- Object containing training and evaluation metrics." -msgstr "**hist** -- 包含训练和评估指标的对象。" +#: flwr.common.EventType.find:5 flwr.common.EventType.rfind:5 of +#, fuzzy +msgid "Return -1 on failure." +msgstr "失败时返回-1。" -#: flwr.server.app.start_server:42 of -msgid "Starting an insecure server:" -msgstr "启动不安全的服务器:" +#: flwr.common.EventType.format:1 of +#, fuzzy +msgid "" +"Return a formatted version of S, using substitutions from args and " +"kwargs. The substitutions are identified by braces ('{' and '}')." 
+msgstr "使用来自 args 和 kwargs 的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" -#: flwr.server.app.start_server:46 of -msgid "Starting an SSL-enabled server:" -msgstr "启动支持 SSL 的服务器:" +#: flwr.common.EventType.format_map:1 of +#, fuzzy +msgid "" +"Return a formatted version of S, using substitutions from mapping. The " +"substitutions are identified by braces ('{' and '}')." +msgstr "使用映射中的替换,返回 S 的格式化版本。替换用大括号('{'和'}')标识。" -#: ../../source/ref-api/flwr.server.strategy.rst:2 +#: flwr.common.EventType.index:5 flwr.common.EventType.rindex:5 of #, fuzzy -msgid "strategy" -msgstr "Krum 策略。" +msgid "Raises ValueError when the substring is not found." +msgstr "如果未找到子串,则引发 ValueError。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isalnum:3 of #, fuzzy msgid "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" -msgstr "" -":py:obj:`Bulyan `\\ \\(\\*\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.bulyan.Bulyan:1 of -msgid "Bulyan strategy." -msgstr "Bulyan 策略。" +"A string is alpha-numeric if all characters in the string are alpha-" +"numeric and there is at least one character in the string." +msgstr "如果字符串中的所有字符都是字母数字,且字符串中至少有一个字符,则该字符串为字母数字字符串。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isalpha:3 of #, fuzzy msgid "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" -msgstr "" -":py:obj:`DPFedAvgAdaptive `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\)" +"A string is alphabetic if all characters in the string are alphabetic and" +" there is at least one character in the string." +msgstr "如果字符串中的所有字符都是字母,并且字符串中至少有一个字符,那么该字符串就是字母字符串。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of -msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." 
-msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +#: flwr.common.EventType.isascii:3 of +#, fuzzy +msgid "" +"ASCII characters have code points in the range U+0000-U+007F. Empty " +"string is ASCII too." +msgstr "ASCII 字符的码位范围为 U+0000-U+007F。空字符串也是 ASCII 字符。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isdecimal:3 of #, fuzzy msgid "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" -msgstr "" -":py:obj:`DPFedAvgFixed `\\ " -"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" +"A string is a decimal string if all characters in the string are decimal " +"and there is at least one character in the string." +msgstr "如果字符串中的所有字符都是十进制,并且字符串中至少有一个字符是十进制,那么该字符串就是十进制字符串。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of -msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." -msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +#: flwr.common.EventType.isdigit:3 of +#, fuzzy +msgid "" +"A string is a digit string if all characters in the string are digits and" +" there is at least one character in the string." +msgstr "如果字符串中的所有字符都是数字,并且字符串中至少有一个字符,那么该字符串就是数字字符串。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isidentifier:3 of #, fuzzy msgid "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +"Call keyword.iskeyword(s) to test whether string s is a reserved " +"identifier, such as \"def\" or \"class\"." +msgstr "调用 keyword.iskeyword(s) 测试字符串 s 是否为保留标识符,如 \"def \"或 \"class\"。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: of +#: flwr.common.EventType.islower:3 of #, fuzzy -msgid "Strategy wrapper for central DP with client-side adaptive clipping." 
-msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +msgid "" +"A string is lowercase if all cased characters in the string are lowercase" +" and there is at least one cased character in the string." +msgstr "如果字符串中的所有大小写字符都是小写,且字符串中至少有一个大小写字符,则该字符串为小写字符串。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isnumeric:3 of #, fuzzy msgid "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " -"`\\ " -"\\(...\\)" +"A string is numeric if all characters in the string are numeric and there" +" is at least one character in the string." +msgstr "如果字符串中的所有字符都是数字,且字符串中至少有一个字符,则该字符串为数字字符串。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: of +#: flwr.common.EventType.isprintable:3 of #, fuzzy -msgid "Strategy wrapper for central DP with server-side adaptive clipping." -msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" +msgid "" +"A string is printable if all of its characters are considered printable " +"in repr() or if it is empty." +msgstr "如果字符串的所有字符在 repr() 中都被认为是可打印的,或者字符串为空,那么该字符串就是可打印的。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isspace:3 of #, fuzzy msgid "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyClientSideFixedClipping " -"`\\ " -"\\(...\\)" +"A string is whitespace if all characters in the string are whitespace and" +" there is at least one character in the string." +msgstr "如果字符串中的所有字符都是空格,且字符串中至少有一个字符,则该字符串为空格。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: of +#: flwr.common.EventType.istitle:3 of #, fuzzy -msgid "Strategy wrapper for central DP with client-side fixed clipping." 
-msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +msgid "" +"In a title-cased string, upper- and title-case characters may only follow" +" uncased characters and lowercase characters only cased ones." +msgstr "在标题大小写字符串中,大写和标题大小写字符只能跟在无大小写字符之后,小写字符只能跟在有大小写字符之后。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.isupper:3 of #, fuzzy msgid "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" -msgstr "" -":py:obj:`DifferentialPrivacyServerSideFixedClipping " -"`\\ " -"\\(...\\)" +"A string is uppercase if all cased characters in the string are uppercase" +" and there is at least one cased character in the string." +msgstr "如果字符串中所有带大小写的字符都是大写,并且字符串中至少有一个带大小写的字符,则该字符串为大写字符串。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: of +#: flwr.common.EventType.join:3 of #, fuzzy -msgid "Strategy wrapper for central DP with server-side fixed clipping." -msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" +msgid "" +"The string whose method is called is inserted in between each given " +"string. The result is returned as a new string." +msgstr "方法被调用的字符串会被插入每个给定的字符串之间。结果将以新字符串的形式返回。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.join:6 of #, fuzzy -msgid "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +msgid "Example: '.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" +msgstr "示例:'.'.join(['ab', 'pq', 'rs']) -> 'ab.pq.rs'" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of -msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." 
-msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" +#: flwr.common.EventType.lstrip:3 flwr.common.EventType.rstrip:3 +#: flwr.common.EventType.strip:3 of +#, fuzzy +msgid "If chars is given and not None, remove characters in chars instead." +msgstr "如果给定的是 chars 而不是 None,则删除 chars 中的字符。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.maketrans:3 of #, fuzzy msgid "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"If there is only one argument, it must be a dictionary mapping Unicode " +"ordinals (integers) or characters to Unicode ordinals, strings or None. " +"Character keys will be then converted to ordinals. If there are two " +"arguments, they must be strings of equal length, and in the resulting " +"dictionary, each character in x will be mapped to the character at the " +"same position in y. If there is a third argument, it must be a string, " +"whose characters will be mapped to None in the result." msgstr "" -":py:obj:`FedAdam `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedadam.FedAdam:1 of -msgid "FedAdam - Adaptive Federated Optimization using Adam." -msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" +"如果只有一个参数,则必须是一个将 Unicode 序号(整数)或字符映射到 Unicode 序号、字符串或 None " +"的字典。字符键将被转换为序号。如果有两个参数,它们必须是长度相等的字符串,在生成的字典中,x 中的每个字符将被映射到 y 中相同位置的字符。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.partition:3 of #, fuzzy msgid "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -msgstr "" -":py:obj:`FedAvg `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of -msgid "Federated Averaging strategy." -msgstr "联邦平均策略。" +"This will search for the separator in the string. 
If the separator is " +"found, returns a 3-tuple containing the part before the separator, the " +"separator itself, and the part after it." +msgstr "它会在字符串中搜索分隔符。 如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.partition:7 of #, fuzzy msgid "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedAvgAndroid `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" +"If the separator is not found, returns a 3-tuple containing the original " +"string and two empty strings." +msgstr "如果找不到分隔符,则返回一个包含原始字符串和两个空字符串的 3 元组。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.removeprefix:3 of #, fuzzy msgid "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 of -msgid "Federated Averaging with Momentum strategy." -msgstr "联邦平均动量策略。" +"If the string starts with the prefix string, return string[len(prefix):]." +" Otherwise, return a copy of the original string." +msgstr "如果字符串以前缀字符串开始,则返回 string[len(prefix):]。否则,返回原始字符串的副本。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.removesuffix:3 of #, fuzzy msgid "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedMedian `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"If the string ends with the suffix string and that suffix is not empty, " +"return string[:-len(suffix)]. Otherwise, return a copy of the original " +"string." +msgstr "如果字符串以后缀字符串结尾,且后缀不为空,则返回 string[:-len(suffix)]。否则,返回原始字符串的副本。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedmedian.FedMedian:1 of +#: flwr.common.EventType.replace:5 of #, fuzzy -msgid "Configurable FedMedian strategy implementation." 
-msgstr "可配置的 FedAvg 策略实施。" +msgid "count" +msgstr "背景" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.replace:4 of #, fuzzy msgid "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -msgstr "" -":py:obj:`FedOpt `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"Maximum number of occurrences to replace. -1 (the default value) means " +"replace all occurrences." +msgstr "要替换的最大出现次数。-1(默认值)表示替换所有出现次数。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedopt.FedOpt:1 of +#: flwr.common.EventType.replace:7 of #, fuzzy -msgid "Federated Optim strategy." -msgstr "联邦优化策略。" +msgid "" +"If the optional argument count is given, only the first count occurrences" +" are replaced." +msgstr "如果给出可选参数 count,则只替换第一个计数出现的次数。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rfind:1 flwr.common.EventType.rindex:1 of #, fuzzy msgid "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedProx `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedprox.FedProx:1 of -msgid "Federated Optimization strategy." -msgstr "联邦优化策略。" +"Return the highest index in S where substring sub is found, such that sub" +" is contained within S[start:end]. Optional arguments start and end are " +"interpreted as in slice notation." 
+msgstr "返回在 S 中找到子串 sub 且 sub 包含在 S[start:end] 中的最高索引。 可选参数 start 和 end 按切分符号解释。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rpartition:3 of #, fuzzy msgid "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedTrimmedAvg `\\ " -"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of -msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." -msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" +"This will search for the separator in the string, starting at the end. If" +" the separator is found, returns a 3-tuple containing the part before the" +" separator, the separator itself, and the part after it." +msgstr "它会从字符串的末尾开始搜索分隔符。如果找到分隔符,则返回一个包含分隔符前部分、分隔符本身和分隔符后部分的 3 元组。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rpartition:7 of #, fuzzy msgid "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" -msgstr "" -":py:obj:`FedXgbBagging `\\ " -"\\(\\[evaluate\\_function\\]\\)" +"If the separator is not found, returns a 3-tuple containing two empty " +"strings and the original string." +msgstr "如果找不到分隔符,则返回一个包含两个空字符串和原始字符串的 3 元组。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of #, fuzzy -msgid "Configurable FedXgbBagging strategy implementation." -msgstr "可配置的 FedXgbNAvg 策略实施。" +msgid "sep" +msgstr "sep" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rsplit:4 flwr.common.EventType.split:4 of +#, fuzzy +msgid "The separator used to split the string." 
+msgstr "用于分割字符串的分隔符。" + +#: flwr.common.EventType.rsplit:6 flwr.common.EventType.split:6 of #, fuzzy msgid "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" -msgstr "" -":py:obj:`FedXgbCyclic `\\ " -"\\(\\*\\*kwargs\\)" +"When set to None (the default value), will split on any whitespace " +"character (including \\\\n \\\\r \\\\t \\\\f and spaces) and will discard" +" empty strings from the result." +msgstr "当设置为 \"无\"(默认值)时,将对任何空白字符(包括 \\n \\r \\t \\f 和空格)进行分割,并从结果中剔除空字符串。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of #, fuzzy -msgid "Configurable FedXgbCyclic strategy implementation." -msgstr "可配置的 FedAvg 策略实施。" +msgid "maxsplit" +msgstr "最大分割" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.rsplit:10 flwr.common.EventType.split:10 of #, fuzzy msgid "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" -msgstr "" -":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " -"\\*\\*kwargs\\)" +"Maximum number of splits (starting from the left). -1 (the default value)" +" means no limit." +msgstr "最大分割次数(从左边开始)。-1(默认值)表示没有限制。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of -msgid "Configurable FedXgbNnAvg strategy implementation." -msgstr "可配置的 FedXgbNAvg 策略实施。" +#: flwr.common.EventType.rsplit:13 of +#, fuzzy +msgid "Splitting starts at the end of the string and works to the front." +msgstr "从琴弦末端开始分弦,一直到琴弦前端。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.split:13 of #, fuzzy msgid "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" -msgstr "" -":py:obj:`FedYogi `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Note, str.split() is mainly useful for data that has been intentionally " +"delimited. 
With natural text that includes punctuation, consider using " +"the regular expression module." +msgstr "注意,str.split() 主要适用于有意分隔的数据。 对于包含标点符号的自然文本,可以考虑使用正则表达式模块。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of -msgid "FedYogi [Reddi et al., 2020] strategy." -msgstr "FedYogi [Reddi 等人,2020] 策略。" +#: flwr.common.EventType.splitlines:3 of +#, fuzzy +msgid "" +"Line breaks are not included in the resulting list unless keepends is " +"given and true." +msgstr "除非指定 keepends 为 true,否则换行符不会包含在生成的列表中。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.startswith:1 of #, fuzzy msgid "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" +"Return True if S starts with the specified prefix, False otherwise. With " +"optional start, test S beginning at that position. With optional end, " +"stop comparing S at that position. prefix can also be a tuple of strings " +"to try." msgstr "" -":py:obj:`FaultTolerantFedAvg " -"`\\ \\(\\*\\[\\, " -"fraction\\_fit\\, ...\\]\\)" - -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of -msgid "Configurable fault-tolerant FedAvg strategy implementation." -msgstr "可配置的容错 FedAvg 策略实施。" +"如果 S 以指定的前缀开始,则返回 True,否则返回 False。如果选择 start,则从该位置开始测试 S。如果使用可选的 " +"end,则在该位置停止比较 S。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.title:3 of #, fuzzy msgid "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -msgstr "" -":py:obj:`Krum `\\ \\(\\*\\[\\, " -"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +"More specifically, words start with uppercased characters and all " +"remaining cased characters have lower case." 
+msgstr "更具体地说,单词以大写字母开头,其余所有大小写字符均为小写。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.krum.Krum:1 of +#: flwr.common.EventType.translate:5 of #, fuzzy -msgid "Krum [Blanchard et al., 2017] strategy." -msgstr "FedYogi [Reddi 等人,2020] 策略。" +msgid "table" +msgstr "数据库" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.common.EventType.translate:4 of #, fuzzy msgid "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +"Translation table, which must be a mapping of Unicode ordinals to Unicode" +" ordinals, strings, or None." +msgstr "翻译表,必须是 Unicode 序号到 Unicode 序号、字符串或无的映射。" + +#: flwr.common.EventType.translate:7 of +#, fuzzy +msgid "" +"The table must implement lookup/indexing via __getitem__, for instance a " +"dictionary or list. If this operation raises LookupError, the character " +"is left untouched. Characters mapped to None are deleted." msgstr "" -":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " -"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +"表必须通过 __getitem__ 实现查找/索引,例如字典或列表。 如果该操作引发 LookupError,该字符将保持不变。 映射为 None" +" 的字符将被删除。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.qfedavg.QFedAvg:1 of -msgid "Configurable QFedAvg strategy implementation." -msgstr "可配置的 QFedAvg 策略实施。" +#: flwr.common.EventType.zfill:3 of +#, fuzzy +msgid "The string is never truncated." +msgstr "字符串不会被截断。" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: ../../source/ref-api/flwr.common.FitIns.rst:2 #, fuzzy -msgid ":py:obj:`Strategy `\\ \\(\\)" -msgstr "server.strategy.Strategy" +msgid "FitIns" +msgstr "FitIns" -#: ../../source/ref-api/flwr.server.strategy.rst:45::1 -#: flwr.server.strategy.strategy.Strategy:1 of -msgid "Abstract base class for server strategy implementations." 
-msgstr "服务器策略实现的抽象基类。" +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 +#, fuzzy +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 +#: ../../source/ref-api/flwr.common.FitIns.rst:29::1 #, fuzzy -msgid "Bulyan" -msgstr "Bulyan" +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.strategy.bulyan.Bulyan:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 -#: flwr.server.strategy.fedavgm.FedAvgM:1 -#: flwr.server.strategy.fedmedian.FedMedian:1 -#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 -#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of +#: ../../source/ref-api/flwr.common.FitRes.rst:2 #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" -msgstr "server.strategy.DPFedAvgFixed" +msgid "FitRes" +msgstr "FitRes" -#: flwr.server.strategy.bulyan.Bulyan:3 of -msgid "Implementation based on https://arxiv.org/abs/1802.07927." -msgstr "实施基于 https://arxiv.org/abs/1802.07927。" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.server.strategy.bulyan.Bulyan:5 -#: flwr.server.strategy.fedadagrad.FedAdagrad:5 -#: flwr.server.strategy.fedadam.FedAdam:5 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 -#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 -#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of -msgid "Fraction of clients used during training. Defaults to 1.0." 
-msgstr "训练期间使用客户的比例。默认为 1.0。" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: flwr.server.strategy.bulyan.Bulyan:7 -#: flwr.server.strategy.fedadagrad.FedAdagrad:7 -#: flwr.server.strategy.fedadam.FedAdam:7 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 -#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 -#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of -msgid "Fraction of clients used during validation. Defaults to 1.0." -msgstr "验证过程中使用的客户端比例。默认为 1.0。" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy +msgid ":py:obj:`num_examples `\\" +msgstr ":py:obj:`num_examples `\\" -#: flwr.server.strategy.bulyan.Bulyan:9 -#: flwr.server.strategy.fedadagrad.FedAdagrad:9 -#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 -#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 -#: flwr.server.strategy.fedprox.FedProx:45 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 -#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of -msgid "Minimum number of clients used during training. Defaults to 2." 
-msgstr "训练期间使用的最少客户数。默认为 2。" +#: ../../source/ref-api/flwr.common.FitRes.rst:31::1 +#, fuzzy +msgid ":py:obj:`metrics `\\" +msgstr ":py:obj:`metrics `\\" -#: flwr.server.strategy.bulyan.Bulyan:11 -#: flwr.server.strategy.fedadagrad.FedAdagrad:11 -#: flwr.server.strategy.fedadam.FedAdam:11 -#: flwr.server.strategy.fedavg.FedAvg:15 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 -#: flwr.server.strategy.fedavgm.FedAvgM:11 -#: flwr.server.strategy.fedopt.FedOpt:11 -#: flwr.server.strategy.fedprox.FedProx:47 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 -#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of -msgid "Minimum number of clients used during validation. Defaults to 2." -msgstr "验证过程中使用的最少客户端数量。默认为 2。" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:2 +#, fuzzy +msgid "GetParametersIns" +msgstr "参数" -#: flwr.server.strategy.bulyan.Bulyan:13 -#: flwr.server.strategy.fedadagrad.FedAdagrad:13 -#: flwr.server.strategy.fedadam.FedAdam:13 -#: flwr.server.strategy.fedavg.FedAvg:17 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 -#: flwr.server.strategy.fedavgm.FedAvgM:13 -#: flwr.server.strategy.fedopt.FedOpt:13 -#: flwr.server.strategy.fedprox.FedProx:49 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 -#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of -msgid "Minimum number of total clients in the system. Defaults to 2." -msgstr "系统中客户总数的最小值。默认为 2。" +#: ../../source/ref-api/flwr.common.GetParametersIns.rst:28::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of -msgid "Number of malicious clients in the system. Defaults to 0." 
-msgstr "系统中恶意客户端的数量。默认为 0。" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:2 +#, fuzzy +msgid "GetParametersRes" +msgstr "参数" -#: flwr.server.strategy.bulyan.Bulyan:17 -#: flwr.server.strategy.fedadagrad.FedAdagrad:15 -#: flwr.server.strategy.fedadam.FedAdam:15 -#: flwr.server.strategy.fedavg.FedAvg:19 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 -#: flwr.server.strategy.fedavgm.FedAvgM:15 -#: flwr.server.strategy.fedopt.FedOpt:15 -#: flwr.server.strategy.fedprox.FedProx:51 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 -#: flwr.server.strategy.fedyogi.FedYogi:17 -#: flwr.server.strategy.fedyogi.FedYogi:18 -#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of -msgid "Optional function used for validation. Defaults to None." -msgstr "用于验证的可选函数。默认为 \"无\"。" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +#, fuzzy +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.server.strategy.bulyan.Bulyan:19 -#: flwr.server.strategy.fedadagrad.FedAdagrad:17 -#: flwr.server.strategy.fedadam.FedAdam:17 -#: flwr.server.strategy.fedavg.FedAvg:21 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 -#: flwr.server.strategy.fedavgm.FedAvgM:17 -#: flwr.server.strategy.fedopt.FedOpt:17 -#: flwr.server.strategy.fedprox.FedProx:53 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 -#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of -msgid "Function used to configure training. Defaults to None." 
-msgstr "用于配置训练的功能。默认为 \"无\"。" +#: ../../source/ref-api/flwr.common.GetParametersRes.rst:29::1 +#, fuzzy +msgid ":py:obj:`parameters `\\" +msgstr ":py:obj:`parameters `\\" -#: flwr.server.strategy.bulyan.Bulyan:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:19 -#: flwr.server.strategy.fedadam.FedAdam:19 -#: flwr.server.strategy.fedavg.FedAvg:23 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 -#: flwr.server.strategy.fedavgm.FedAvgM:19 -#: flwr.server.strategy.fedopt.FedOpt:19 -#: flwr.server.strategy.fedprox.FedProx:55 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 -#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of -msgid "Function used to configure validation. Defaults to None." -msgstr "用于配置验证的函数。默认为 \"无\"。" +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:2 +#, fuzzy +msgid "GetPropertiesIns" +msgstr "GetPropertiesIns" -#: flwr.server.strategy.bulyan.Bulyan:23 -#: flwr.server.strategy.fedadagrad.FedAdagrad:25 -#: flwr.server.strategy.fedadam.FedAdam:21 -#: flwr.server.strategy.fedavg.FedAvg:25 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 -#: flwr.server.strategy.fedavgm.FedAvgM:21 -#: flwr.server.strategy.fedopt.FedOpt:21 -#: flwr.server.strategy.fedprox.FedProx:57 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 -#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of -msgid "Whether or not accept rounds containing failures. Defaults to True." 
-msgstr "是否接受包含失败的轮。默认为 True。" +#: ../../source/ref-api/flwr.common.GetPropertiesIns.rst:28::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr ":py:obj:`config `\\" -#: flwr.server.strategy.bulyan.Bulyan:25 -#: flwr.server.strategy.fedadagrad.FedAdagrad:27 -#: flwr.server.strategy.fedadam.FedAdam:23 -#: flwr.server.strategy.fedavg.FedAvg:27 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 -#: flwr.server.strategy.fedavgm.FedAvgM:23 -#: flwr.server.strategy.fedopt.FedOpt:23 -#: flwr.server.strategy.fedprox.FedProx:59 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 -#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of -msgid "Initial global model parameters." -msgstr "初始全局模型参数。" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:2 +#, fuzzy +msgid "GetPropertiesRes" +msgstr "GetPropertiesRes" -#: flwr.server.strategy.bulyan.Bulyan:27 of -msgid "" -"Byzantine resilient aggregation rule that is used as the first step of " -"the Bulyan (e.g., Krum)" -msgstr "Byzantine弹性聚合规则,用作 Bulyan 的第一步(如 Krum)" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +#, fuzzy +msgid ":py:obj:`status `\\" +msgstr ":py:obj:`status `\\" -#: flwr.server.strategy.bulyan.Bulyan:29 of -msgid "arguments to the first_aggregation rule" -msgstr "第一聚类规则的参数" +#: ../../source/ref-api/flwr.common.GetPropertiesRes.rst:29::1 +#, fuzzy +msgid ":py:obj:`properties `\\" +msgstr ":py:obj:`properties `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Message.rst:2 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +msgid "Message" +msgstr "服务器端" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate evaluation losses using weighted average." -msgstr "采用加权平均法计算评估损失总额。" +#: flwr.common.Message.content:1::1 flwr.common.Message.metadata:1 +#: flwr.common.message.Message:3 of +#, fuzzy +msgid "A dataclass including information about the message to be executed." +msgstr "数据类型,包括要执行的信息的相关信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.message.Message:5 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Holds records either sent by another entity (e.g. sent by the server-side" +" logic to a client, or vice-versa) or that will be sent to it." +msgstr "保存由其他实体发送的记录(如由服务器端逻辑发送到客户端,反之亦然)或将发送到该实体的记录。" -#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -msgid "Aggregate fit results using Bulyan." -msgstr "使用 Bulyan 技术汇总拟合结果。" +#: flwr.common.message.Message:8 of +#, fuzzy +msgid "" +"A dataclass that captures information about an error that took place when" +" processing another message." 
+msgstr "数据类,用于捕捉处理其他报文时发生的错误信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`create_error_reply `\\ " +"\\(error\\, ttl\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 -#: 
flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of -msgid "Configure the next round of evaluation." -msgstr "配置下一轮评估。" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_error_reply:1 of +#, fuzzy +msgid "Construct a reply message indicating an error happened." +msgstr "构建一条回复信息,说明发生了错误。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`create_reply `\\ " +"\\(content\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`create_reply `\\ \\(content\\," +" ttl\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: 
flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 -#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of -msgid "Configure the next round of training." -msgstr "配置下一轮训练。" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.create_reply:1 of +#, fuzzy +msgid "Create a reply to this message with specified content and TTL." 
+msgstr "以指定的内容和 TTL 创建对该信息的回复。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`has_content `\\ \\(\\)" +msgstr ":py:obj:`has_content `\\ \\(\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Evaluate model parameters using an evaluation function." -msgstr "使用评估函数评估模型参数。" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_content:1 of +#, fuzzy +msgid "Return True if message has content, else False." 
+msgstr "如果信息有内容,则返回 True,否则返回 False。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Message.rst:35::1 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid ":py:obj:`has_error `\\ \\(\\)" +msgstr ":py:obj:`has_error `\\ \\(\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 -#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of -msgid "Initialize global model parameters." -msgstr "初始化全局模型参数。" +#: ../../source/ref-api/flwr.common.Message.rst:35::1 +#: flwr.common.message.Message.has_error:1 of +#, fuzzy +msgid "Return True if message has an error, else False." 
+msgstr "如果信息有错误,则返回 True,否则返回 False。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.Message.content:1::1 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid ":py:obj:`content `\\" +msgstr ":py:obj:`content `\\" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of -msgid "Use a fraction of available clients for evaluation." -msgstr "使用部分可用客户进行评估。" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.common.Message.content:1 flwr.common.Message.content:1::1 +#: of #, fuzzy -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +msgid "The content of this message." 
+msgstr "评估客户端的反应。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of -msgid "Return the sample size and the required number of available clients." -msgstr "返回样本大小和所需的可用客户数量。" +#: flwr.common.Message.content:1::1 of +#, fuzzy +msgid ":py:obj:`error `\\" +msgstr ":py:obj:`error `\\" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 -msgid "DPFedAvgAdaptive" -msgstr "DPFedAvgAdaptive" +#: flwr.common.Message.content:1::1 flwr.common.Message.error:1 of +#, fuzzy +msgid "Error captured by this message." +msgstr "该信息捕捉到的错误。" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +#: flwr.common.Message.content:1::1 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" -msgstr "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgid ":py:obj:`metadata `\\" +msgstr ":py:obj:`metadata `\\" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +#: flwr.common.message.Message.create_error_reply:3 of #, fuzzy -msgid "This class is deprecated and will be removed in a future release." -msgstr "该类已被弃用,将在以后的版本中删除。" +msgid "The error that was encountered." 
+msgstr "遇到的错误。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - " +"msg.meta.created_at)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" - -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate evaluation losses using the given strategy." 
-msgstr "使用给定的策略汇总评估损失。" +"该信息的有效时间(秒)。如果未设置,则将根据收到的信息过期前的剩余时间来设置。其计算公式为:ttl = msg.meta.ttl - " +"(reply.meta.created_at - msg.meta.created_at)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_error_reply:5 +#: flwr.common.message.Message.create_reply:9 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"Time-to-live for this message in seconds. If unset, it will be set based " +"on the remaining time for the received message before it expires. This " +"follows the equation:" +msgstr "该信息的有效时间(秒)。如果未设置,则将根据接收到的信息过期前的剩余时间来设置。其计算公式如下" -#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of -msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." -msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" +#: flwr.common.message.Message.create_error_reply:9 +#: flwr.common.message.Message.create_reply:13 of +#, fuzzy +msgid "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" +msgstr "ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at)" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_reply:3 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The method generates a new `Message` as a reply to this message. It " +"inherits 'run_id', 'src_node_id', 'dst_node_id', and 'message_type' from " +"this message and sets 'reply_to_message' to the ID of this message." 
msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"该方法会生成一条新的 \"信息\",作为对该信息的回复。该方法继承了该消息的 " +"\"run_id\"、\"src_node_id\"、\"dst_node_id \"和 \"message_type\",并将 " +"\"reply_to_message \"设置为该消息的 ID。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of -msgid "Configure the next round of evaluation using the specified strategy." -msgstr "使用指定策略配置下一轮评估。" +#: flwr.common.message.Message.create_reply:7 of +#, fuzzy +msgid "The content for the reply message." +msgstr "回复信息的内容。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Message.create_reply:16 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "A new `Message` instance representing the reply." 
+msgstr "代表回复的新的 `Message` 实例。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.MessageType.rst:2 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "MessageType" +msgstr "返回类型" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of -msgid "Evaluate model parameters using an evaluation function from the strategy." 
-msgstr "使用策略中的评估函数评估模型参数。" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#, fuzzy +msgid ":py:obj:`EVALUATE `\\" +msgstr ":py:obj:`EVALUATE `\\" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid ":py:obj:`QUERY `\\" +msgstr ":py:obj:`QUERY `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of -msgid "Initialize global model parameters using given strategy." 
-msgstr "使用给定的策略初始化全局模型参数。" +#: ../../source/ref-api/flwr.common.MessageType.rst:30::1 +#, fuzzy +msgid ":py:obj:`TRAIN `\\" +msgstr ":py:obj:`TRAIN `\\" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 -#: flwr.server.strategy.strategy.Strategy.configure_fit:3 -#: flwr.server.strategy.strategy.Strategy.evaluate:6 of -msgid "The current round of federated learning." -msgstr "本轮联邦学习。" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:2 +#, fuzzy +msgid "MessageTypeLegacy" +msgstr "MessageTypeLegacy" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 -#: flwr.server.strategy.strategy.Strategy.configure_fit:7 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of -msgid "The client manager which holds all currently connected clients." -msgstr "客户端管理器,用于管理当前连接的所有客户端。" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#, fuzzy +msgid ":py:obj:`GET_PARAMETERS `\\" +msgstr ":py:obj:`GET_PARAMETERS `\\" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 -#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of -msgid "" -"**evaluate_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `EvaluateIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated evaluation." 
-msgstr "" -"**evaluate_configuration** -- " -"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" -" `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" +#: ../../source/ref-api/flwr.common.MessageTypeLegacy.rst:29::1 +#, fuzzy +msgid ":py:obj:`GET_PROPERTIES `\\" +msgstr ":py:obj:`GET_PROPERTIES `\\" -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 -msgid "DPFedAvgFixed" -msgstr "DPFedAvgFixed" +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.run_id:1 flwr.common.message.Metadata:3 of +#, fuzzy +msgid "An identifier for the current run." +msgstr "当前运行的标识符。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 -#: flwr.server.strategy.fedavg.FedAvg:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_id:1 flwr.common.message.Metadata:5 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" -msgstr "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgid "An identifier for the current message." +msgstr "当前信息的标识符。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.src_node_id:1 flwr.common.message.Metadata:7 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "An identifier for the node sending this message." 
+msgstr "发送此信息的节点的标识符。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.dst_node_id:1 flwr.common.message.Metadata:9 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" - -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of -msgid "Aggregate training results using unweighted aggregation." -msgstr "使用非加权汇总法汇总训练结果。" +msgid "An identifier for the node receiving this message." +msgstr "接收此信息的节点的标识符。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.reply_to_message:1 flwr.common.message.Metadata:11 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "An identifier for the message this message replies to." +msgstr "该信息回复的信息的标识符。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Metadata:13 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" - -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of -msgid "" -"Configure the next round of training incorporating Differential Privacy " -"(DP)." -msgstr "配置包含差分隐私 (DP) 的下一轮训练。" +"An identifier for grouping messages. In some settings, this is used as " +"the FL round." 
+msgstr "用于分组报文的标识符。在某些设置中,它被用作 FL 轮。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.message.Metadata:16 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Time-to-live for this message in seconds." +msgstr "该信息的有效时间。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.message_type:1 flwr.common.message.Metadata:18 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" - -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of -msgid "" -"Configuration of the next training round includes information related to " -"DP, such as clip norm and noise stddev." -msgstr "下一轮训练的配置包括与 DP 相关的信息,如片段规范和噪声 stddev。" +msgid "A string that encodes the action to be executed on the receiving end." +msgstr "编码接收端要执行的操作的字符串。" -#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 -#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of -msgid "" -"**fit_configuration** -- A list of tuples. Each tuple in the list " -"identifies a `ClientProxy` and the `FitIns` for this particular " -"`ClientProxy`. If a particular `ClientProxy` is not included in this " -"list, it means that this `ClientProxy` will not participate in the next " -"round of federated learning." 
-msgstr "" -"**fit_configuration** -- " -"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" +#: flwr.common.Metadata.created_at:1::1 of +#, fuzzy +msgid ":py:obj:`created_at `\\" +msgstr ":py:obj:`ttl `\\" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +#: flwr.common.Metadata.created_at:1 +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "DifferentialPrivacyClientSideAdaptiveClipping" -msgstr "DifferentialPrivacyClientSideAdaptiveClipping" +msgid "Unix timestamp when the message was created." +msgstr "创建信息时的 Unix 时间戳。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "Use `adaptiveclipping_mod` modifier at the client side." -msgstr "在客户端使用 \"adaptiveclipping_mod \"修改器。" +msgid ":py:obj:`dst_node_id `\\" +msgstr ":py:obj:`dst_node_id `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "" -"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " -"happen on the client-side, usually by using the built-in " -"`adaptiveclipping_mod`." 
-msgstr "" -"与在服务器端执行剪切的 `DifferentialPrivacyServerSideAdaptiveClipping` " -"相比,`DifferentialPrivacyClientSideAdaptiveClipping` 希望在客户端进行剪切,通常使用内置的 " -"`adaptiveclipping_mod`。" +msgid ":py:obj:`group_id `\\" +msgstr ":py:obj:`group_id `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 -#: of +#: flwr.common.Metadata.created_at:1::1 +#: flwr.common.Metadata.group_id:1 of #, fuzzy -msgid "The strategy to which DP functionalities will be added by this wrapper." -msgstr "该包装器将添加 DP 功能的策略。" +msgid "An identifier for grouping messages." +msgstr "用于分组信息的标识符。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "The noise multiplier for the Gaussian mechanism for model updates." -msgstr "用于模型更新的高斯机制的噪声乘数。" +msgid ":py:obj:`message_id `\\" +msgstr ":py:obj:`message_id `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "The number of clients that are sampled on each round." 
-msgstr "每轮取样的客户数。" +msgid ":py:obj:`message_type `\\" +msgstr ":py:obj:`message_type `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "" -"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " -"recommends to set to 0.1." -msgstr "剪切规范的初始值。默认为 0.1。安德鲁等人建议设置为 0.1。" +msgid ":py:obj:`reply_to_message `\\" +msgstr ":py:obj:`reply_to_message `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." -msgstr "需要剪切的更新量化值。默认为 0.5。" +msgid ":py:obj:`run_id `\\" +msgstr ":py:obj:`run_id `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "" -"The learning rate for the clipping norm adaptation. Defaults to 0.2. " -"Andrew et al. recommends to set to 0.2." -msgstr "剪切规范适应的学习率。默认为 0.2。安德鲁等人建议设置为 0.2。" +msgid ":py:obj:`src_node_id `\\" +msgstr ":py:obj:`src_node_id `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 -#: of +#: flwr.common.Metadata.created_at:1::1 of #, fuzzy -msgid "" -"The stddev of the noise added to the count of updates currently below the" -" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" -msgstr "添加到当前低于估计值的更新计数中的噪声的 stddev。安德鲁等人建议设置为 \"expected_num_records/20" +msgid ":py:obj:`ttl `\\" +msgstr ":py:obj:`ttl `\\" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: flwr.common.Metadata.created_at:1::1 flwr.common.Metadata.ttl:1 #: of #, fuzzy -msgid "Create a strategy:" -msgstr "server.strategy" +msgid "Time-to-live for this message." +msgstr "该信息的有效时间。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 -#: of +#: ../../source/ref-api/flwr.common.Metrics.rst:2 #, fuzzy -msgid "" -"Wrap the strategy with the " -"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" -msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" +msgid "Metrics" +msgstr "MetricsRecord" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 -#: of +#: ../../source/ref-api/flwr.common.MetricsRecord.rst:2 #, fuzzy -msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" -msgstr "在客户端,将 \"adaptiveclipping_mod \"添加到客户端模块中:" +msgid "MetricsRecord" +msgstr "MetricsRecord" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.metricsrecord.MetricsRecord:1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`int` | :py:class:`float` | " +":py:class:`list`\\ [:py:class:`int`] | :py:class:`list`\\ " +"[:py:class:`float`]]" msgstr "" -":py:obj:`aggregate_evaluate " 
-"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:obj:`~typing.Union`\\ [:py:class:`int`, " +":py:class:`float`, :py:class:`~typing.List`\\ [:py:class:`int`], " +":py:class:`~typing.List`\\ [:py:class:`float`]]]" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:3 of msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"A :code:`MetricsRecord` is a Python dictionary designed to ensure that " +"each key-value pair adheres to specified data types. A " +":code:`MetricsRecord` is one of the types of records that a " +"`flwr.common.RecordSet `_ supports " +"and can therefore be used to construct :code:`common.Message` objects." msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 -#: of -#, fuzzy -msgid "Aggregate training results and update clip norms." -msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" +#: flwr.common.record.metricsrecord.MetricsRecord:9 of +msgid "" +"A dictionary that stores basic types (i.e. `int`, `float` as defined in " +"`MetricsScalar`) and list of such types (see `MetricsScalarList`)." 
+msgstr "" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:12 of msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"A boolean indicating whether metrics should be deleted from the input " +"dictionary immediately after adding them to the record. When set to True," +" the data is duplicated in memory. If memory is a concern, set it to " +"False." msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:20 of msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"The usage of a :code:`MetricsRecord` is envisioned for communicating " +"results obtained when a node performs an action. A few typical examples " +"include: communicating the training accuracy after a model is trained " +"locally by a :code:`ClientApp`, reporting the validation loss obtained at" +" a :code:`ClientApp`, or, more generally, the output of executing a query" +" by the :code:`ClientApp`. Common to these examples is that the output " +"can be typically represented by a single scalar (:code:`int`, " +":code:`float`) or list of scalars." 
msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:28 of msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"Let's see some examples of how to construct a :code:`MetricsRecord` from " +"scratch:" msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.metricsrecord.MetricsRecord:39 of msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"Since types are enforced, the types of the objects inserted are checked. " +"For a :code:`MetricsRecord`, value types allowed are those in defined in " +":code:`flwr.common.MetricsRecordValues`. Similarly, only :code:`str` keys" +" are allowed." msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 +#: flwr.common.record.metricsrecord.MetricsRecord:50 of +msgid "" +"If you need a more versatily type of record try :code:`ConfigsRecord` or " +":code:`ParametersRecord`." +msgstr "" + +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "DifferentialPrivacyClientSideFixedClipping" -msgstr "差分隐私" +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Use `fixedclipping_mod` modifier at the client side." 
-msgstr "在客户端使用 `fixedclipping_mod` 修改器。" +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " -"performs clipping on the server-side, " -"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " -"on the client-side, usually by using the built-in `fixedclipping_mod`." -msgstr "" -"与在服务器端执行剪切的 \"DifferentialPrivacyServerSideFixedClipping " -"\"相比,\"DifferentialPrivacyClientSideFixedClipping \"希望在客户端进行剪切,通常是使用内置的 " -"\"fixedclipping_mod\"。" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -"The noise multiplier for the Gaussian mechanism for model updates. A " -"value of 1.0 or higher is recommended for strong privacy." 
-msgstr "模型更新高斯机制的噪声乘数。建议使用 1.0 或更高的值,以获得较强的隐私性。" +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " -"wrapper:" -msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" -msgstr "在客户端,将 \"fixedclipping_mod \"添加到客户端模块中:" +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" + +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Add noise to the aggregated parameters." -msgstr "然后将汇总结果序列化:" +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.NDArray.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "NDArray" +msgstr "NDArray" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.NDArrays.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid "NDArrays" +msgstr "NDArray" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 #, fuzzy -msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +msgid ":py:obj:`tensors `\\" +msgstr ":py:obj:`tensors `\\" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.Parameters.rst:29::1 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +msgid ":py:obj:`tensor_type `\\" 
+msgstr ":py:obj:`tensor_type `\\" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +#: ../../source/ref-api/flwr.common.ParametersRecord.rst:2 #, fuzzy -msgid "DifferentialPrivacyServerSideAdaptiveClipping" -msgstr "DifferentialPrivacyServerSideAdaptiveClipping" +msgid "ParametersRecord" +msgstr "参数" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:1 of #, fuzzy msgid "" -"The standard deviation of the noise added to the count of updates below " -"the estimate. Andrew et al. recommends to set to " -"`expected_num_records/20`" -msgstr "添加到低于估计值的更新计数中的噪声标准偏差。安德鲁等人建议设置为 \"expected_num_records/20" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" +msgstr "" +"Bases: :py:class:`~flwr.common.record.typeddict.TypedDict`\\ " +"[:py:class:`str`, :py:class:`~flwr.common.record.parametersrecord.Array`]" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:3 of #, fuzzy msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " -"wrapper" -msgstr "用 DifferentialPrivacyServerSideAdaptiveClipping 封装器封装策略" +"A dataclass storing named Arrays in order. This means that it holds " +"entries as an OrderedDict[str, Array]. ParametersRecord objects can be " +"viewed as an equivalent to PyTorch's state_dict, but holding serialised " +"tensors instead. A :code:`ParametersRecord` is one of the types of " +"records that a `flwr.common.RecordSet " +"`_ supports and can therefore be " +"used to construct :code:`common.Message` objects." 
+msgstr "" +"按顺序存储命名数组的数据类。这意味着它以 OrderedDict[str, Array] 的形式保存条目。ParametersRecord " +"对象相当于 PyTorch 的 state_dict,但它保存的是序列化的张量。" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.parametersrecord.ParametersRecord:10 of +msgid "A dictionary that stores serialized array-like or tensor-like objects." +msgstr "" + +#: flwr.common.record.parametersrecord.ParametersRecord:12 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +"A boolean indicating whether parameters should be deleted from the input " +"dictionary immediately after adding them to the record. If False, the " +"dictionary passed to `set_parameters()` will be empty once exiting from " +"that function. This is the desired behaviour when working with very large" +" models/tensors/arrays. However, if you plan to continue working with " +"your parameters after adding it to the record, set this flag to True. " +"When set to True, the data is duplicated in memory." msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.parametersrecord.ParametersRecord:23 of msgid "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +"The usage of :code:`ParametersRecord` is envisioned for storing data " +"arrays (e.g. parameters of a machine learning model). These first need to" +" be serialized into a :code:`flwr.common.Array` data structure." 
msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.parametersrecord.ParametersRecord:27 of #, fuzzy +msgid "Let's see some examples:" +msgstr "让我们来看几个例子:" + +#: flwr.common.record.parametersrecord.ParametersRecord:50 of msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"Now that the NumPy array is embedded into a :code:`ParametersRecord` it " +"could be sent if added as part of a :code:`common.Message` or it could be" +" saved as a persistent state of a :code:`ClientApp` via its context. " +"Regardless of the usecase, we will sooner or later want to recover the " +"array in its original NumPy representation. For the example above, where " +"the array was serialized using the built-in utility function, " +"deserialization can be done as follows:" msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.parametersrecord.ParametersRecord:65 of msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +"If you need finer control on how your arrays are serialized and " +"deserialized, you can construct :code:`Array` objects directly like this:" msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.parametersrecord.ParametersRecord:83 of msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +"Note that different arrays (e.g. from PyTorch, Tensorflow) might require " +"different serialization mechanism. 
Howerver, they often support a " +"conversion to NumPy, therefore allowing to use the same or similar steps " +"as in the example above." msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +msgid ":py:obj:`clear `\\ \\(\\)" +msgstr ":py:obj:`clear `\\ \\(\\)" -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "DifferentialPrivacyServerSideFixedClipping" -msgstr "差分隐私" +msgid ":py:obj:`count_bytes `\\ \\(\\)" +msgstr ":py:obj:`count_bytes `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " -"wrapper" -msgstr "用 DifferentialPrivacyServerSideFixedClipping 封装器封装策略" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\" -" \\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`items `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\" -" 
\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\" -" \\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`keys `\\ \\(\\)" +msgstr ":py:obj:`keys `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "Compute the updates, clip, and pass them for aggregation." -msgstr "计算更新、剪辑并将其传递给聚合。" +msgid ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`pop `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`popitem `\\ \\(\\)" +msgstr ":py:obj:`items `\\ \\(\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`setdefault `\\ " +"\\(k\\[\\,d\\]\\)" +msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" msgstr "" -":py:obj:`evaluate " -"`\\" -" \\(server\\_round\\, parameters\\)" +":py:obj:`update `\\ \\(\\[E\\, " +"\\]\\*\\*F\\)" -#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 -#: of +#: collections.abc.MutableMapping.clear:1::1 of +#, fuzzy +msgid ":py:obj:`values `\\ \\(\\)" +msgstr ":py:obj:`values `\\ \\(\\)" + +#: flwr.common.record.parametersrecord.ParametersRecord.count_bytes:3 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\" -" \\(client\\_manager\\)" +"Note that a small amount of Bytes might also be included in this counting" +" that correspond to metadata of the serialized object (e.g. of NumPy " +"array) needed for deseralization." +msgstr "请注意,该计数中还可能包含少量字节,这些字节与序列化对象(如 NumPy 数组)的元数据相对应,需要进行去eralization。" -#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 -#: of +#: ../../source/ref-api/flwr.common.Properties.rst:2 #, fuzzy -msgid "Afterward, add noise to the aggregated parameters." -msgstr "然后,在汇总参数中添加噪声。" +msgid "Properties" +msgstr "GetPropertiesRes" -#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:2 #, fuzzy -msgid "FaultTolerantFedAvg" -msgstr "server.strategy.FaultTolerantFedAvg" +msgid "ReconnectIns" +msgstr "启用 SSL 连接" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.ReconnectIns.rst:28::1 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`seconds `\\" +msgstr ":py:obj:`seconds `\\" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.common.RecordSet.rst:2 #, fuzzy +msgid "RecordSet" +msgstr "RecordSet" + +#: flwr.common.record.recordset.RecordSet:3 of msgid "" 
-":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +"A :code:`RecordSet` is the unified mechanism by which parameters, metrics" +" and configs can be either stored as part of a `flwr.common.Context " +"`_ in your apps or communicated as part of a " +"`flwr.common.Message `_ between your apps." msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" - -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 -#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 -#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 -#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using weighted average." -msgstr "使用加权平均法汇总拟合结果。" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:9 of msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A dictionary of :code:`ParametersRecords` that can be used to record and " +"communicate model parameters and high-dimensional arrays." 
msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:12 of msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"A dictionary of :code:`MetricsRecord` that can be used to record and " +"communicate scalar-valued metrics that are the result of performing and " +"action, for example, by a :code:`ClientApp`." msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:16 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"A dictionary of :code:`ConfigsRecord` that can be used to record and " +"communicate configuration values to an entity (e.g. to a " +":code:`ClientApp`) for it to adjust how an action is performed." msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:24 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"A :code:`RecordSet` can hold three types of records, each designed with " +"an specific purpose. What is common to all of them is that they are " +"Python dictionaries designed to ensure that each key-value pair adheres " +"to specified data types." msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of +#: flwr.common.record.recordset.RecordSet:29 of #, fuzzy +msgid "Let's see an example." 
+msgstr "让我们来看几个例子:" + +#: flwr.common.record.recordset.RecordSet:47 of msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Adding a :code:`ParametersRecord` follows the same steps as above but " +"first, the array needs to be serialized and represented as a " +":code:`flwr.common.Array`. If the array is a :code:`NumPy` array, you can" +" use the built-in utility function `array_from_numpy " +"`_. It is often possible to convert an" +" array first to :code:`NumPy` and then use the aforementioned function." msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.common.record.recordset.RecordSet:66 of msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"For additional examples on how to construct each of the records types " +"shown above, please refer to the documentation for :code:`ConfigsRecord`," +" :code:`MetricsRecord` and :code:`ParametersRecord`." msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 -#: ../../source/ref-changelog.md:997 -msgid "FedAdagrad" -msgstr "FedAdagrad" +#: flwr.common.RecordSet.configs_records:1::1 of +#, fuzzy +msgid ":py:obj:`configs_records `\\" +msgstr ":py:obj:`configs_records `\\" -#: flwr.server.strategy.fedadagrad.FedAdagrad:1 -#: flwr.server.strategy.fedadam.FedAdam:1 -#: flwr.server.strategy.fedyogi.FedYogi:1 of +#: flwr.common.RecordSet.configs_records:1 +#: flwr.common.RecordSet.configs_records:1::1 of #, fuzzy -msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" -msgstr "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgid "Dictionary holding ConfigsRecord instances." 
+msgstr "包含 ConfigsRecord 实例的字典。" -#: flwr.server.strategy.fedadagrad.FedAdagrad:3 -#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 -#: flwr.server.strategy.fedyogi.FedYogi:3 of -msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" -msgstr "实施基于 https://arxiv.org/abs/2003.00295v5" +#: flwr.common.RecordSet.configs_records:1::1 of +#, fuzzy +msgid ":py:obj:`metrics_records `\\" +msgstr ":py:obj:`metrics_records `\\" -#: flwr.server.strategy.fedadagrad.FedAdagrad:21 -#: flwr.server.strategy.fedadagrad.FedAdagrad:23 -#: flwr.server.strategy.fedadam.FedAdam:25 -#: flwr.server.strategy.fedadam.FedAdam:27 -#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 -#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 -#: flwr.server.strategy.fedprox.FedProx:61 -#: flwr.server.strategy.fedprox.FedProx:63 -#: flwr.server.strategy.fedyogi.FedYogi:28 -#: flwr.server.strategy.fedyogi.FedYogi:30 of -msgid "Metrics aggregation function, optional." -msgstr "指标汇总功能,可选。" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.metrics_records:1 of +#, fuzzy +msgid "Dictionary holding MetricsRecord instances." +msgstr "保存 MetricsRecord 实例的字典。" -#: flwr.server.strategy.fedadagrad.FedAdagrad:29 -#: flwr.server.strategy.fedadam.FedAdam:29 -#: flwr.server.strategy.fedopt.FedOpt:29 of -msgid "Server-side learning rate. Defaults to 1e-1." -msgstr "服务器端学习率。默认为 1e-1。" +#: flwr.common.RecordSet.configs_records:1::1 of +#, fuzzy +msgid ":py:obj:`parameters_records `\\" +msgstr ":py:obj:`parameters_records `\\" -#: flwr.server.strategy.fedadagrad.FedAdagrad:31 -#: flwr.server.strategy.fedadam.FedAdam:31 -#: flwr.server.strategy.fedopt.FedOpt:31 of -msgid "Client-side learning rate. Defaults to 1e-1." -msgstr "客户端学习率。默认为 1e-1。" +#: flwr.common.RecordSet.configs_records:1::1 +#: flwr.common.RecordSet.parameters_records:1 of +#, fuzzy +msgid "Dictionary holding ParametersRecord instances." 
+msgstr "存放 ParametersRecord 实例的字典。" -#: flwr.server.strategy.fedadagrad.FedAdagrad:33 -#: flwr.server.strategy.fedadam.FedAdam:37 -#: flwr.server.strategy.fedopt.FedOpt:37 of -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." -msgstr "控制算法的适应度。默认为 1e-9。" +#: ../../source/ref-api/flwr.common.ServerMessage.rst:2 +#, fuzzy +msgid "ServerMessage" +msgstr "服务器端" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`evaluate_ins `\\" +msgstr ":py:obj:`evaluate_ins `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\" -" \\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`fit_ins `\\" +msgstr ":py:obj:`fit_ins `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`get_parameters_ins " +"`\\" msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`get_parameters_ins " +"`\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ServerMessage.rst:31::1 #, fuzzy msgid "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`get_properties_ins " +"`\\" msgstr "" -":py:obj:`configure_fit `\\" -" \\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`get_properties_ins " +"`\\" -#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Status.rst:2 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Status" +msgstr "客户端状态。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Status.rst:29::1 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid ":py:obj:`code `\\" +msgstr ":py:obj:`code `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.Status.rst:29::1 #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid ":py:obj:`message `\\" +msgstr ":py:obj:`message `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.array_from_numpy.rst:2 #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "array\\_from\\_numpy" +msgstr "array\\_from\\_numpy" -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 +#: ../../source/ref-api/flwr.common.bytes_to_ndarray.rst:2 #, fuzzy -msgid "FedAdam" -msgstr "FedAdagrad" +msgid "bytes\\_to\\_ndarray" +msgstr "bytes\\_to\\_ndarray" -#: flwr.server.strategy.fedadam.FedAdam:33 -#: flwr.server.strategy.fedyogi.FedYogi:36 of -msgid "Momentum parameter. Defaults to 0.9." -msgstr "动量参数。默认为 0.9。" +#: ../../source/ref-api/flwr.common.configure.rst:2 +#, fuzzy +msgid "configure" +msgstr "配置日志记录" -#: flwr.server.strategy.fedadam.FedAdam:35 -#: flwr.server.strategy.fedyogi.FedYogi:38 of -msgid "Second moment parameter. 
Defaults to 0.99." -msgstr "第二动量参数。默认为 0.99。" +#: ../../source/ref-api/flwr.common.event.rst:2 +#, fuzzy +msgid "event" +msgstr "事件" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.log.rst:2 #, fuzzy +msgid "log" +msgstr "登录" + +#: logging.Logger.log:3 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." +msgstr "要传递异常信息,请使用带 true 值的关键字参数 exc_info,例如。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" + +#: ../../source/ref-api/flwr.common.ndarray_to_bytes.rst:2 #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "ndarray\\_to\\_bytes" +msgstr "ndarray\\_to\\_bytes" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.ndarrays_to_parameters.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +msgid "ndarrays\\_to\\_parameters" +msgstr "ndarrays\\_to\\_parameters" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.now.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "now" +msgstr "现在" -#: 
flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.common.parameters_to_ndarrays.rst:2 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "parameters\\_to\\_ndarrays" +msgstr "parameters\\_to\\_ndarrays" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:2 +msgid "server" +msgstr "服务器" + +#: ../../source/ref-api/flwr.server.rst:22::1 #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`start_server `\\ \\(\\*\\[\\, " +"server\\_address\\, server\\, ...\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:22::1 +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." +msgstr "使用 gRPC 传输层启动 Flower 服务器。" + +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid ":py:obj:`ClientManager `\\ \\(\\)" +msgstr ":py:obj:`ClientManager `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.ClientManager:1 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Abstract base class for managing Flower clients." 
+msgstr "Flower 客户端的抽象基类。" -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "FedAvg" -msgstr "DP-FedAvg" +msgid ":py:obj:`Driver `\\ \\(\\)" +msgstr ":py:obj:`run_driver_api `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg:3 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of -msgid "Implementation based on https://arxiv.org/abs/1602.05629" -msgstr "实施基于 https://arxiv.org/abs/1602.05629" - -#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 -#: of -msgid "" -"Fraction of clients used during training. In case `min_fit_clients` is " -"larger than `fraction_fit * available_clients`, `min_fit_clients` will " -"still be sampled. Defaults to 1.0." -msgstr "" -"训练过程中使用的客户端比例。如果 `min_fit_clients` 大于 `fraction_fit * " -"available_clients`,则仍会对 `min_fit_clients` 进行采样。默认为 1.0。" +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.driver.driver.Driver:1 of +#, fuzzy +msgid "Abstract base Driver class for the Driver API." +msgstr "Flower 客户端的抽象基类。" -#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 -#: of -msgid "" -"Fraction of clients used during validation. In case " -"`min_evaluate_clients` is larger than `fraction_evaluate * " -"available_clients`, `min_evaluate_clients` will still be sampled. " -"Defaults to 1.0." -msgstr "" -"验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " -"available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" +#: ../../source/ref-api/flwr.server.rst:37::1 +#, fuzzy +msgid ":py:obj:`History `\\ \\(\\)" +msgstr ":py:obj:`History `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg:33 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.history.History:1 of #, fuzzy -msgid "Enable (True) or disable (False) in-place aggregation of model updates." -msgstr "启用(真)或禁用(假)模型更新的就地聚合。" +msgid "History class for training and/or evaluation metrics collection." 
+msgstr "**hist** -- 包含训练和评估指标的对象。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`LegacyContext `\\ \\(context\\[\\, " +"config\\, strategy\\, ...\\]\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +":py:obj:`LegacyContext `\\ \\(state\\[\\, " +"config\\, strategy\\, ...\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.compat.legacy_context.LegacyContext:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "Legacy Context." +msgstr "传承背景。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +":py:obj:`Server `\\ \\(\\*\\, client\\_manager\\[\\, " +"strategy\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`ServerApp `\\ \\(\\[server\\, config\\, " +"strategy\\, ...\\]\\)" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_app.ServerApp:1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, 
parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Flower ServerApp." +msgstr "Flower 服务器。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`ServerAppComponents `\\ " +"\\(\\[server\\, config\\, ...\\]\\)" +msgstr "server.strategy.Strategy" + +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.serverapp_components.ServerAppComponents:1 of +msgid "Components to construct a ServerApp." msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`ServerConfig `\\ \\(\\[num\\_rounds\\," +" round\\_timeout\\]\\)" msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.server_config.ServerConfig:1 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +msgid "Flower server config." 
+msgstr "Flower 服务器。" -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 +#: ../../source/ref-api/flwr.server.rst:37::1 #, fuzzy -msgid "FedAvgAndroid" -msgstr "DPFedAvgAdaptive" +msgid ":py:obj:`SimpleClientManager `\\ \\(\\)" +msgstr ":py:obj:`SimpleClientManager `\\ \\(\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:37::1 +#: flwr.server.client_manager.SimpleClientManager:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid "Provides a pool of available clients." +msgstr "使用部分可用客户进行评估。" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`flwr.server.strategy `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." +msgstr "包含策略抽象和不同的实现方法。" + +#: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" -msgstr "" -":py:obj:`bytes_to_ndarray " -"`\\ \\(tensor\\)" +msgid ":py:obj:`flwr.server.workflow `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of +#: ../../source/ref-api/flwr.server.rst:56::1 +#: flwr.server.workflow:1 of #, fuzzy -msgid "Deserialize NumPy array from bytes." 
-msgstr "从字节反序列化 NumPy ndarray。" +msgid "Workflows." +msgstr "工作流程" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.ClientManager.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "ClientManager" +msgstr "客户端" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`all `\\ \\(\\)" +msgstr ":py:obj:`all `\\ \\(\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1 +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.all:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Return all available clients." 
+msgstr "返回所有可用客户。" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid ":py:obj:`num_available `\\ \\(\\)" +msgstr ":py:obj:`num_available `\\ \\(\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.num_available:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.num_available:1 of #, fuzzy -msgid "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" -msgstr "" -":py:obj:`ndarray_to_bytes " -"`\\ \\(ndarray\\)" +msgid "Return the number of available clients." +msgstr "返回样本大小和所需的可用客户数量。" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid "Serialize NumPy array to bytes." -msgstr "将 NumPy ndarray 序列化为字节。" +msgid ":py:obj:`register `\\ \\(client\\)" +msgstr ":py:obj:`register `\\ \\(client\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.register:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.register:1 of +#, fuzzy +msgid "Register Flower ClientProxy instance." 
+msgstr "注册 Flower ClientProxy 实例。" + +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -":py:obj:`ndarrays_to_parameters " -"`\\ " -"\\(ndarrays\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.sample:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.sample:1 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Sample a number of Flower ClientProxy instances." +msgstr "取样若干 Flower ClientProxy 实例。" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid ":py:obj:`unregister `\\ \\(client\\)" +msgstr ":py:obj:`unregister `\\ \\(client\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.unregister:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.unregister:1 of +#, fuzzy +msgid "Unregister Flower ClientProxy instance." 
+msgstr "取消注册 Flower ClientProxy 实例。" + +#: flwr.server.client_manager.ClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" msgstr "" -":py:obj:`parameters_to_ndarrays " -"`\\ " -"\\(parameters\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\, timeout\\)" -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 -#: of +#: flwr.server.client_manager.ClientManager.all:1::1 +#: flwr.server.client_manager.ClientManager.wait_for:1 +#: flwr.server.client_manager.SimpleClientManager.all:1::1 +#: flwr.server.client_manager.SimpleClientManager.wait_for:1 of #, fuzzy -msgid "Convert parameters object to NumPy weights." -msgstr "将参数对象转换为 NumPy ndarrays。" +msgid "Wait until at least `num_clients` are available." +msgstr "等待至少 `num_clients` 可用。" -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#: flwr.server.client_manager.ClientManager.num_available:3 +#: flwr.server.client_manager.SimpleClientManager.num_available:3 of #, fuzzy -msgid "FedAvgM" -msgstr "DP-FedAvg" +msgid "**num_available** -- The number of currently available clients." +msgstr "**num_available** -- 当前可用客户端的数量。" -#: flwr.server.strategy.fedavgm.FedAvgM:3 of +#: flwr.server.client_manager.ClientManager.register:6 +#: flwr.server.client_manager.SimpleClientManager.register:6 of #, fuzzy -msgid "Implementation based on https://arxiv.org/abs/1909.06335" -msgstr "实施基于 https://arxiv.org/pdf/1909.06335.pdf" - -#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"Server-side learning rate used in server-side optimization. Defaults to " -"1.0." -msgstr "服务器端优化中使用的服务器端学习率。默认为 1.0。" +"**success** -- Indicating if registration was successful. False if " +"ClientProxy is already registered or can not be registered for any " +"reason." 
+msgstr "**success** -- 表示注册是否成功。如果 ClientProxy 已注册或因故无法注册,则为 False。" -#: flwr.server.strategy.fedavgm.FedAvgM:28 of -msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." -msgstr "用于 FedAvgM 的服务器端动量因子。默认为 0.0。" +#: flwr.server.client_manager.ClientManager.unregister:3 +#: flwr.server.client_manager.SimpleClientManager.unregister:3 of +#, fuzzy +msgid "This method is idempotent." +msgstr "这种方法是幂等的。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:2 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +msgid "Driver" +msgstr "服务器" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\[\\, ttl\\]\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`create_message `\\ " +"\\(content\\, message\\_type\\, ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.create_message:1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +msgid "Create a new message with specified parameters." 
+msgstr "使用指定参数创建新信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`get_node_ids `\\ \\(\\)" +msgstr ":py:obj:`get_node_ids `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.get_node_ids:1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Get node IDs." +msgstr "获取节点 ID。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`pull_messages `\\ " +"\\(message\\_ids\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.pull_messages:1 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Pull messages based on message IDs." 
+msgstr "根据信息 ID 提取信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`push_messages `\\ " +"\\(messages\\)" -#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.push_messages:1 of #, fuzzy -msgid "FedMedian" -msgstr "联邦医保" +msgid "Push messages to specified node IDs." +msgstr "向指定的节点 ID 推送信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`send_and_receive `\\ " +"\\(messages\\, \\*\\[\\, timeout\\]\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Driver.rst:38::1 +#: flwr.server.driver.driver.Driver.send_and_receive:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid "Push messages to specified node IDs and pull the reply messages." +msgstr "向指定的节点 ID 推送信息并提取回复信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of -msgid "Aggregate fit results using median." 
-msgstr "使用中位数汇总拟合结果。" +#: flwr.server.driver.driver.Driver.create_message:1::1 of +#, fuzzy +msgid ":py:obj:`run `\\" +msgstr ":py:obj:`run_driver_api `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.Driver.run:1 +#: flwr.server.driver.driver.Driver.create_message:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Run information." +msgstr "运行模拟" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:3 of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"This method constructs a new `Message` with given content and metadata. " +"The `run_id` and `src_node_id` will be set automatically." +msgstr "本方法使用给定的内容和元数据构建新的 `Message` 。run_id \"和 \"src_node_id \"将自动设置。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:6 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"The content for the new message. This holds records that are to be sent " +"to the destination node." +msgstr "新信息的内容。其中包含要发送到目的节点的记录。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:9 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"The type of the message, defining the action to be executed on the " +"receiving end." 
+msgstr "信息类型,定义接收端要执行的操作。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:12 of +#, fuzzy +msgid "The ID of the destination node to which the message is being sent." +msgstr "信息发送目的地节点的 ID。" + +#: flwr.server.driver.driver.Driver.create_message:14 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"The ID of the group to which this message is associated. In some " +"settings, this is used as the FL round." +msgstr "与该信息相关联的组的 ID。在某些设置中,它被用作 FL 轮。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.create_message:17 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"Time-to-live for the round trip of this message, i.e., the time from " +"sending this message to receiving a reply. It specifies in seconds the " +"duration for which the message and its potential reply are considered " +"valid. If unset, the default TTL (i.e., `common.DEFAULT_TTL`) will be " +"used." +msgstr "此报文往返的有效时间,即从发送此报文到收到回复的时间。它规定了信息及其潜在回复被视为有效的持续时间。" -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +#: flwr.server.driver.driver.Driver.create_message:23 of #, fuzzy -msgid "FedOpt" -msgstr "FedOpt" +msgid "" +"**message** -- A new `Message` instance with the specified content and " +"metadata." +msgstr "**message** -- 具有指定内容和元数据的新 \"信息 \"实例。" -#: flwr.server.strategy.fedopt.FedOpt:33 of -msgid "Momentum parameter. Defaults to 0.0." -msgstr "动量参数。默认为 0.0。" +#: flwr.server.driver.driver.Driver.pull_messages:3 of +#, fuzzy +msgid "" +"This method is used to collect messages from the SuperLink that " +"correspond to a set of given message IDs." 
+msgstr "该方法用于从超级链接中收集与一组给定消息 ID 相对应的消息。" -#: flwr.server.strategy.fedopt.FedOpt:35 of -msgid "Second moment parameter. Defaults to 0.0." -msgstr "第二动量参数。默认为 0.0。" +#: flwr.server.driver.driver.Driver.pull_messages:6 of +#, fuzzy +msgid "An iterable of message IDs for which reply messages are to be retrieved." +msgstr "要检索回复信息的信息 ID 的可迭代项。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.pull_messages:9 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +msgid "**messages** -- An iterable of messages received." +msgstr "**messages** -- 收到的信息迭代。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:3 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"This method takes an iterable of messages and sends each message to the " +"node specified in `dst_node_id`." +msgstr "该方法接收一个可迭代的消息,并将每条消息发送到 `dst_node_id` 中指定的节点。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:6 +#: flwr.server.driver.driver.Driver.send_and_receive:7 of #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +msgid "An iterable of messages to be sent." 
+msgstr "要发送的信息迭代。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.push_messages:9 of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"**message_ids** -- An iterable of IDs for the messages that were sent, " +"which can be used to pull replies." +msgstr "**message_ids** -- 已发送信息的可迭代 ID,可用于提取回复信息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:3 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"This method sends a list of messages to their destination node IDs and " +"then waits for the replies. It continues to pull replies until either all" +" replies are received or the specified timeout duration is exceeded." +msgstr "该方法会向目标节点 ID 发送信息列表,然后等待回复。它会继续提取回复,直到收到所有回复或超过指定的超时时间。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:9 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"The timeout duration in seconds. If specified, the method will wait for " +"replies for this duration. If `None`, there is no time limit and the " +"method will wait until replies for all messages are received." 
+msgstr "超时时间(秒)。如果指定,该方法将在此期限内等待回复。如果指定为 \"无\",则没有时间限制,该方法将等待直到收到所有信息的回复。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:14 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "**replies** -- An iterable of reply messages received from the SuperLink." +msgstr "**replies** -- 从超级链接收到的回复信息的迭代。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.driver.driver.Driver.send_and_receive:19 of #, fuzzy msgid "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"This method uses `push_messages` to send the messages and `pull_messages`" +" to collect the replies. If `timeout` is set, the method may not return " +"replies for all sent messages. A message remains valid until its TTL, " +"which is not affected by `timeout`." msgstr "" -":py:obj:`num_fit_clients `\\" -" \\(num\\_available\\_clients\\)" +"该方法使用 `push_messages` 发送信息,并使用 `pull_messages` 收集回复。如果设置了 " +"`timeout`,该方法可能不会返回所有已发送消息的回复。消息在其 TTL 之前一直有效,不受 `timeout` 影响。" -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 +#: ../../source/ref-api/flwr.server.History.rst:2 #, fuzzy -msgid "FedProx" -msgstr "FedProx" - -#: flwr.server.strategy.fedprox.FedProx:3 of -msgid "Implementation based on https://arxiv.org/abs/1812.06127" -msgstr "实施基于 https://arxiv.org/abs/1812.06127" - -#: flwr.server.strategy.fedprox.FedProx:5 of -msgid "" -"The strategy in itself will not be different than FedAvg, the client " -"needs to be adjusted. 
A proximal term needs to be added to the loss " -"function during the training:" -msgstr "策略本身与 FedAvg 并无不同,客户端需要进行调整。在训练过程中,需要在损失函数中添加一个近端项:" +msgid "History" +msgstr "历史" -#: flwr.server.strategy.fedprox.FedProx:9 of +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy msgid "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" -"\n" - -#: flwr.server.strategy.fedprox.FedProx:12 of -msgid "" -"Where $w^t$ are the global parameters and $w$ are the local weights the " -"function will be optimized with." -msgstr "其中,$w^t$ 是全局参数,$w$ 是优化函数的局部权重。" - -#: flwr.server.strategy.fedprox.FedProx:15 of -msgid "In PyTorch, for example, the loss would go from:" -msgstr "例如,在 PyTorch 中,损失将从:" - -#: flwr.server.strategy.fedprox.FedProx:21 of -msgid "To:" -msgstr "致:" +":py:obj:`add_loss_centralized " +"`\\ \\(server\\_round\\, " +"loss\\)" -#: flwr.server.strategy.fedprox.FedProx:30 of -msgid "" -"With `global_params` being a copy of the parameters before the training " -"takes place." -msgstr "其中,\"global_params \"是训练前的参数副本。" +#: flwr.server.history.History.add_loss_centralized:1 +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy +msgid "Add one loss entry (from centralized evaluation)." +msgstr "集中评估" -#: flwr.server.strategy.fedprox.FedProx:65 of +#: flwr.server.history.History.add_loss_centralized:1::1 of +#, fuzzy msgid "" -"The weight of the proximal term used in the optimization. 0.0 makes this " -"strategy equivalent to FedAvg, and the higher the coefficient, the more " -"regularization will be used (that is, the client parameters will need to " -"be closer to the server parameters during training)." 
+":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" msgstr "" -"优化中使用的近端项权重。0.0 使该策略等同于 " -"FedAvg,系数越大,使用的正则化就越多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" +":py:obj:`add_loss_distributed " +"`\\ \\(server\\_round\\, " +"loss\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_loss_distributed:1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +msgid "Add one loss entry (from distributed evaluation)." +msgstr "增加一个损失条目(来自分布式评估)。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`add_metrics_centralized " +"`\\ \\(server\\_round\\, " +"metrics\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_centralized:1 of +#, fuzzy +msgid "Add metrics entries (from centralized evaluation)." 
+msgstr "集中评估" + +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`add_metrics_distributed " +"`\\ \\(server\\_round\\, " +"metrics\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Add metrics entries (from distributed evaluation)." +msgstr "定制的集中/分布式评估" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`add_metrics_distributed_fit " +"`\\ \\(server\\_round\\," +" ...\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.history.History.add_loss_centralized:1::1 +#: flwr.server.history.History.add_metrics_distributed_fit:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Add metrics entries (from distributed fit)." 
+msgstr "添加度量条目(来自分布式拟合)。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:2 #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "LegacyContext" +msgstr "遗留上下文" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.compat.legacy_context.LegacyContext:1 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Bases: :py:class:`~flwr.common.context.Context`" +msgstr "Bases: :py:class:`~flwr.common.context.Context`" -#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of -msgid "Sends the proximal factor mu to the clients" -msgstr "向客户发送近端因子mu" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`config `\\" +msgstr "server.strategy.Strategy" -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "FedTrimmedAvg" -msgstr "server.strategy.FedTrimmedAvg" +msgid ":py:obj:`strategy `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "Implemented based on: https://arxiv.org/abs/1803.01498" -msgstr "实施基于 https://arxiv.org/abs/1802.07927。" +msgid ":py:obj:`client_manager `\\" +msgstr ":py:obj:`client_manager `\\" -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of -msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
-msgstr "截取分布两个尾部的分数。默认为 0.2。" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`history `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`node_id `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +msgid ":py:obj:`node_config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of -msgid "Aggregate fit results using trimmed average." 
-msgstr "使用修剪平均值汇总拟合结果。" +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 +#, fuzzy +msgid ":py:obj:`state `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.LegacyContext.rst:35::1 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`run_config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.Server.rst:2 +msgid "Server" +msgstr "服务器" + +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid ":py:obj:`client_manager `\\ \\(\\)" +msgstr ":py:obj:`client_manager `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1 +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Return ClientManager." 
+msgstr "返回客户端(本身)。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`disconnect_all_clients " +"`\\ \\(timeout\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.disconnect_all_clients:1 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Send shutdown signal to all clients." +msgstr "向所有客户端发送关闭信号。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`evaluate_round `\\ " +"\\(server\\_round\\, timeout\\)" -#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.evaluate_round:1 of #, fuzzy -msgid "FedXgbBagging" -msgstr "FedXgbBagging" +msgid "Validate current global model on a number of clients." 
+msgstr "当前(全局)模型参数。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" +msgstr ":py:obj:`fit `\\ \\(num\\_rounds\\, timeout\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit:1 of #, fuzzy -msgid "Aggregate evaluation metrics using average." -msgstr "采用加权平均法计算评估损失总额。" +msgid "Run federated averaging for a number of rounds." +msgstr "联邦平均动量策略。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" msgstr "" -":py:obj:`aggregate_fit " -"`\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`fit_round `\\ \\(server\\_round\\," +" timeout\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.fit_round:1 of #, fuzzy -msgid "Aggregate fit results using bagging." 
-msgstr "使用 Bulyan 技术汇总拟合结果。" +msgid "Perform a single round of federated averaging." +msgstr "本轮联邦学习。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" +msgstr "" +":py:obj:`set_max_workers `\\ " +"\\(max\\_workers\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_max_workers:1 of #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "Set the max_workers used by ThreadPoolExecutor." +msgstr "设置 ThreadPoolExecutor 使用的最大工作器数。" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`set_strategy `\\ \\(strategy\\)" +msgstr ":py:obj:`set_strategy `\\ \\(strategy\\)" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.server.Server.client_manager:1::1 +#: flwr.server.server.Server.set_strategy:1 of #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "Replace server strategy." 
+msgstr "server.strategy" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.ServerApp.rst:2 #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "ServerApp" +msgstr "服务器" -#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 -#: of +#: flwr.server.server_app.ServerApp:5 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Use the `ServerApp` with an existing `Strategy`:" +msgstr "使用现有策略" -#: ../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 +#: flwr.server.server_app.ServerApp:17 of #, fuzzy -msgid "FedXgbCyclic" -msgstr "FedXgbCyclic" +msgid "Use the `ServerApp` with a custom main function:" +msgstr "使用带有自定义主函数的 `ServerApp`:" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.server_app.ServerApp.main:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`main `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.server_app.ServerApp.main:1 +#: flwr.server.server_app.ServerApp.main:1::1 of #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\," -" results\\, failures\\)" +msgid "Return a decorator that registers the main fn with the server app." 
+msgstr "返回向服务器应用程序注册 main fn 的装饰器。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:2 #, fuzzy -msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +msgid "ServerAppComponents" +msgstr "服务器" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.serverapp_components.ServerAppComponents:3 of #, fuzzy msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, one will be created internally." +msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of -#, fuzzy +#: flwr.server.app.start_server:9 +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." +msgstr "目前支持的值有:`num_rounds`(int,默认值:1)和以秒为单位的`round_timeout`(float,默认值:无)。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.serverapp_components.ServerAppComponents:9 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`flwr.server.strategy.FedAvg` will be used." 
msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " +"`flwr.server.strategy.FedAvg`。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: flwr.server.serverapp_components.ServerAppComponents:13 of #, fuzzy msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"An implementation of the class `flwr.server.ClientManager`. If no " +"implementation is provided, then `flwr.server.SimpleClientManager` will " +"be used." msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " +"`flwr.server.client_manager.SimpleClientManager`。" -#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`client_manager " +"`\\" +msgstr ":py:obj:`client_manager `\\ \\(\\)" -#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy -msgid "FedXgbNnAvg" -msgstr "DP-FedAvg" +msgid ":py:obj:`config `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy -msgid "" -"This strategy is deprecated, but a copy of it is available in Flower " -"Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
-msgstr "" -"该策略已被弃用,但在 Flower Baselines: " -"https://github.com/adap/flower/tree/main/baselines/hfedxgboost 中有其副本。" +msgid ":py:obj:`server `\\" +msgstr ":py:obj:`run_server_app `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerAppComponents.rst:31::1 #, fuzzy -msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +msgid ":py:obj:`strategy `\\" +msgstr "server.strategy.Strategy" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerConfig.rst:2 #, fuzzy -msgid "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" -msgstr "" -":py:obj:`aggregate_fit " -"`\\ \\(server\\_round\\, " -"results\\, failures\\)" +msgid "ServerConfig" +msgstr "服务器" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.server_config.ServerConfig:3 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"All attributes have default values which allows users to configure just " +"the ones they care about." 
+msgstr "所有属性都有默认值,用户只需配置自己关心的属性即可。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 #, fuzzy -msgid "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" -msgstr "" -":py:obj:`configure_fit " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +msgid ":py:obj:`num_rounds `\\" +msgstr ":py:obj:`num_rounds `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.ServerConfig.rst:29::1 #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid ":py:obj:`round_timeout `\\" +msgstr ":py:obj:`round_timeout `\\" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.SimpleClientManager.rst:2 #, fuzzy -msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +msgid "SimpleClientManager" +msgstr "SimpleClientManager" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager:1 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" - -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 -#, fuzzy -msgid "FedYogi" -msgstr "FedYogi" - -#: flwr.server.strategy.fedyogi.FedYogi:32 of -#, fuzzy -msgid "Server-side learning rate. Defaults to 1e-2." 
-msgstr "服务器端学习率。默认为 1e-1。" - -#: flwr.server.strategy.fedyogi.FedYogi:34 of -#, fuzzy -msgid "Client-side learning rate. Defaults to 0.0316." -msgstr "客户端学习率。默认为 1e-1。" +msgid "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" +msgstr "Bases: :py:class:`~flwr.server.client_manager.ClientManager`" -#: flwr.server.strategy.fedyogi.FedYogi:40 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy -msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." -msgstr "控制算法的适应度。默认为 1e-9。" +msgid ":py:obj:`all `\\ \\(\\)" +msgstr ":py:obj:`all `\\ \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`num_available `\\" +" \\(\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`num_available `\\" +" \\(\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`register `\\ " +"\\(client\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`register `\\ " +"\\(client\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`sample `\\ " +"\\(num\\_clients\\[\\, min\\_num\\_clients\\, criterion\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: 
flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`unregister `\\ " +"\\(client\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.all:1::1 of #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`wait_for `\\ " +"\\(num\\_clients\\[\\, timeout\\]\\)" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:3 of #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Blocks until the requested number of clients is available or until a " +"timeout is reached. Current timeout default: 1 day." +msgstr "阻塞,直到请求的客户端数量可用或达到超时为止。当前超时默认值:1 天。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:6 of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "The number of clients to wait for." +msgstr "需要等待的客户数量。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#: flwr.server.client_manager.SimpleClientManager.wait_for:8 of #, fuzzy -msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "The time in seconds to wait for, defaults to 86400 (24h)." 
+msgstr "以秒为单位的等待时间,默认为 86400(24 小时)。" -#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +#: flwr.server.client_manager.SimpleClientManager.wait_for:11 of #, fuzzy -msgid "Krum" -msgstr "Krum" +msgid "**success**" +msgstr "**success**" -#: flwr.server.strategy.krum.Krum:3 of +#: ../../source/ref-api/flwr.server.start_server.rst:2 #, fuzzy -msgid "Implementation based on https://arxiv.org/abs/1703.02757" -msgstr "实施基于 https://arxiv.org/abs/2304.07537。" +msgid "start\\_server" +msgstr "server.start_server" -#: flwr.server.strategy.krum.Krum:17 of -msgid "" -"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" -" that case classical Krum is applied." -msgstr "求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 Krum。" +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:5 of msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" -msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\, " -"results\\, ...\\)" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." +msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:12 of msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." 
msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" - -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 -#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of -msgid "Aggregate fit results using Krum." -msgstr "使用 Krum 汇总拟合结果。" +"抽象基类 `flwr.server.strategy.Strategy` 的实现。如果没有提供策略,`start_server` 将使用 " +"`flwr.server.strategy.FedAvg`。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:16 of msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\, " -"parameters\\, ...\\)" +"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_server` 将使用 " +"`flwr.server.client_manager.SimpleClientManager`。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:21 of msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." 
msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +"可与 Flower 客户端交换的 gRPC 消息的最大长度:默认值对大多数模型都足够了。训练超大模型的用户可能需要增加该值。请注意,Flower " +"客户端需要以相同的值启动(请参阅 `flwr.client.start_client`),否则客户端将不知道已增加的限制并阻止更大的消息。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +"包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素: * CA 证书,* 服务器证书, * " +"服务器私钥。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy +#: flwr.server.app.start_server:28 of msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" -msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "包含根证书、服务器证书和私钥的元组,用于启动启用 SSL 的安全服务器。元组应按以下顺序包含三个字节元素:" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.app.start_server:32 of +msgid "CA certificate." 
+msgstr "CA 证书。" -#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of -#, fuzzy -msgid "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_fit_clients `\\ " -"\\(num\\_available\\_clients\\)" +#: flwr.server.app.start_server:33 of +msgid "server certificate." +msgstr "服务器证书。" -#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#: flwr.server.app.start_server:34 of +msgid "server private key." +msgstr "服务器私人密钥。" + +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." +msgstr "**hist** -- 包含训练和评估指标的对象。" + +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" +msgstr "启动不安全的服务器:" + +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" +msgstr "启动支持 SSL 的服务器:" + +#: ../../source/ref-api/flwr.server.strategy.rst:2 #, fuzzy -msgid "QFedAvg" -msgstr "DP-FedAvg" +msgid "strategy" +msgstr "Krum 策略。" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ \\(server\\_round\\," -" results\\, ...\\)" +":py:obj:`Bulyan `\\ \\(\\*\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy." 
+msgstr "Bulyan 策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`DPFedAvgAdaptive `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ \\(server\\_round\\," -" parameters\\, ...\\)" +":py:obj:`DPFedAvgFixed `\\ " +"\\(strategy\\, num\\_sampled\\_clients\\, ...\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." 
+msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`DifferentialPrivacyClientSideAdaptiveClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: of #, fuzzy -msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" -msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +msgid "Strategy wrapper for central DP with client-side adaptive clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`DifferentialPrivacyClientSideFixedClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: of #, fuzzy -msgid "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" -msgstr "" -":py:obj:`num_evaluation_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +msgid "Strategy wrapper for central DP with client-side fixed clipping." 
+msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" -#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" msgstr "" -":py:obj:`num_fit_clients " -"`\\ " -"\\(num\\_available\\_clients\\)" +":py:obj:`DifferentialPrivacyServerSideAdaptiveClipping " +"`\\ " +"\\(...\\)" -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: of #, fuzzy -msgid "Strategy" -msgstr "Krum 策略。" +msgid "Strategy wrapper for central DP with server-side adaptive clipping." +msgstr "用于配置具有自适应剪切功能的 DP 策略的包装器。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" msgstr "" -":py:obj:`aggregate_evaluate " -"`\\ " -"\\(server\\_round\\, results\\, ...\\)" +":py:obj:`DifferentialPrivacyServerSideFixedClipping " +"`\\ " +"\\(...\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 #: of -msgid "Aggregate evaluation results." -msgstr "聚合评估结果。" +#, fuzzy +msgid "Strategy wrapper for central DP with server-side fixed clipping." 
+msgstr "封装器,用于为具有固定剪切功能的 DP 配置策略。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`aggregate_fit `\\ " -"\\(server\\_round\\, results\\, failures\\)" +":py:obj:`FaultTolerantFedAvg " +"`\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of -msgid "Aggregate training results." -msgstr "汇总训练结果。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "可配置的容错 FedAvg 策略实施。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`configure_evaluate " -"`\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedAdagrad `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." 
+msgstr "FedAdagrad 策略 - 使用 Adagrad 进行自适应联合优化。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`configure_fit `\\ " -"\\(server\\_round\\, parameters\\, ...\\)" +":py:obj:`FedAdam `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." +msgstr "FedAdam - 使用 Adam 进行自适应联合优化。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" msgstr "" -":py:obj:`evaluate `\\ " -"\\(server\\_round\\, parameters\\)" +":py:obj:`FedAvg `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.evaluate:1 of -msgid "Evaluate the current model parameters." -msgstr "评估当前的模型参数。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of +msgid "Federated Averaging strategy." 
+msgstr "联邦平均策略。" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`initialize_parameters " -"`\\ " -"\\(client\\_manager\\)" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of -msgid "Initialize the (global) model parameters." -msgstr "初始化(全局)模型参数。" +":py:obj:`FedAvgAndroid `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " -"one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -"从先前选定和配置的客户端进行的成功更新。每一对`(ClientProxy, " -"FitRes)`都是来自先前选定客户端的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中都应该有一个`Exception`。" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of -msgid "Exceptions that occurred while the server was waiting for client updates." -msgstr "服务器等待客户端更新时发生的异常。" - -#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of -msgid "" -"**aggregation_result** -- The aggregated evaluation result. Aggregation " -"typically uses some variant of a weighted average." 
-msgstr "**aggregation_result** -- 汇总的评估结果。聚合通常使用某种加权平均值。" +":py:obj:`FedAvgM `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of -msgid "" -"Successful updates from the previously selected and configured clients. " -"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" -" one of the previously selected clients. Not that not all previously " -"selected clients are necessarily included in this list: a client might " -"drop out and not submit a result. For each client that did not submit an " -"update, there should be an `Exception` in `failures`." -msgstr "" -"来自先前选定和配置的客户端的成功更新。每一对`(ClientProxy, " -"FitRes)`都构成先前选定的客户端之一的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,\"失败" -" \"中都应该有一个 \"异常\"。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 of +msgid "Federated Averaging with Momentum strategy." +msgstr "联邦平均动量策略。" -#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the new global model parameters (i.e., it will replace the " -"previous parameters with the ones returned from this method). If `None` " -"is returned (e.g., because there were only failures and no viable " -"results) then the server will no update the previous model parameters, " -"the updates received in this round are discarded, and the global model " -"parameters remain the same." +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -"**parameters** -- 如果返回参数,那么服务器将把这些参数作为新的全局模型参数(即用本方法返回的参数替换之前的参数)。如果返回 " -"\"无\"(例如,因为只有失败而没有可行的结果),那么服务器将不再更新之前的模型参数,本轮收到的更新将被丢弃,全局模型参数保持不变。" - -#: flwr.server.strategy.strategy.Strategy.evaluate:3 of -msgid "" -"This function can be used to perform centralized (i.e., server-side) " -"evaluation of model parameters." 
-msgstr "该函数可用于对模型参数进行集中(即服务器端)评估。" +":py:obj:`FedMedian `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.strategy.strategy.Strategy.evaluate:11 of -msgid "" -"**evaluation_result** -- The evaluation result, usually a Tuple " -"containing loss and a dictionary containing task-specific metrics (e.g., " -"accuracy)." -msgstr "**evaluation_result** -- 评估结果,通常是一个元组,包含损失值和一个字典,字典中包含特定任务的指标(如准确率)。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +#, fuzzy +msgid "Configurable FedMedian strategy implementation." +msgstr "可配置的 FedAvg 策略实施。" -#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#, fuzzy msgid "" -"**parameters** -- If parameters are returned, then the server will treat " -"these as the initial global model parameters." -msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" +":py:obj:`FedOpt `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -#: ../../source/ref-api/flwr.server.workflow.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedopt.FedOpt:1 of #, fuzzy -msgid "workflow" -msgstr "工作流程" +msgid "Federated Optim strategy." +msgstr "联邦优化策略。" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`DefaultWorkflow `\\ " -"\\(\\[fit\\_workflow\\, ...\\]\\)" +":py:obj:`FedProx `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of -#, fuzzy -msgid "Default workflow in Flower." 
-msgstr "Flower 中的默认工作流程。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Federated Optimization strategy." +msgstr "联邦优化策略。" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" msgstr "" -":py:obj:`SecAggPlusWorkflow `\\ " -"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" +":py:obj:`FedTrimmedAvg `\\ " +"\\(\\*\\[\\, fraction\\_fit\\, ...\\]\\)" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 -#: of -#, fuzzy -msgid "The workflow for the SecAgg+ protocol." -msgstr "SecAgg+ 协议的工作流程。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +msgstr "带修剪均值的联邦平均法[Dong Yin 等,2021]。" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" msgstr "" -":py:obj:`SecAggWorkflow `\\ " -"\\(reconstruction\\_threshold\\, \\*\\)" +":py:obj:`FedXgbBagging `\\ " +"\\(\\[evaluate\\_function\\]\\)" -#: ../../source/ref-api/flwr.server.workflow.rst:24::1 -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 of #, fuzzy -msgid "The workflow for the SecAgg protocol." -msgstr "SecAgg 协议的工作流程。" +msgid "Configurable FedXgbBagging strategy implementation." 
+msgstr "可配置的 FedXgbNAvg 策略实施。" -#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "DefaultWorkflow" -msgstr "工作流程" +msgid "" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" +msgstr "" +":py:obj:`FedXgbCyclic `\\ " +"\\(\\*\\*kwargs\\)" -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 of #, fuzzy -msgid "SecAggPlusWorkflow" -msgstr "工作流程" +msgid "Configurable FedXgbCyclic strategy implementation." +msgstr "可配置的 FedAvg 策略实施。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"The SecAgg+ protocol ensures the secure summation of integer vectors " -"owned by multiple parties, without accessing any individual integer " -"vector. This workflow allows the server to compute the weighted average " -"of model parameters across all clients, ensuring individual contributions" -" remain private. This is achieved by clients sending both, a weighting " -"factor and a weighted version of the locally updated parameters, both of " -"which are masked for privacy. Specifically, each client uploads \"[w, w *" -" params]\" with masks, where weighting factor 'w' is the number of " -"examples ('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." 
+":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" msgstr "" -"SecAgg+ " -"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," -" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " -"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" +":py:obj:`FedXgbNnAvg `\\ \\(\\*args\\, " +"\\*\\*kwargs\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." +msgstr "可配置的 FedXgbNAvg 策略实施。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg+ " -"configuration to clients and collect their public keys. - 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" -msgstr "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ 配置并收集其公钥。- 共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" +msgstr "" +":py:obj:`FedYogi `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 -#: of -#, fuzzy -msgid "key shares." -msgstr "关键股份。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." 
+msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:18 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:18 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"'collect masked vectors': Forward encrypted secret key shares to target " -"clients and collect masked model parameters." -msgstr "收集屏蔽向量\": 向目标客户端转发加密密钥共享,并收集屏蔽模型参数。" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" +msgstr "" +":py:obj:`Krum `\\ \\(\\*\\[\\, " +"fraction\\_fit\\, fraction\\_evaluate\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:20 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:20 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.krum.Krum:1 of #, fuzzy -msgid "" -"'unmask': Collect secret key shares to decrypt and aggregate the model " -"parameters." -msgstr "解密\": 收集密钥共享,解密并汇总模型参数。" +msgid "Krum [Blanchard et al., 2017] strategy." +msgstr "FedYogi [Reddi 等人,2020] 策略。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:22 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:22 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy msgid "" -"Only the aggregated model parameters are exposed and passed to " -"`Strategy.aggregate_fit`, ensuring individual data privacy." 
-msgstr "只有聚合模型参数才会公开并传递给 `Strategy.aggregate_fit`,从而确保个人数据隐私。" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" +msgstr "" +":py:obj:`QFedAvg `\\ \\(\\*\\[\\, " +"q\\_param\\, qffl\\_learning\\_rate\\, ...\\]\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:25 -#: of +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." +msgstr "可配置的 QFedAvg 策略实施。" + +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 #, fuzzy -msgid "" -"The number of shares into which each client's private key is split under " -"the SecAgg+ protocol. If specified as a float, it represents the " -"proportion of all selected clients, and the number of shares will be set " -"dynamically in the run time. A private key can be reconstructed from " -"these shares, allowing for the secure aggregation of model updates. Each " -"client sends one share to each of its neighbors while retaining one." -msgstr "" -"在 SecAgg+ " -"协议下,每个客户的私钥被分成的份数。如果指定为浮点数,则代表所有选定客户的比例,份额数将在运行时动态设置。私钥可以从这些份额中重建,从而实现模型更新的安全聚合。每个客户端向其每个邻居发送一份,同时保留一份。" +msgid ":py:obj:`Strategy `\\ \\(\\)" +msgstr "server.strategy.Strategy" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:25 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:32 -#: of -#, fuzzy -msgid "" -"The minimum number of shares required to reconstruct a client's private " -"key, or, if specified as a float, it represents the proportion of the " -"total number of shares needed for reconstruction. This threshold ensures " -"privacy by allowing for the recovery of contributions from dropped " -"clients during aggregation, without compromising individual client data." 
-msgstr "重建客户私钥所需的最小份数,如果指定为浮动,则表示重建所需的份数占总份数的比例。这个阈值允许在聚合过程中恢复掉线客户的贡献,从而确保隐私,而不会泄露单个客户的数据。" +#: ../../source/ref-api/flwr.server.strategy.rst:45::1 +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." +msgstr "服务器策略实现的抽象基类。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:31 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:38 -#: of +#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst:2 #, fuzzy -msgid "" -"The maximum value of the weight that can be assigned to any single " -"client's update during the weighted average calculation on the server " -"side, e.g., in the FedAvg algorithm." -msgstr "在服务器端进行加权平均计算(如 FedAvg 算法)时,可分配给任何单个客户端更新的权重的最大值。" +msgid "Bulyan" +msgstr "Bulyan" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:35 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:42 -#: of +#: flwr.server.strategy.bulyan.Bulyan:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 +#: flwr.server.strategy.fedopt.FedOpt:1 flwr.server.strategy.fedprox.FedProx:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 +#: flwr.server.strategy.krum.Krum:1 flwr.server.strategy.qfedavg.QFedAvg:1 of #, fuzzy -msgid "" -"The range within which model parameters are clipped before quantization. " -"This parameter ensures each model parameter is bounded within " -"[-clipping_range, clipping_range], facilitating quantization." 
-msgstr "量化前模型参数的裁剪范围。该参数可确保每个模型参数都在 [-clipping_range, clipping_range] 范围内,便于量化。" +msgid "Bases: :py:class:`~flwr.server.strategy.fedavg.FedAvg`" +msgstr "server.strategy.DPFedAvgFixed" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:39 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:46 -#: of -#, fuzzy -msgid "" -"The size of the range into which floating-point model parameters are " -"quantized, mapping each parameter to an integer in [0, " -"quantization_range-1]. This facilitates cryptographic operations on the " -"model updates." -msgstr "浮点模型参数量化范围的大小,将每个参数映射为 [0, quantization_range-1] 中的整数。这有助于对模型更新进行加密操作。" +#: flwr.server.strategy.bulyan.Bulyan:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "实施基于 https://arxiv.org/abs/1802.07927。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:43 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:50 -#: of -#, fuzzy -msgid "" -"The range of values from which random mask entries are uniformly sampled " -"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " -"Please use 2**n values for `modulus_range` to prevent overflow issues." -msgstr "" -"对随机掩码条目进行均匀采样的数值范围([0, modulus_range-1])。modulus_range \"必须小于 " -"4294967296。为防止出现溢出问题,请为 `modulus_range` 使用 2**n 的值。" +#: flwr.server.strategy.bulyan.Bulyan:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad:5 +#: flwr.server.strategy.fedadam.FedAdam:5 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:5 +#: flwr.server.strategy.fedavgm.FedAvgM:5 flwr.server.strategy.fedopt.FedOpt:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:5 +#: flwr.server.strategy.fedyogi.FedYogi:5 flwr.server.strategy.krum.Krum:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." 
+msgstr "训练期间使用客户的比例。默认为 1.0。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:47 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:54 -#: of -#, fuzzy -msgid "" -"The timeout duration in seconds. If specified, the workflow will wait for" -" replies for this duration each time. If `None`, there is no time limit " -"and the workflow will wait until replies for all messages are received." -msgstr "超时时间(秒)。如果指定,工作流将在每次等待回复的时间内等待回复。如果指定为 \"无\",则没有时间限制,工作流程将一直等待到收到所有信息的回复。" +#: flwr.server.strategy.bulyan.Bulyan:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad:7 +#: flwr.server.strategy.fedadam.FedAdam:7 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:7 +#: flwr.server.strategy.fedavgm.FedAvgM:7 flwr.server.strategy.fedopt.FedOpt:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:7 +#: flwr.server.strategy.fedyogi.FedYogi:7 flwr.server.strategy.krum.Krum:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." +msgstr "验证过程中使用的客户端比例。默认为 1.0。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:61 -#: of -#, fuzzy -msgid "" -"Generally, higher `num_shares` means more robust to dropouts while " -"increasing the computational costs; higher `reconstruction_threshold` " -"means better privacy guarantees but less tolerance to dropouts." -msgstr "一般来说,\"份额数 \"越高,意味着对丢弃的鲁棒性越强,同时计算成本也会增加;\"重构阈值 \"越高,意味着隐私保证越好,但对丢弃的容忍度越低。" +#: flwr.server.strategy.bulyan.Bulyan:9 +#: flwr.server.strategy.fedadagrad.FedAdagrad:9 +#: flwr.server.strategy.fedadam.FedAdam:9 flwr.server.strategy.fedavg.FedAvg:13 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:9 +#: flwr.server.strategy.fedavgm.FedAvgM:9 flwr.server.strategy.fedopt.FedOpt:9 +#: flwr.server.strategy.fedprox.FedProx:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:9 +#: flwr.server.strategy.fedyogi.FedYogi:9 flwr.server.strategy.krum.Krum:9 of +msgid "Minimum number of clients used during training. 
Defaults to 2." +msgstr "训练期间使用的最少客户数。默认为 2。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:58 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:64 -#: of -#, fuzzy -msgid "Too large `max_weight` may compromise the precision of the quantization." -msgstr "过大的 `max_weight` 可能会影响量化的精度。" +#: flwr.server.strategy.bulyan.Bulyan:11 +#: flwr.server.strategy.fedadagrad.FedAdagrad:11 +#: flwr.server.strategy.fedadam.FedAdam:11 +#: flwr.server.strategy.fedavg.FedAvg:15 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:11 +#: flwr.server.strategy.fedavgm.FedAvgM:11 +#: flwr.server.strategy.fedopt.FedOpt:11 +#: flwr.server.strategy.fedprox.FedProx:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:11 +#: flwr.server.strategy.fedyogi.FedYogi:11 flwr.server.strategy.krum.Krum:11 of +msgid "Minimum number of clients used during validation. Defaults to 2." +msgstr "验证过程中使用的最少客户端数量。默认为 2。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 -#: of -#, fuzzy -msgid "`modulus_range` must be 2**n and larger than `quantization_range`." -msgstr "modulus_range \"必须为 2**n,且大于 \"quantization_range\"。" +#: flwr.server.strategy.bulyan.Bulyan:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad:13 +#: flwr.server.strategy.fedadam.FedAdam:13 +#: flwr.server.strategy.fedavg.FedAvg:17 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:13 +#: flwr.server.strategy.fedavgm.FedAvgM:13 +#: flwr.server.strategy.fedopt.FedOpt:13 +#: flwr.server.strategy.fedprox.FedProx:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:13 +#: flwr.server.strategy.fedyogi.FedYogi:13 flwr.server.strategy.krum.Krum:13 of +msgid "Minimum number of total clients in the system. Defaults to 2." 
+msgstr "系统中客户总数的最小值。默认为 2。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 -#: of -#, fuzzy -msgid "" -"When `num_shares` is a float, it is interpreted as the proportion of all " -"selected clients, and hence the number of shares will be determined in " -"the runtime. This allows for dynamic adjustment based on the total number" -" of participating clients." -msgstr "当 `num_shares` 为浮点数时,它被解释为所有选定客户端的比例,因此份额数将在运行时确定。这样就可以根据参与客户端的总数进行动态调整。" +#: flwr.server.strategy.bulyan.Bulyan:15 flwr.server.strategy.krum.Krum:15 of +msgid "Number of malicious clients in the system. Defaults to 0." +msgstr "系统中恶意客户端的数量。默认为 0。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:69 -#: of -#, fuzzy -msgid "" -"Similarly, when `reconstruction_threshold` is a float, it is interpreted " -"as the proportion of the number of shares needed for the reconstruction " -"of a private key. This feature enables flexibility in setting the " -"security threshold relative to the number of distributed shares." -msgstr "" -"同样,当 `reconstruction_threshold` " -"为浮点数时,它被解释为重建私钥所需的份额数比例。这一功能使我们可以根据分发的份额数灵活设置安全阈值。" +#: flwr.server.strategy.bulyan.Bulyan:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad:15 +#: flwr.server.strategy.fedadam.FedAdam:15 +#: flwr.server.strategy.fedavg.FedAvg:19 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:15 +#: flwr.server.strategy.fedavgm.FedAvgM:15 +#: flwr.server.strategy.fedopt.FedOpt:15 +#: flwr.server.strategy.fedprox.FedProx:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:15 +#: flwr.server.strategy.fedyogi.FedYogi:17 +#: flwr.server.strategy.fedyogi.FedYogi:18 +#: flwr.server.strategy.fedyogi.FedYogi:19 flwr.server.strategy.krum.Krum:20 of +msgid "Optional function used for validation. Defaults to None." 
+msgstr "用于验证的可选函数。默认为 \"无\"。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:73 -#: of -#, fuzzy +#: flwr.server.strategy.bulyan.Bulyan:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad:17 +#: flwr.server.strategy.fedadam.FedAdam:17 +#: flwr.server.strategy.fedavg.FedAvg:21 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:17 +#: flwr.server.strategy.fedavgm.FedAvgM:17 +#: flwr.server.strategy.fedopt.FedOpt:17 +#: flwr.server.strategy.fedprox.FedProx:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:17 +#: flwr.server.strategy.fedyogi.FedYogi:20 flwr.server.strategy.krum.Krum:22 of +msgid "Function used to configure training. Defaults to None." +msgstr "用于配置训练的功能。默认为 \"无\"。" + +#: flwr.server.strategy.bulyan.Bulyan:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:19 +#: flwr.server.strategy.fedadam.FedAdam:19 +#: flwr.server.strategy.fedavg.FedAvg:23 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:19 +#: flwr.server.strategy.fedavgm.FedAvgM:19 +#: flwr.server.strategy.fedopt.FedOpt:19 +#: flwr.server.strategy.fedprox.FedProx:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:19 +#: flwr.server.strategy.fedyogi.FedYogi:22 flwr.server.strategy.krum.Krum:24 of +msgid "Function used to configure validation. Defaults to None." +msgstr "用于配置验证的函数。默认为 \"无\"。" + +#: flwr.server.strategy.bulyan.Bulyan:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad:25 +#: flwr.server.strategy.fedadam.FedAdam:21 +#: flwr.server.strategy.fedavg.FedAvg:25 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:21 +#: flwr.server.strategy.fedavgm.FedAvgM:21 +#: flwr.server.strategy.fedopt.FedOpt:21 +#: flwr.server.strategy.fedprox.FedProx:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:21 +#: flwr.server.strategy.fedyogi.FedYogi:24 flwr.server.strategy.krum.Krum:26 of +msgid "Whether or not accept rounds containing failures. Defaults to True." 
+msgstr "是否接受包含失败的轮。默认为 True。" + +#: flwr.server.strategy.bulyan.Bulyan:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad:27 +#: flwr.server.strategy.fedadam.FedAdam:23 +#: flwr.server.strategy.fedavg.FedAvg:27 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:24 +#: flwr.server.strategy.fedavgm.FedAvgM:23 +#: flwr.server.strategy.fedopt.FedOpt:23 +#: flwr.server.strategy.fedprox.FedProx:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:23 +#: flwr.server.strategy.fedyogi.FedYogi:26 flwr.server.strategy.krum.Krum:28 of +msgid "Initial global model parameters." +msgstr "初始全局模型参数。" + +#: flwr.server.strategy.bulyan.Bulyan:27 of msgid "" -"`num_shares`, `reconstruction_threshold`, and the quantization parameters" -" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg+" -" protocol." -msgstr "" -"份额数\"、\"重建阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模数范围\")在平衡 SecAgg+ " -"协议的隐私性、稳健性和效率方面发挥着关键作用。" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" +msgstr "Byzantine弹性聚合规则,用作 Bulyan 的第一步(如 Krum)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: flwr.server.strategy.bulyan.Bulyan:29 of +msgid "arguments to the first_aggregation rule" +msgstr "第一聚类规则的参数" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -":py:obj:`collect_masked_vectors_stage " -"`\\" -" \\(driver\\, ...\\)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 -#: 
flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of -#, fuzzy -msgid "Execute the 'collect masked vectors' stage." -msgstr "执行 \"收集屏蔽向量 \"阶段。" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate evaluation losses using weighted average." +msgstr "采用加权平均法计算评估损失总额。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -":py:obj:`setup_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 -#: of -#, fuzzy -msgid "Execute the 'setup' stage." -msgstr "执行 \"设置 \"阶段。" +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +msgid "Aggregate fit results using Bulyan." 
+msgstr "使用 Bulyan 技术汇总拟合结果。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -":py:obj:`share_keys_stage " -"`\\ " -"\\(driver\\, context\\, state\\)" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 -#: of -#, fuzzy -msgid "Execute the 'share keys' stage." -msgstr "执行 \"共享密钥 \"阶段。" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_evaluate:1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: 
flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." +msgstr "配置下一轮评估。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" - -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 -#: of -#, fuzzy -msgid "Execute the 'unmask' stage." -msgstr "执行 \"解除屏蔽 \"阶段。" - -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 -#, fuzzy -msgid "SecAggWorkflow" -msgstr "工作流程" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of -#, fuzzy -msgid "" -"Bases: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" -msgstr "" -"基础: " -":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`." 
+#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.configure_fit:1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." 
+msgstr "配置下一轮训练。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The SecAgg protocol ensures the secure summation of integer vectors owned" -" by multiple parties, without accessing any individual integer vector. " -"This workflow allows the server to compute the weighted average of model " -"parameters across all clients, ensuring individual contributions remain " -"private. This is achieved by clients sending both, a weighting factor and" -" a weighted version of the locally updated parameters, both of which are " -"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" -" with masks, where weighting factor 'w' is the number of examples " -"('num_examples') and 'params' represents the model parameters " -"('parameters') from the client's `FitRes`. The server then aggregates " -"these contributions to compute the weighted average of model parameters." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"SecAgg " -"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," -" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " -"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 of -#, fuzzy -msgid "" -"The protocol involves four main stages: - 'setup': Send SecAgg " -"configuration to clients and collect their public keys. 
- 'share keys': " -"Broadcast public keys among clients and collect encrypted secret" -msgstr "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg 配置并收集它们的公钥。- 共享密钥\": 在客户端之间广播公钥并收集加密密钥。" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "使用评估函数评估模型参数。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:54 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Each client's private key is split into N shares under the SecAgg " -"protocol, where N is the number of selected clients." -msgstr "根据 SecAgg 协议,每个客户的私人密钥被分成 N 份,其中 N 是所选客户的数量。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:56 of -#, fuzzy -msgid "" -"Generally, higher `reconstruction_threshold` means better privacy " -"guarantees but less tolerance to dropouts." 
-msgstr "一般来说,\"重建阈值 \"越高,隐私保证就越好,但对丢包的容忍度就越低。" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +msgid "Initialize global model parameters." +msgstr "初始化全局模型参数。" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"When `reconstruction_threshold` is a float, it is interpreted as the " -"proportion of the number of all selected clients needed for the " -"reconstruction of a private key. This feature enables flexibility in " -"setting the security threshold relative to the number of selected " -"clients." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"当 `reconstruction_threshold` " -"为浮点数时,它被解释为重建私钥所需的所有选定客户端数量的比例。此功能可根据所选客户端的数量灵活设置安全阈值。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:64 of -#, fuzzy -msgid "" -"`reconstruction_threshold`, and the quantization parameters " -"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " -"roles in balancing privacy, robustness, and efficiency within the SecAgg " -"protocol." 
-msgstr "" -"重构阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模量范围\")在 SecAgg " -"协议中平衡隐私性、鲁棒性和效率方面起着至关重要的作用。" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_evaluation_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." +msgstr "使用部分可用客户进行评估。" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 -#: of +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -":py:obj:`collect_masked_vectors_stage " -"`\\ " -"\\(driver\\, ...\\)" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -":py:obj:`collect_masked_vectors_stage " -"`\\(driver\\," -" ...\\)" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.num_fit_clients:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: 
flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." +msgstr "返回样本大小和所需的可用客户数量。" + +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst:2 +msgid "DPFedAvgAdaptive" +msgstr "DPFedAvgAdaptive" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +#, fuzzy +msgid "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" +msgstr "Bases: :py:class:`~flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed`" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:3 of +#, fuzzy +msgid "This class is deprecated and will be removed in a future release." +msgstr "该类已被弃用,将在以后的版本中删除。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`setup_stage `\\" -" \\(driver\\, context\\, state\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -":py:obj:`setup_stage " -"`\\(driver\\, context\\," -" state\\)" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1 +#: 
flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation losses using the given strategy." +msgstr "使用给定的策略汇总评估损失。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`share_keys_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"py:obj:`share_keys_stage " -"`\\(driver\\, " -"context\\, state\\)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." 
+msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 #: of #, fuzzy msgid "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -":py:obj:`unmask_stage " -"`\\ \\(driver\\, " -"context\\, state\\)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-api/flwr.simulation.rst:2 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." +msgstr "使用指定策略配置下一轮评估。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy -msgid "simulation" -msgstr "运行模拟" +msgid "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\[\\, ...\\]\\)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." 
-msgstr "启动基于 Ray 的Flower模拟服务器。" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.evaluate:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." +msgstr "使用策略中的评估函数评估模型参数。" -#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -":py:obj:`run_simulation `\\ " -"\\(server\\_app\\, client\\_app\\, ...\\)" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.run_simulation.run_simulation:1 of -#, fuzzy -msgid "Run a Flower App using the Simulation Engine." 
-msgstr "使用模拟引擎运行花朵应用程序。" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.initialize_parameters:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." +msgstr "使用给定的策略初始化全局模型参数。" -#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 -#, fuzzy -msgid "run\\_simulation" -msgstr "运行模拟" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." +msgstr "本轮联邦学习。" -#: flwr.simulation.run_simulation.run_simulation:3 of -#, fuzzy -msgid "" -"The `ServerApp` to be executed. 
It will send messages to different " -"`ClientApp` instances running on different (virtual) SuperNodes." -msgstr "要执行的 `ServerApp`。它将向运行在不同(虚拟)超级节点上的不同 `ClientApp`实例发送消息。" +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." +msgstr "客户端管理器,用于管理当前连接的所有客户端。" -#: flwr.simulation.run_simulation.run_simulation:6 of -#, fuzzy +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of msgid "" -"The `ClientApp` to be executed by each of the SuperNodes. It will receive" -" messages sent by the `ServerApp`." -msgstr "由每个超级节点执行的 `ClientApp`。它将接收由 `ServerApp` 发送的信息。" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." +msgstr "" +"**evaluate_configuration** -- " +"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`EvaluateIns`。如果某个特定的" +" `ClientProxy` 未包含在此列表中,则表示该 `ClientProxy` 将不参与下一轮联合评估。" -#: flwr.simulation.run_simulation.run_simulation:9 of -#, fuzzy -msgid "" -"Number of nodes that run a ClientApp. They can be sampled by a Driver in " -"the ServerApp and receive a Message describing what the ClientApp should " -"perform." 
-msgstr "运行 ClientApp 的节点数。它们可被 ServerApp 中的驱动程序采样,并接收描述 ClientApp 应执行的操作的信息。" +#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst:2 +msgid "DPFedAvgFixed" +msgstr "DPFedAvgFixed" -#: flwr.simulation.run_simulation.run_simulation:13 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 +#: flwr.server.strategy.fedavg.FedAvg:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:1 of #, fuzzy -msgid "A simulation backend that runs `ClientApp`s." -msgstr "运行 \"客户端应用程序 \"的模拟后台。" +msgid "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" +msgstr "Bases: :py:class:`~flwr.server.strategy.strategy.Strategy`" -#: flwr.simulation.run_simulation.run_simulation:15 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"'A dictionary, e.g {\"\": , \"\": } to " -"configure a backend. Values supported in are those included by " -"`flwr.common.typing.ConfigsRecordValues`." +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"字典,例如 {\"\": , \"\": } 来配置后端。 中支持的值是 " -"`flwr.common.typing.ConfigsRecordValues`中包含的值。" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: flwr.simulation.run_simulation.run_simulation:19 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"A boolean to indicate whether to enable GPU growth on the main thread. " -"This is desirable if you make use of a TensorFlow model on your " -"`ServerApp` while having your `ClientApp` running on the same GPU. 
" -"Without enabling this, you might encounter an out-of-memory error because" -" TensorFlow, by default, allocates all GPU memory. Read more about how " -"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " -"documentation: https://www.tensorflow.org/api/stable." +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"布尔值,用于指示是否在主线程上启用 GPU 增长。如果您在 \"ServerApp \"上使用 TensorFlow 模型,同时让 " -"\"ClientApp \"在同一 GPU 上运行,则最好启用此选项。如果不启用此功能,您可能会遇到内存不足的错误,因为 TensorFlow " -"默认会分配所有 GPU 内存。有关 `tf.config.experimental.set_memory_growth()` " -"如何工作的更多信息,请参阅 TensorFlow 文档:https://www.tensorflow.org/api/stable。" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: flwr.simulation.run_simulation.run_simulation:26 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." +msgstr "使用非加权汇总法汇总训练结果。" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"When diabled, only INFO, WARNING and ERROR log messages will be shown. If" -" enabled, DEBUG-level logs will be displayed." -msgstr "启用后,将只显示 INFO、WARNING 和 ERROR 日志信息。启用后,将显示 DEBUG 级日志。" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy -msgid "start\\_simulation" -msgstr "start_simulation" - -#: flwr.simulation.app.start_simulation:3 of msgid "" -"A function creating client instances. The function must take a single " -"`str` argument called `cid`. It should return a single client instance of" -" type Client. 
Note that the created client instances are ephemeral and " -"will often be destroyed after a single method invocation. Since client " -"instances are not long-lived, they should not attempt to carry state over" -" method invocations. Any state required by the instance (model, dataset, " -"hyperparameters, ...) should be (re-)created in either the call to " -"`client_fn` or the call to any of the client methods (e.g., load " -"evaluation data in the `evaluate` method itself)." +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个 Client " -"类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" -" `client_fn` 或任何客户端方法(例如,在 `evaluate` 方法中加载评估数据)时(重新)创建。" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: flwr.simulation.app.start_simulation:13 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of msgid "" -"The total number of clients in this simulation. This must be set if " -"`clients_ids` is not set and vice-versa." -msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." +msgstr "配置包含差分隐私 (DP) 的下一轮训练。" -#: flwr.simulation.app.start_simulation:16 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"List `client_id`s for each client. This is only required if `num_clients`" -" is not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"列出每个客户的 `client_id`。只有在未设置 `num_clients` " -"时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: flwr.simulation.app.start_simulation:20 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU 资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解" -" `num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray 文档。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: flwr.simulation.app.start_simulation:25 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." -msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." +msgstr "下一轮训练的配置包括与 DP 相关的信息,如片段规范和噪声 stddev。" -#: flwr.simulation.app.start_simulation:31 of +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. 
If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." msgstr "" -"抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" +"**fit_configuration** -- " +"一个元组列表。列表中的每个元组都标识了一个`ClientProxy`和该特定`ClientProxy`的`FitIns'。如果某个特定的`ClientProxy`不在此列表中,则表示该`ClientProxy`将不参加下一轮联合学习。" -#: flwr.simulation.app.start_simulation:35 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." -msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_simulation` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyClientSideAdaptiveClipping" +msgstr "DifferentialPrivacyClientSideAdaptiveClipping" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:3 +#: of +#, fuzzy +msgid "Use `adaptiveclipping_mod` modifier at the client side." +msgstr "在客户端使用 \"adaptiveclipping_mod \"修改器。" -#: flwr.simulation.app.start_simulation:39 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:5 +#: of +#, fuzzy msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." 
+"In comparison to `DifferentialPrivacyServerSideAdaptiveClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideAdaptiveClipping` expects clipping to " +"happen on the client-side, usually by using the built-in " +"`adaptiveclipping_mod`." msgstr "" -"可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" -" { \"ignore_reinit_error\": True, \"include_dashboard\": False } " -"可以使用空字典(ray_init_args={})来防止向 ray.init 传递任何参数。" +"与在服务器端执行剪切的 `DifferentialPrivacyServerSideAdaptiveClipping` " +"相比,`DifferentialPrivacyClientSideAdaptiveClipping` 希望在客户端进行剪切,通常使用内置的 " +"`adaptiveclipping_mod`。" -#: flwr.simulation.app.start_simulation:39 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" -msgstr "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:10 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:3 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:10 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:3 +#: of +#, fuzzy +msgid "The strategy to which DP functionalities will be added by this wrapper." +msgstr "该包装器将添加 DP 功能的策略。" -#: flwr.simulation.app.start_simulation:43 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:12 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:5 +#: of +#, fuzzy +msgid "The noise multiplier for the Gaussian mechanism for model updates." 
+msgstr "用于模型更新的高斯机制的噪声乘数。" -#: flwr.simulation.app.start_simulation:45 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." -msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:14 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:7 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:17 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:10 +#: of +#, fuzzy +msgid "The number of clients that are sampled on each round." +msgstr "每轮取样的客户数。" -#: flwr.simulation.app.start_simulation:48 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:16 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:9 +#: of +#, fuzzy msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." -msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" +"The initial value of clipping norm. Defaults to 0.1. Andrew et al. " +"recommends to set to 0.1." +msgstr "剪切规范的初始值。默认为 0.1。安德鲁等人建议设置为 0.1。" -#: flwr.simulation.app.start_simulation:50 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:12 +#: of #, fuzzy -msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." -msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" +msgid "The desired quantile of updates which should be clipped. Defaults to 0.5." 
+msgstr "需要剪切的更新量化值。默认为 0.5。" -#: flwr.simulation.app.start_simulation:54 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:14 +#: of +#, fuzzy msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." -msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" +"The learning rate for the clipping norm adaptation. Defaults to 0.2. " +"Andrew et al. recommends to set to 0.2." +msgstr "剪切规范适应的学习率。默认为 0.2。安德鲁等人建议设置为 0.2。" -#: flwr.simulation.app.start_simulation:57 of +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:24 +#: of +#, fuzzy msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -msgstr "" -"(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 VCE " -"选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " -"NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " -"文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" - -#: flwr.simulation.app.start_simulation:66 of -msgid "**hist** -- Object containing metrics from training." -msgstr "**hist** -- 包含训练指标的对象。" +"The stddev of the noise added to the count of updates currently below the" +" estimate. Andrew et al. 
recommends to set to `expected_num_records/20`" +msgstr "添加到当前低于估计值的更新计数中的噪声的 stddev。安德鲁等人建议设置为 \"expected_num_records/20" -#: ../../source/ref-changelog.md:1 -msgid "Changelog" -msgstr "更新日志" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:30 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:23 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:22 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:15 +#: of +#, fuzzy +msgid "Create a strategy:" +msgstr "server.strategy" -#: ../../source/ref-changelog.md:3 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:34 +#: of #, fuzzy -msgid "v1.9.0 (2024-06-10)" -msgstr "v1.3.0 (2023-02-06)" +msgid "" +"Wrap the strategy with the " +"`DifferentialPrivacyClientSideAdaptiveClipping` wrapper:" +msgstr "用 \"DifferentialPrivacyClientSideAdaptiveClipping \"包装器对策略进行包装:" -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:105 -#: ../../source/ref-changelog.md:169 ../../source/ref-changelog.md:262 -#: ../../source/ref-changelog.md:362 ../../source/ref-changelog.md:446 -#: ../../source/ref-changelog.md:510 ../../source/ref-changelog.md:568 -#: ../../source/ref-changelog.md:637 ../../source/ref-changelog.md:706 -msgid "Thanks to our contributors" -msgstr "感谢我们的贡献者" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping:40 +#: of +#, fuzzy +msgid "On the client, add the `adaptiveclipping_mod` to the client-side mods:" +msgstr "在客户端,将 \"adaptiveclipping_mod \"添加到客户端模块中:" -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:107 -#: ../../source/ref-changelog.md:171 ../../source/ref-changelog.md:264 -#: ../../source/ref-changelog.md:364 ../../source/ref-changelog.md:448 -#: ../../source/ref-changelog.md:512 ../../source/ref-changelog.md:570 +#: 
flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" -msgstr "在此,我们要特别感谢所有为 Flower 的新版本做出贡献的人员(按 `git shortlog` 顺序排列):" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:9 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " -"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," -" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:11 ../../source/ref-changelog.md:111 -#: ../../source/ref-changelog.md:175 ../../source/ref-changelog.md:268 -#: ../../source/ref-changelog.md:368 ../../source/ref-changelog.md:452 -#: ../../source/ref-changelog.md:516 ../../source/ref-changelog.md:574 -#: ../../source/ref-changelog.md:643 ../../source/ref-changelog.md:772 -#: ../../source/ref-changelog.md:814 ../../source/ref-changelog.md:881 -#: ../../source/ref-changelog.md:947 ../../source/ref-changelog.md:992 -#: ../../source/ref-changelog.md:1031 ../../source/ref-changelog.md:1064 -#: ../../source/ref-changelog.md:1114 -msgid "What's new?" -msgstr "有什么新内容?" +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_fit:1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_fit:1 +#: of +#, fuzzy +msgid "Aggregate training results and update clip norms." 
+msgstr "汇总 DPFedAvgFixed 中的训练结果并更新片段标准。" -#: ../../source/ref-changelog.md:13 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce built-in authentication (preview)** " -"([#2946](https://github.com/adap/flower/pull/2946), " -"[#3388](https://github.com/adap/flower/pull/3388), " -"[#2948](https://github.com/adap/flower/pull/2948), " -"[#2917](https://github.com/adap/flower/pull/2917), " -"[#3386](https://github.com/adap/flower/pull/3386), " -"[#3308](https://github.com/adap/flower/pull/3308), " -"[#3001](https://github.com/adap/flower/pull/3001), " -"[#3409](https://github.com/adap/flower/pull/3409), " -"[#2999](https://github.com/adap/flower/pull/2999), " -"[#2979](https://github.com/adap/flower/pull/2979), " -"[#3389](https://github.com/adap/flower/pull/3389), " -"[#3503](https://github.com/adap/flower/pull/3503), " -"[#3366](https://github.com/adap/flower/pull/3366), " -"[#3357](https://github.com/adap/flower/pull/3357))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:15 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Flower 1.9 introduces the first build-in version of client node " -"authentication. 
In previous releases, users often wrote glue code to " -"connect Flower to external authentication systems. With this release, the" -" SuperLink can authenticate SuperNodes using a built-in authentication " -"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" -"authenticate-supernodes.html) and a new [code " -"example](https://github.com/adap/flower/tree/main/examples/flower-" -"authentication) help you to get started." -msgstr "" - -#: ../../source/ref-changelog.md:17 -msgid "" -"This is the first preview release of the Flower-native authentication " -"system. Many additional features are on the roadmap for upcoming Flower " -"releases - stay tuned." +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:19 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce end-to-end Docker support** " -"([#3483](https://github.com/adap/flower/pull/3483), " -"[#3266](https://github.com/adap/flower/pull/3266), " -"[#3390](https://github.com/adap/flower/pull/3390), " -"[#3283](https://github.com/adap/flower/pull/3283), " -"[#3285](https://github.com/adap/flower/pull/3285), " -"[#3391](https://github.com/adap/flower/pull/3391), " -"[#3403](https://github.com/adap/flower/pull/3403), " -"[#3458](https://github.com/adap/flower/pull/3458), " -"[#3533](https://github.com/adap/flower/pull/3533), " -"[#3453](https://github.com/adap/flower/pull/3453), " -"[#3486](https://github.com/adap/flower/pull/3486), " -"[#3290](https://github.com/adap/flower/pull/3290))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " 
-"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:21 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Full Flower Next Docker support is here! With the release of Flower 1.9, " -"Flower provides stable Docker images for the Flower SuperLink, the Flower" -" SuperNode, and the Flower `ServerApp`. This set of images enables you to" -" run all Flower components in Docker. Check out the new [how-to " -"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html) to get stated." +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: ../../source/ref-changelog.md:23 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst:2 #, fuzzy -msgid "" -"**Re-architect Flower Next simulation engine** " -"([#3307](https://github.com/adap/flower/pull/3307), " -"[#3355](https://github.com/adap/flower/pull/3355), " -"[#3272](https://github.com/adap/flower/pull/3272), " -"[#3273](https://github.com/adap/flower/pull/3273), " -"[#3417](https://github.com/adap/flower/pull/3417), " -"[#3281](https://github.com/adap/flower/pull/3281), " -"[#3343](https://github.com/adap/flower/pull/3343), " -"[#3326](https://github.com/adap/flower/pull/3326))" -msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " 
-"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +msgid "DifferentialPrivacyClientSideFixedClipping" +msgstr "差分隐私" -#: ../../source/ref-changelog.md:25 -msgid "" -"Flower Next simulations now use a new in-memory `Driver` that improves " -"the reliability of simulations, especially in notebook environments. This" -" is a significant step towards a complete overhaul of the Flower Next " -"simulation architecture." -msgstr "" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:3 +#: of +#, fuzzy +msgid "Use `fixedclipping_mod` modifier at the client side." +msgstr "在客户端使用 `fixedclipping_mod` 修改器。" -#: ../../source/ref-changelog.md:27 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:5 +#: of #, fuzzy msgid "" -"**Upgrade simulation engine** " -"([#3354](https://github.com/adap/flower/pull/3354), " -"[#3378](https://github.com/adap/flower/pull/3378), " -"[#3262](https://github.com/adap/flower/pull/3262), " -"[#3435](https://github.com/adap/flower/pull/3435), " -"[#3501](https://github.com/adap/flower/pull/3501), " -"[#3482](https://github.com/adap/flower/pull/3482), " -"[#3494](https://github.com/adap/flower/pull/3494))" +"In comparison to `DifferentialPrivacyServerSideFixedClipping`, which " +"performs clipping on the server-side, " +"`DifferentialPrivacyClientSideFixedClipping` expects clipping to happen " +"on the client-side, usually by using the built-in `fixedclipping_mod`." 
msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +"与在服务器端执行剪切的 \"DifferentialPrivacyServerSideFixedClipping " +"\"相比,\"DifferentialPrivacyClientSideFixedClipping \"希望在客户端进行剪切,通常是使用内置的 " +"\"fixedclipping_mod\"。" -#: ../../source/ref-changelog.md:29 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:12 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:5 +#: of +#, fuzzy msgid "" -"The Flower Next simulation engine comes with improved and configurable " -"logging. The Ray-based simulation backend in Flower 1.9 was updated to " -"use Ray 2.10." -msgstr "" +"The noise multiplier for the Gaussian mechanism for model updates. A " +"value of 1.0 or higher is recommended for strong privacy." +msgstr "模型更新高斯机制的噪声乘数。建议使用 1.0 或更高的值,以获得较强的隐私性。" -#: ../../source/ref-changelog.md:31 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:26 +#: of #, fuzzy msgid "" -"**Introduce FedPFT baseline** " -"([#3268](https://github.com/adap/flower/pull/3268))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +"Wrap the strategy with the `DifferentialPrivacyClientSideFixedClipping` " +"wrapper:" +msgstr "用 \"DifferentialPrivacyClientSideFixedClipping \"包装器包装策略:" -#: ../../source/ref-changelog.md:33 -msgid "" -"FedPFT allows you to perform one-shot Federated Learning by leveraging " -"widely available foundational models, dramatically reducing communication" -" costs while delivering high performing models. This is work led by Mahdi" -" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " -"details in their paper: \"Parametric Feature Transfer: One-shot Federated" -" Learning with Foundation Models\" " -"([arxiv](https://arxiv.org/abs/2402.01862))" -msgstr "" +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping:32 +#: of +#, fuzzy +msgid "On the client, add the `fixedclipping_mod` to the client-side mods:" +msgstr "在客户端,将 \"fixedclipping_mod \"添加到客户端模块中:" -#: ../../source/ref-changelog.md:35 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " -"Transformers, scikit-learn and TensorFlow** " -"([#3291](https://github.com/adap/flower/pull/3291), " -"[#3139](https://github.com/adap/flower/pull/3139), " -"[#3284](https://github.com/adap/flower/pull/3284), " -"[#3251](https://github.com/adap/flower/pull/3251), " -"[#3376](https://github.com/adap/flower/pull/3376), " -"[#3287](https://github.com/adap/flower/pull/3287))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" -"**移除对 Python 3.7 的支持** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:37 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The `flwr` CLI's `flwr new` command is starting to become everone's " -"favorite way of creating new Flower projects. 
This release introduces " -"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," -" scikit-learn and TensorFlow. In addition to that, existing templates " -"also received updates." +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:39 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_fit:1 +#: of #, fuzzy -msgid "" -"**Refine** `RecordSet` **API** " -"([#3209](https://github.com/adap/flower/pull/3209), " -"[#3331](https://github.com/adap/flower/pull/3331), " -"[#3334](https://github.com/adap/flower/pull/3334), " -"[#3335](https://github.com/adap/flower/pull/3335), " -"[#3375](https://github.com/adap/flower/pull/3375), " -"[#3368](https://github.com/adap/flower/pull/3368))" -msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +msgid "Add noise to the aggregated parameters." +msgstr "然后将汇总结果序列化:" -#: ../../source/ref-changelog.md:41 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"`RecordSet` is part of the Flower Next low-level API preview release. In " -"Flower 1.9, `RecordSet` received a number of usability improvements that " -"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." 
+":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:43 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " -"[#3430](https://github.com/adap/flower/pull/3430), " -"[#3461](https://github.com/adap/flower/pull/3461), " -"[#3360](https://github.com/adap/flower/pull/3360), " -"[#3433](https://github.com/adap/flower/pull/3433))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:45 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Logs received a substantial update. Not only are logs now much nicer to " -"look at, but they are also more configurable." 
+":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:47 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Improve reliability** " -"([#3564](https://github.com/adap/flower/pull/3564), " -"[#3561](https://github.com/adap/flower/pull/3561), " -"[#3566](https://github.com/adap/flower/pull/3566), " -"[#3462](https://github.com/adap/flower/pull/3462), " -"[#3225](https://github.com/adap/flower/pull/3225), " -"[#3514](https://github.com/adap/flower/pull/3514), " -"[#3535](https://github.com/adap/flower/pull/3535), " -"[#3372](https://github.com/adap/flower/pull/3372))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: ../../source/ref-changelog.md:49 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyServerSideAdaptiveClipping" +msgstr "DifferentialPrivacyServerSideAdaptiveClipping" + +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:17 +#: of +#, fuzzy msgid "" -"Flower 1.9 includes reliability improvements across many parts of the " -"system. One example is a much improved SuperNode shutdown procedure." -msgstr "" +"The standard deviation of the noise added to the count of updates below " +"the estimate. Andrew et al. 
recommends to set to " +"`expected_num_records/20`" +msgstr "添加到低于估计值的更新计数中的噪声标准偏差。安德鲁等人建议设置为 \"expected_num_records/20" -#: ../../source/ref-changelog.md:51 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping:27 +#: of #, fuzzy msgid "" -"**Update Swift and C++ SDKs** " -"([#3321](https://github.com/adap/flower/pull/3321), " -"[#2763](https://github.com/adap/flower/pull/2763))" -msgstr "" -"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"Wrap the strategy with the DifferentialPrivacyServerSideAdaptiveClipping " +"wrapper" +msgstr "用 DifferentialPrivacyServerSideAdaptiveClipping 封装器封装策略" -#: ../../source/ref-changelog.md:53 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"In the C++ SDK, communication-related code is now separate from main " -"client logic. A new abstract class `Communicator` has been introduced " -"alongside a gRPC implementation of it." 
+":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:55 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Improve testing, tooling and CI/CD infrastructure** " -"([#3294](https://github.com/adap/flower/pull/3294), " -"[#3282](https://github.com/adap/flower/pull/3282), " -"[#3311](https://github.com/adap/flower/pull/3311), " -"[#2878](https://github.com/adap/flower/pull/2878), " -"[#3333](https://github.com/adap/flower/pull/3333), " -"[#3255](https://github.com/adap/flower/pull/3255), " -"[#3349](https://github.com/adap/flower/pull/3349), " -"[#3400](https://github.com/adap/flower/pull/3400), " -"[#3401](https://github.com/adap/flower/pull/3401), " -"[#3399](https://github.com/adap/flower/pull/3399), " -"[#3346](https://github.com/adap/flower/pull/3346), " -"[#3398](https://github.com/adap/flower/pull/3398), " -"[#3397](https://github.com/adap/flower/pull/3397), " -"[#3347](https://github.com/adap/flower/pull/3347), " -"[#3502](https://github.com/adap/flower/pull/3502), " -"[#3387](https://github.com/adap/flower/pull/3387), " -"[#3542](https://github.com/adap/flower/pull/3542), " -"[#3396](https://github.com/adap/flower/pull/3396), " -"[#3496](https://github.com/adap/flower/pull/3496), " -"[#3465](https://github.com/adap/flower/pull/3465), " -"[#3473](https://github.com/adap/flower/pull/3473), " -"[#3484](https://github.com/adap/flower/pull/3484), " -"[#3521](https://github.com/adap/flower/pull/3521), " -"[#3363](https://github.com/adap/flower/pull/3363), " -"[#3497](https://github.com/adap/flower/pull/3497), " -"[#3464](https://github.com/adap/flower/pull/3464), " -"[#3495](https://github.com/adap/flower/pull/3495), " -"[#3478](https://github.com/adap/flower/pull/3478), " 
-"[#3271](https://github.com/adap/flower/pull/3271))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:57 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"As always, the Flower tooling, testing, and CI/CD infrastructure has " -"received many updates." +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:59 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Improve documentation** " -"([#3530](https://github.com/adap/flower/pull/3530), " -"[#3539](https://github.com/adap/flower/pull/3539), " -"[#3425](https://github.com/adap/flower/pull/3425), " -"[#3520](https://github.com/adap/flower/pull/3520), " -"[#3286](https://github.com/adap/flower/pull/3286), " -"[#3516](https://github.com/adap/flower/pull/3516), " -"[#3523](https://github.com/adap/flower/pull/3523), " -"[#3545](https://github.com/adap/flower/pull/3545), " -"[#3498](https://github.com/adap/flower/pull/3498), " -"[#3439](https://github.com/adap/flower/pull/3439), " -"[#3440](https://github.com/adap/flower/pull/3440), " -"[#3382](https://github.com/adap/flower/pull/3382), " -"[#3559](https://github.com/adap/flower/pull/3559), " -"[#3432](https://github.com/adap/flower/pull/3432), " -"[#3278](https://github.com/adap/flower/pull/3278), " -"[#3371](https://github.com/adap/flower/pull/3371), " -"[#3519](https://github.com/adap/flower/pull/3519), " -"[#3267](https://github.com/adap/flower/pull/3267), " -"[#3204](https://github.com/adap/flower/pull/3204), " -"[#3274](https://github.com/adap/flower/pull/3274))" 
+":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:61 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"As always, the Flower documentation has received many updates. Notable " -"new pages include:" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:63 +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"[How-to upgrate to Flower Next (Flower Next migration " -"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" -"next.html)" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" -#: ../../source/ref-changelog.md:65 +#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst:2 +#, fuzzy +msgid "DifferentialPrivacyServerSideFixedClipping" +msgstr "差分隐私" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping:19 +#: of #, fuzzy msgid "" -"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" -"run-flower-using-docker.html)" -msgstr "" -"`TensorFlow快速入门 (教程) `_" +"Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping " +"wrapper" +msgstr "用 DifferentialPrivacyServerSideFixedClipping 封装器封装策略" -#: ../../source/ref-changelog.md:67 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"[Flower Mods reference](https://flower.ai/docs/framework/ref-" -"api/flwr.client.mod.html#module-flwr.client.mod)" 
+":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" msgstr "" +":py:obj:`aggregate_evaluate " +"`\\" +" \\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:69 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#3205](https://github.com/adap/flower/pull/3205), " -"[#3226](https://github.com/adap/flower/pull/3226), " -"[#3211](https://github.com/adap/flower/pull/3211), " -"[#3252](https://github.com/adap/flower/pull/3252), " -"[#3427](https://github.com/adap/flower/pull/3427), " -"[#3410](https://github.com/adap/flower/pull/3410), " -"[#3426](https://github.com/adap/flower/pull/3426), " -"[#3228](https://github.com/adap/flower/pull/3228), " -"[#3342](https://github.com/adap/flower/pull/3342), " -"[#3200](https://github.com/adap/flower/pull/3200), " -"[#3202](https://github.com/adap/flower/pull/3202), " -"[#3394](https://github.com/adap/flower/pull/3394), " -"[#3488](https://github.com/adap/flower/pull/3488), " -"[#3329](https://github.com/adap/flower/pull/3329), " -"[#3526](https://github.com/adap/flower/pull/3526), " -"[#3392](https://github.com/adap/flower/pull/3392), " -"[#3474](https://github.com/adap/flower/pull/3474), " -"[#3269](https://github.com/adap/flower/pull/3269))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " 
-"[#1307](https://github.com/adap/flower/pull/1307))" +":py:obj:`aggregate_fit " +"`\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:71 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:1 +#: of #, fuzzy -msgid "As always, Flower code examples have received many updates." -msgstr "许多 \"Flower \"代码示例得到了大幅更新。" +msgid "Compute the updates, clip, and pass them for aggregation." +msgstr "计算更新、剪辑并将其传递给聚合。" -#: ../../source/ref-changelog.md:73 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**General improvements** " -"([#3532](https://github.com/adap/flower/pull/3532), " -"[#3318](https://github.com/adap/flower/pull/3318), " -"[#3565](https://github.com/adap/flower/pull/3565), " -"[#3296](https://github.com/adap/flower/pull/3296), " -"[#3305](https://github.com/adap/flower/pull/3305), " -"[#3246](https://github.com/adap/flower/pull/3246), " -"[#3224](https://github.com/adap/flower/pull/3224), " -"[#3475](https://github.com/adap/flower/pull/3475), " -"[#3297](https://github.com/adap/flower/pull/3297), " -"[#3317](https://github.com/adap/flower/pull/3317), " -"[#3429](https://github.com/adap/flower/pull/3429), " -"[#3196](https://github.com/adap/flower/pull/3196), " -"[#3534](https://github.com/adap/flower/pull/3534), " -"[#3240](https://github.com/adap/flower/pull/3240), " -"[#3365](https://github.com/adap/flower/pull/3365), " -"[#3407](https://github.com/adap/flower/pull/3407), " -"[#3563](https://github.com/adap/flower/pull/3563), " -"[#3344](https://github.com/adap/flower/pull/3344), " -"[#3330](https://github.com/adap/flower/pull/3330), " -"[#3436](https://github.com/adap/flower/pull/3436), " -"[#3300](https://github.com/adap/flower/pull/3300), " 
-"[#3327](https://github.com/adap/flower/pull/3327), " -"[#3254](https://github.com/adap/flower/pull/3254), " -"[#3253](https://github.com/adap/flower/pull/3253), " -"[#3419](https://github.com/adap/flower/pull/3419), " -"[#3289](https://github.com/adap/flower/pull/3289), " -"[#3208](https://github.com/adap/flower/pull/3208), " -"[#3245](https://github.com/adap/flower/pull/3245), " -"[#3319](https://github.com/adap/flower/pull/3319), " -"[#3203](https://github.com/adap/flower/pull/3203), " -"[#3423](https://github.com/adap/flower/pull/3423), " -"[#3352](https://github.com/adap/flower/pull/3352), " -"[#3292](https://github.com/adap/flower/pull/3292), " -"[#3261](https://github.com/adap/flower/pull/3261))" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:75 ../../source/ref-changelog.md:1058 -msgid "Deprecations" -msgstr "停用" - -#: ../../source/ref-changelog.md:77 -#, fuzzy -msgid "**Deprecate Python 3.8 support**" -msgstr "** 过时的 Python 3.7**" - -#: ../../source/ref-changelog.md:79 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Python 3.8 will stop receiving security fixes in [October " -"2024](https://devguide.python.org/versions/). Support for Python 3.8 is " -"now deprecated and will be removed in an upcoming release." 
-msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit " +"`\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:81 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" -"api` ([#3416](https://github.com/adap/flower/pull/3416), " -"[#3420](https://github.com/adap/flower/pull/3420))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`evaluate " +"`\\" +" \\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:83 +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" -" and `flower-fleet-api`. Both commands will be removed in an upcoming " -"release. Use `flower-superlink` instead." +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters " +"`\\" +" \\(client\\_manager\\)" + +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping.aggregate_fit:3 +#: of +#, fuzzy +msgid "Afterward, add noise to the aggregated parameters." 
+msgstr "然后,在汇总参数中添加噪声。" + +#: ../../source/ref-api/flwr.server.strategy.FaultTolerantFedAvg.rst:2 +#, fuzzy +msgid "FaultTolerantFedAvg" +msgstr "server.strategy.FaultTolerantFedAvg" -#: ../../source/ref-changelog.md:85 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Deprecate** `--server` **in favor of** `--superlink` " -"([#3518](https://github.com/adap/flower/pull/3518))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**启用向** `start_simulation` 传递** `Server` 实例 " -"([#1281](https://github.com/adap/flower/pull/1281))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:87 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The commands `flower-server-app` and `flower-client-app` should use " -"`--superlink` instead of the now deprecated `--server`. Support for " -"`--server` will be removed in a future release." 
+":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:89 ../../source/ref-changelog.md:163 -#: ../../source/ref-changelog.md:238 ../../source/ref-changelog.md:350 -#: ../../source/ref-changelog.md:440 ../../source/ref-changelog.md:504 -#: ../../source/ref-changelog.md:562 ../../source/ref-changelog.md:631 -#: ../../source/ref-changelog.md:693 ../../source/ref-changelog.md:712 -#: ../../source/ref-changelog.md:868 ../../source/ref-changelog.md:939 -#: ../../source/ref-changelog.md:976 ../../source/ref-changelog.md:1019 -msgid "Incompatible changes" -msgstr "不兼容的更改" +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
+msgstr "使用加权平均法汇总拟合结果。" -#: ../../source/ref-changelog.md:91 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " -"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " -"([#3512](https://github.com/adap/flower/pull/3512), " -"[#3408](https://github.com/adap/flower/pull/3408))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:93 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"SSL-related `flower-superlink` CLI arguments were restructured in an " -"incompatible way. Instead of passing a single `--certificates` flag with " -"three values, you now need to pass three flags (`--ssl-ca-certfile`, " -"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " -"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" -"connections.html) documentation page for details." 
+":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:95 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Remove SuperLink** `--vce` **option** " -"([#3513](https://github.com/adap/flower/pull/3513))" -msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:97 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Instead of separately starting a SuperLink and a `ServerApp` for " -"simulation, simulations must now be started using the single `flower-" -"simulation` command." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:99 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " -"([#3527](https://github.com/adap/flower/pull/3527))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:101 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"To simplify the usage of `flower-superlink`, previously separate sets of " -"CLI options for gRPC and REST were merged into one unified set of " -"options. 
Consult the [Flower CLI reference " -"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " -"details." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst:2 +#: ../../source/ref-changelog.md:1231 +msgid "FedAdagrad" +msgstr "FedAdagrad" -#: ../../source/ref-changelog.md:103 +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 +#: flwr.server.strategy.fedadam.FedAdam:1 +#: flwr.server.strategy.fedyogi.FedYogi:1 of #, fuzzy -msgid "v1.8.0 (2024-04-03)" -msgstr "v1.3.0 (2023-02-06)" +msgid "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" +msgstr "Bases: :py:class:`~flwr.server.strategy.fedopt.FedOpt`" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 flwr.server.strategy.fedopt.FedOpt:3 +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "实施基于 https://arxiv.org/abs/2003.00295v5" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad:23 +#: flwr.server.strategy.fedadam.FedAdam:25 +#: flwr.server.strategy.fedadam.FedAdam:27 +#: flwr.server.strategy.fedavg.FedAvg:29 flwr.server.strategy.fedavg.FedAvg:31 +#: flwr.server.strategy.fedopt.FedOpt:25 flwr.server.strategy.fedopt.FedOpt:27 +#: flwr.server.strategy.fedprox.FedProx:61 +#: flwr.server.strategy.fedprox.FedProx:63 +#: flwr.server.strategy.fedyogi.FedYogi:28 +#: flwr.server.strategy.fedyogi.FedYogi:30 of +msgid "Metrics aggregation function, optional." +msgstr "指标汇总功能,可选。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:29 +#: flwr.server.strategy.fedadam.FedAdam:29 +#: flwr.server.strategy.fedopt.FedOpt:29 of +msgid "Server-side learning rate. Defaults to 1e-1." 
+msgstr "服务器端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:31 +#: flwr.server.strategy.fedadam.FedAdam:31 +#: flwr.server.strategy.fedopt.FedOpt:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "客户端学习率。默认为 1e-1。" -#: ../../source/ref-changelog.md:109 +#: flwr.server.strategy.fedadagrad.FedAdagrad:33 +#: flwr.server.strategy.fedadam.FedAdam:37 +#: flwr.server.strategy.fedopt.FedOpt:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +msgstr "控制算法的适应度。默认为 1e-9。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " -"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " -"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " -"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " -"`tabdar-khan` " +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:113 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Next high-level API (stable)** " -"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" msgstr "" -"**介绍 Flower Next 高级应用程序接口(稳定版)** " 
-"([#3002](https://github.com/adap/flower/pull/3002), " -"[#2934](https://github.com/adap/flower/pull/2934), " -"[#2958](https://github.com/adap/flower/pull/2958), " -"[#3173](https://github.com/adap/flower/pull/3173), " -"[#3174](https://github.com/adap/flower/pull/3174), " -"[#2923](https://github.com/adap/flower/pull/2923), " -"[#2691](https://github.com/adap/flower/pull/2691), " -"[#3079](https://github.com/adap/flower/pull/3079), " -"[#2961](https://github.com/adap/flower/pull/2961), " -"[#2924](https://github.com/adap/flower/pull/2924), " -"[#3166](https://github.com/adap/flower/pull/3166), " -"[#3031](https://github.com/adap/flower/pull/3031), " -"[#3057](https://github.com/adap/flower/pull/3057), " -"[#3000](https://github.com/adap/flower/pull/3000), " -"[#3113](https://github.com/adap/flower/pull/3113), " -"[#2957](https://github.com/adap/flower/pull/2957), " -"[#3183](https://github.com/adap/flower/pull/3183), " -"[#3180](https://github.com/adap/flower/pull/3180), " -"[#3035](https://github.com/adap/flower/pull/3035), " -"[#3189](https://github.com/adap/flower/pull/3189), " -"[#3185](https://github.com/adap/flower/pull/3185), " -"[#3190](https://github.com/adap/flower/pull/3190), " -"[#3191](https://github.com/adap/flower/pull/3191), " -"[#3195](https://github.com/adap/flower/pull/3195), " -"[#3197](https://github.com/adap/flower/pull/3197))" +":py:obj:`aggregate_fit `\\" +" \\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:115 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The Flower Next high-level API is stable! Flower Next is the future of " -"Flower - all new features (like Flower Mods) will be built on top of it. " -"You can start to migrate your existing projects to Flower Next by using " -"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " -"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." 
-" Flower Next allows you to run multiple projects concurrently (we call " -"this multi-run) and execute the same project in either simulation " -"environments or deployment environments without having to change a single" -" line of code. The best part? It's fully compatible with existing Flower " -"projects that use `Strategy`, `NumPyClient` & co." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Flower Next 高级应用程序接口已经稳定!Flower Next 是 Flower 的未来 - 所有新功能(如 Flower " -"Mods)都将构建在它之上。您可以使用 `ServerApp` 和 `ClientApp` 开始将现有项目迁移到 Flower Next(请查看 " -"`quickstart-pytorch` 或 `quickstart-tensorflow` ,详细的迁移指南将在不久后发布)。Flower " -"Next 允许您同时运行多个项目(我们称之为多重运行),并在模拟环境或部署环境中执行同一项目,而无需更改任何代码。最棒的是什么?它与使用 " -"`Strategy`、`NumPyClient` 等的现有 Flower 项目完全兼容。" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:117 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Next low-level API (preview)** " -"([#3062](https://github.com/adap/flower/pull/3062), " -"[#3034](https://github.com/adap/flower/pull/3034), " -"[#3069](https://github.com/adap/flower/pull/3069))" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`configure_fit `\\" +" \\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:119 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"In addition to the Flower Next *high-level* API that uses `Strategy`, " -"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " -"new Flower Next *low-level* API. 
The low-level API allows for granular " -"control of every aspect of the learning process by sending/receiving " -"individual messages to/from client nodes. The new `ServerApp` supports " -"registering a custom `main` function that allows writing custom training " -"loops for methods like async FL, cyclic training, or federated analytics." -" The new `ClientApp` supports registering `train`, `evaluate` and `query`" -" functions that can access the raw message received from the `ServerApp`." -" New abstractions like `RecordSet`, `Message` and `Context` further " -"enable sending multiple models, multiple sets of config values and " -"metrics, stateful computations on the client node and implementations of " -"custom SMPC protocols, to name just a few." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"除了使用 \"Strategy\"、\"NumPyClient \"等的 Flower Next 高级应用程序接口外,Flower 1.8 " -"还提供了新的 Flower Next " -"低级应用程序接口的预览版。低级应用程序接口允许通过向/从客户端节点发送/接收单个消息,对学习过程的各个方面进行细粒度控制。新的 " -"\"ServerApp \"支持注册一个自定义的 \"main \"函数,允许为异步FL、循环训练或联合分析等方法编写自定义训练循环。新的 " -"\"ClientApp \"支持注册 \"训练\"、\"评估 \"和 \"查询 \"函数,这些函数可以访问从 \"ServerApp " -"\"接收到的原始信息。新的抽象(如 \"RecordSet\"、\"Message \"和 " -"\"Context\")进一步支持发送多个模型、多套配置值和指标、客户端节点上的有状态计算以及自定义 SMPC 协议的实现等。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:121 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Mods (preview)** " -"([#3054](https://github.com/adap/flower/pull/3054), " -"[#2911](https://github.com/adap/flower/pull/2911), " -"[#3083](https://github.com/adap/flower/pull/3083))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: 
../../source/ref-changelog.md:123 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Flower Modifiers (we call them Mods) can intercept messages and analyze, " -"edit or handle them directly. Mods can be used to develop pluggable " -"modules that work across different projects. Flower 1.8 already includes " -"mods to log the size of a message, the number of parameters sent over the" -" network, differential privacy with fixed clipping and adaptive clipping," -" local differential privacy and secure aggregation protocols SecAgg and " -"SecAgg+. The Flower Mods API is released as a preview, but researchers " -"can already use it to experiment with arbirtrary SMPC protocols." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Flower Modifiers(我们称之为 " -"Mods)可以拦截信息,并直接对其进行分析、编辑或处理。修改器可用于开发可在不同项目中使用的可插拔模块。Flower 1.8 " -"已经包含了记录信息大小、通过网络发送的参数数量、固定剪切和自适应剪切的差分隐私、本地差分隐私以及安全聚合协议 SecAgg 和 SecAgg+ 的" -" Mods。Flower Mods API 作为预览版发布,但研究人员已经可以用它来试验任意的 SMPC 协议。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:125 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Fine-tune LLMs with LLM FlowerTune** " -"([#3029](https://github.com/adap/flower/pull/3029), " -"[#3089](https://github.com/adap/flower/pull/3089), " -"[#3092](https://github.com/adap/flower/pull/3092), " -"[#3100](https://github.com/adap/flower/pull/3100), " -"[#3114](https://github.com/adap/flower/pull/3114), " -"[#3162](https://github.com/adap/flower/pull/3162), " -"[#3172](https://github.com/adap/flower/pull/3172))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " 
-"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:127 +#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst:2 #, fuzzy -msgid "" -"We are introducing LLM FlowerTune, an introductory example that " -"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " -"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" -" different models and/or datasets. Read our blog post [LLM FlowerTune: " -"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" -"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." -msgstr "" -"我们将介绍 LLM FlowerTune,这是一个介绍性示例,演示了在 Alpaca-GPT4 数据集上对预先训练好的 Llama2 模型进行联合" -" LLM 微调。该示例可轻松调整以使用不同的模型和/或数据集。请阅读我们的博文 [LLM FlowerTune: Federated LLM " -"Fine-tuning with Flower](https://flower.ai/blog/2024-03-14-llm-" -"flowertune-federated-llm-finetuning-with-flower/) 了解更多详情。" +msgid "FedAdam" +msgstr "FedAdagrad" -#: ../../source/ref-changelog.md:129 +#: flwr.server.strategy.fedadam.FedAdam:33 +#: flwr.server.strategy.fedyogi.FedYogi:36 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "动量参数。默认为 0.9。" + +#: flwr.server.strategy.fedadam.FedAdam:35 +#: flwr.server.strategy.fedyogi.FedYogi:38 of +msgid "Second moment parameter. Defaults to 0.99." 
+msgstr "第二动量参数。默认为 0.99。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce built-in Differential Privacy (preview)** " -"([#2798](https://github.com/adap/flower/pull/2798), " -"[#2959](https://github.com/adap/flower/pull/2959), " -"[#3038](https://github.com/adap/flower/pull/3038), " -"[#3147](https://github.com/adap/flower/pull/3147), " -"[#2909](https://github.com/adap/flower/pull/2909), " -"[#2893](https://github.com/adap/flower/pull/2893), " -"[#2892](https://github.com/adap/flower/pull/2892), " -"[#3039](https://github.com/adap/flower/pull/3039), " -"[#3074](https://github.com/adap/flower/pull/3074))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:131 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Built-in Differential Privacy is here! Flower supports both central and " -"local differential privacy (DP). Central DP can be configured with either" -" fixed or adaptive clipping. The clipping can happen either on the " -"server-side or the client-side. Local DP does both clipping and noising " -"on the client-side. A new documentation page [explains Differential " -"Privacy approaches](https://flower.ai/docs/framework/explanation-" -"differential-privacy.html) and a new how-to guide describes [how to use " -"the new Differential Privacy components](https://flower.ai/docs/framework" -"/how-to-use-differential-privacy.html) in Flower." 
+":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"内置差分保密功能!Flower 支持中央和本地差分保密 (DP)。中央差分隐私可配置为固定或自适应剪切。剪切可以发生在服务器端或客户端。本地 DP" -" 在客户端进行剪切和噪声处理。新的文档页面[解释差分隐私方法](https://flower.ai/docs/framework" -"/explanation-differential-privacy.html) " -"和新的操作指南[如何使用新的差分隐私组件](https://flower.ai/docs/framework/how-to-use-" -"differential-privacy.html) 介绍了 Flower 的使用方法。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:133 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce built-in Secure Aggregation (preview)** " -"([#3120](https://github.com/adap/flower/pull/3120), " -"[#3110](https://github.com/adap/flower/pull/3110), " -"[#3108](https://github.com/adap/flower/pull/3108))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:135 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Built-in Secure Aggregation is here! Flower now supports different secure" -" aggregation protocols out-of-the-box. The best part? You can add secure " -"aggregation to your Flower projects with only a few lines of code. In " -"this initial release, we inlcude support for SecAgg and SecAgg+, but more" -" protocols will be implemented shortly. We'll also add detailed docs that" -" explain secure aggregation and how to use it in Flower. You can already " -"check out the new code example that shows how to use Flower to easily " -"combine Federated Learning, Differential Privacy and Secure Aggregation " -"in the same project." 
+":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"内置安全聚合功能!Flower 现在支持不同的安全聚合协议。最棒的是什么?只需几行代码,您就可以将安全聚合添加到 Flower " -"项目中。在这个初始版本中,我们包含了对 SecAgg 和 SecAgg+ " -"的支持,但更多协议将很快实现。我们还将添加详细的文档,解释安全聚合以及如何在 Flower 中使用它。您可以查看新的代码示例,了解如何使用 " -"Flower 在同一项目中轻松结合联合学习、差分隐私和安全聚合。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:137 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce** `flwr` **CLI (preview)** " -"([#2942](https://github.com/adap/flower/pull/2942), " -"[#3055](https://github.com/adap/flower/pull/3055), " -"[#3111](https://github.com/adap/flower/pull/3111), " -"[#3130](https://github.com/adap/flower/pull/3130), " -"[#3136](https://github.com/adap/flower/pull/3136), " -"[#3094](https://github.com/adap/flower/pull/3094), " -"[#3059](https://github.com/adap/flower/pull/3059), " -"[#3049](https://github.com/adap/flower/pull/3049), " -"[#3142](https://github.com/adap/flower/pull/3142))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:139 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" -" and then running them using the Simulation Engine (`flwr run`)." 
-msgstr "新的 `flwr` CLI 命令允许创建新的 Flower 项目(`flwr new`),然后使用仿真引擎运行它们(`flwr run`)。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:141 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce Flower Next Simulation Engine** " -"([#3024](https://github.com/adap/flower/pull/3024), " -"[#3061](https://github.com/adap/flower/pull/3061), " -"[#2997](https://github.com/adap/flower/pull/2997), " -"[#2783](https://github.com/adap/flower/pull/2783), " -"[#3184](https://github.com/adap/flower/pull/3184), " -"[#3075](https://github.com/adap/flower/pull/3075), " -"[#3047](https://github.com/adap/flower/pull/3047), " -"[#2998](https://github.com/adap/flower/pull/2998), " -"[#3009](https://github.com/adap/flower/pull/3009), " -"[#3008](https://github.com/adap/flower/pull/3008))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:143 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The Flower Simulation Engine can now run Flower Next projects. For " -"notebook environments, there's also a new `run_simulation` function that " -"can run `ServerApp` and `ClientApp`." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"Flower 模拟引擎现在可以运行 Flower Next 项目。对于笔记本环境,还有一个新的 `run_simulation` 函数,可以运行 " -"`ServerApp` 和 `ClientApp`。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:145 +#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst:2 #, fuzzy +msgid "FedAvg" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavg.FedAvg:3 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" +msgstr "实施基于 https://arxiv.org/abs/1602.05629" + +#: flwr.server.strategy.fedavg.FedAvg:5 flwr.server.strategy.fedprox.FedProx:37 +#: of msgid "" -"**Handle SuperNode connection errors** " -"([#2969](https://github.com/adap/flower/pull/2969))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." +msgstr "" +"训练过程中使用的客户端比例。如果 `min_fit_clients` 大于 `fraction_fit * " +"available_clients`,则仍会对 `min_fit_clients` 进行采样。默认为 1.0。" -#: ../../source/ref-changelog.md:147 -#, fuzzy +#: flwr.server.strategy.fedavg.FedAvg:9 flwr.server.strategy.fedprox.FedProx:41 +#: of msgid "" -"A SuperNode will now try to reconnect indefinitely to the SuperLink in " -"case of connection errors. The arguments `--max-retries` and `--max-wait-" -"time` can now be passed to the `flower-client-app` command. `--max-" -"retries` will define the number of tentatives the client should make " -"before it gives up trying to reconnect to the SuperLink, and, `--max-" -"wait-time` defines the time before the SuperNode gives up trying to " -"reconnect to the SuperLink." +"Fraction of clients used during validation. 
In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." msgstr "" -"如果出现连接错误,超级节点现在会尝试无限期地重新连接超级链接。现在可以向 `flower-client-app` 命令传递参数 `-ax-" -"retries` 和 `-max-wait-time`。最大重试次数 \"将定义客户端在放弃重新连接超级链接之前的重试次数,而 \"最大等待时间 " -"\"则定义超级节点放弃重新连接超级链接之前的等待时间。" +"验证过程中使用的客户端的比例。如果 `min_evaluate_clients` 大于 `fraction_evaluate * " +"available_clients`,则仍会对 `min_evaluate_clients` 进行采样。默认为 1.0。" -#: ../../source/ref-changelog.md:149 +#: flwr.server.strategy.fedavg.FedAvg:33 of +#, fuzzy +msgid "Enable (True) or disable (False) in-place aggregation of model updates." +msgstr "启用(真)或禁用(假)模型更新的就地聚合。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General updates to Flower Baselines** " -"([#2904](https://github.com/adap/flower/pull/2904), " -"[#2482](https://github.com/adap/flower/pull/2482), " -"[#2985](https://github.com/adap/flower/pull/2985), " -"[#2968](https://github.com/adap/flower/pull/2968))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: ../../source/ref-changelog.md:151 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"baseline. Several other baselined have been updated as well." 
+":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"有一条新的 [FedStar](https://flower.ai/docs/baselines/fedstar.html) " -"基准线。其他几条基准线也已更新。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:153 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Improve documentation and translations** " -"([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " -"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"**改进文件和翻译** ([#3050](https://github.com/adap/flower/pull/3050), " -"[#3044](https://github.com/adap/flower/pull/3044), " -"[#3043](https://github.com/adap/flower/pull/3043), " -"[#2986](https://github.com/adap/flower/pull/2986), " -"[#3041](https://github.com/adap/flower/pull/3041), " -"[#3046](https://github.com/adap/flower/pull/3046), " -"[#3042](https://github.com/adap/flower/pull/3042), " -"[#2978](https://github.com/adap/flower/pull/2978), " -"[#2952](https://github.com/adap/flower/pull/2952), " -"[#3167](https://github.com/adap/flower/pull/3167), " 
-"[#2953](https://github.com/adap/flower/pull/2953), " -"[#3045](https://github.com/adap/flower/pull/3045), " -"[#2654](https://github.com/adap/flower/pull/2654), " -"[#3082](https://github.com/adap/flower/pull/3082), " -"[#2990](https://github.com/adap/flower/pull/2990), " -"[#2989](https://github.com/adap/flower/pull/2989))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:155 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"As usual, we merged many smaller and larger improvements to the " -"documentation. A special thank you goes to [Sebastian van der " -"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"像往常一样,我们合并了许多对文档的较大和较小的改进。特别要感谢 [Sebastian van der " -"Voort](https://github.com/svdvoort),他为我们带来了一份重要的文档 PR!" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:157 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([3134](https://github.com/adap/flower/pull/3134), " -"[2996](https://github.com/adap/flower/pull/2996), " -"[2930](https://github.com/adap/flower/pull/2930), " -"[2967](https://github.com/adap/flower/pull/2967), " -"[2467](https://github.com/adap/flower/pull/2467), " -"[2910](https://github.com/adap/flower/pull/2910), " -"[#2918](https://github.com/adap/flower/pull/2918), " -"[#2773](https://github.com/adap/flower/pull/2773), " -"[#3063](https://github.com/adap/flower/pull/3063), " -"[#3116](https://github.com/adap/flower/pull/3116), " -"[#3117](https://github.com/adap/flower/pull/3117))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " 
-"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:159 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Two new examples show federated training of a Vision Transformer (ViT) " -"and federated learning in a medical context using the popular MONAI " -"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" -" new Flower Next `ServerApp` and `ClientApp`. Many other examples " -"received considerable updates as well." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"两个新示例展示了视觉转换器(ViT)的联合训练,以及使用流行的 MONAI 库在医疗环境中进行的联合学习。quickstart-pytorch " -"\"和 \"quickstart-tensorflow \"展示了新的 Flower Next \"ServerApp \"和 " -"\"ClientApp\"。许多其他示例也得到了大量更新。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:161 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General improvements** " -"([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " -"[2980](https://github.com/adap/flower/pull/2980), " 
-"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " -"[3143](https://github.com/adap/flower/pull/3143), " 
-"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**一般改进**([#3171](https://github.com/adap/flower/pull/3171), " -"[3099](https://github.com/adap/flower/pull/3099), " -"[3003](https://github.com/adap/flower/pull/3003), " -"[3145](https://github.com/adap/flower/pull/3145), " -"[3017](https://github.com/adap/flower/pull/3017), " -"[3085](https://github.com/adap/flower/pull/3085), " -"[3012](https://github.com/adap/flower/pull/3012), " -"[3119](https://github.com/adap/flower/pull/3119), " -"[2991](https://github.com/adap/flower/pull/2991), " -"[2970](https://github.com/adap/flower/pull/2970), " 
-"[2980](https://github.com/adap/flower/pull/2980), " -"[3086](https://github.com/adap/flower/pull/3086), " -"[2932](https://github.com/adap/flower/pull/2932), " -"[2928](https://github.com/adap/flower/pull/2928), " -"[2941](https://github.com/adap/flower/pull/2941), " -"[2933](https://github.com/adap/flower/pull/2933), " -"[3181](https://github.com/adap/flower/pull/3181), " -"[2973](https://github.com/adap/flower/pull/2973), " -"[2992](https://github.com/adap/flower/pull/2992), " -"[2915](https://github.com/adap/flower/pull/2915), " -"[3040](https://github.com/adap/flower/pull/3040), " -"[3022](https://github.com/adap/flower/pull/3022), " -"[3032](https://github.com/adap/flower/pull/3032), " -"[2902](https://github.com/adap/flower/pull/2902), " -"[2931](https://github.com/adap/flower/pull/2931), " -"[3005](https://github.com/adap/flower/pull/3005), " -"[3132](https://github.com/adap/flower/pull/3132), " -"[3115](https://github.com/adap/flower/pull/3115), " -"[2944](https://github.com/adap/flower/pull/2944), " -"[3064](https://github.com/adap/flower/pull/3064), " -"[3106](https://github.com/adap/flower/pull/3106), " -"[2974](https://github.com/adap/flower/pull/2974), " -"[3178](https://github.com/adap/flower/pull/3178), " -"[2993](https://github.com/adap/flower/pull/2993), " -"[3186](https://github.com/adap/flower/pull/3186), " -"[3091](https://github.com/adap/flower/pull/3091), " -"[3125](https://github.com/adap/flower/pull/3125), " -"[3093](https://github.com/adap/flower/pull/3093), " -"[3013](https://github.com/adap/flower/pull/3013), " -"[3033](https://github.com/adap/flower/pull/3033), " -"[3133](https://github.com/adap/flower/pull/3133), " -"[3068](https://github.com/adap/flower/pull/3068), " -"[2916](https://github.com/adap/flower/pull/2916), " -"[2975](https://github.com/adap/flower/pull/2975), " -"[2984](https://github.com/adap/flower/pull/2984), " -"[2846](https://github.com/adap/flower/pull/2846), " -"[3077](https://github.com/adap/flower/pull/3077), " 
-"[3143](https://github.com/adap/flower/pull/3143), " -"[2921](https://github.com/adap/flower/pull/2921), " -"[3101](https://github.com/adap/flower/pull/3101), " -"[2927](https://github.com/adap/flower/pull/2927), " -"[2995](https://github.com/adap/flower/pull/2995), " -"[2972](https://github.com/adap/flower/pull/2972), " -"[2912](https://github.com/adap/flower/pull/2912), " -"[3065](https://github.com/adap/flower/pull/3065), " -"[3028](https://github.com/adap/flower/pull/3028), " -"[2922](https://github.com/adap/flower/pull/2922), " -"[2982](https://github.com/adap/flower/pull/2982), " -"[2914](https://github.com/adap/flower/pull/2914), " -"[3179](https://github.com/adap/flower/pull/3179), " -"[3080](https://github.com/adap/flower/pull/3080), " -"[2994](https://github.com/adap/flower/pull/2994), " -"[3187](https://github.com/adap/flower/pull/3187), " -"[2926](https://github.com/adap/flower/pull/2926), " -"[3018](https://github.com/adap/flower/pull/3018), " -"[3144](https://github.com/adap/flower/pull/3144), " -"[3011](https://github.com/adap/flower/pull/3011), " -"[#3152](https://github.com/adap/flower/pull/3152), " -"[#2836](https://github.com/adap/flower/pull/2836), " -"[#2929](https://github.com/adap/flower/pull/2929), " -"[#2943](https://github.com/adap/flower/pull/2943), " -"[#2955](https://github.com/adap/flower/pull/2955), " -"[#2954](https://github.com/adap/flower/pull/2954))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:165 ../../source/ref-changelog.md:442 -#: ../../source/ref-changelog.md:506 ../../source/ref-changelog.md:564 -#: ../../source/ref-changelog.md:633 ../../source/ref-changelog.md:695 -msgid "None" -msgstr "无" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" -#: 
../../source/ref-changelog.md:167 +#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst:2 #, fuzzy -msgid "v1.7.0 (2024-02-05)" -msgstr "v1.3.0 (2023-02-06)" +msgid "FedAvgAndroid" +msgstr "DPFedAvgAdaptive" -#: ../../source/ref-changelog.md:173 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " -"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " -"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " -"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " -"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " -"Shaaban`, `Yan Gao`, `Yasar Abbas` " +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:177 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce stateful clients (experimental)** " -"([#2770](https://github.com/adap/flower/pull/2770), " -"[#2686](https://github.com/adap/flower/pull/2686), " -"[#2696](https://github.com/adap/flower/pull/2696), " -"[#2643](https://github.com/adap/flower/pull/2643), " -"[#2769](https://github.com/adap/flower/pull/2769))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " 
-"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:179 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Subclasses of `Client` and `NumPyClient` can now store local state that " -"remains on the client. Let's start with the highlight first: this new " -"feature is compatible with both simulated clients (via " -"`start_simulation`) and networked clients (via `start_client`). It's also" -" the first preview of new abstractions like `Context` and `RecordSet`. " -"Clients can access state of type `RecordSet` via `state: RecordSet = " -"self.context.state`. Changes to this `RecordSet` are preserved across " -"different rounds of execution to enable stateful computations in a " -"unified way across simulation and deployment." +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" msgstr "" -"客户端 \"和 \"NumPyClient \"的子类现在可以存储保留在客户端上的本地状态。让我们先从亮点开始:这一新功能与模拟客户端(通过 " -"`start_simulation`)和网络客户端(通过 `start_client`)兼容。这也是 `Context` 和 " -"`RecordSet` 等新抽象的首次预览。客户端可以通过 `state.RecordSet` 访问 `RecordSet` 类型的状态: " -"RecordSet = self.context.state`。对该 `RecordSet` " -"的更改会在不同轮执行中保留,以便在模拟和部署中以统一的方式进行有状态计算。" +":py:obj:`bytes_to_ndarray " +"`\\ \\(tensor\\)" -#: ../../source/ref-changelog.md:181 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.bytes_to_ndarray:1 of #, fuzzy -msgid "" -"**Improve performance** " -"([#2293](https://github.com/adap/flower/pull/2293))" -msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" +msgid "Deserialize NumPy array from bytes." +msgstr "从字节反序列化 NumPy ndarray。" -#: ../../source/ref-changelog.md:183 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Flower is faster than ever. 
All `FedAvg`-derived strategies now use in-" -"place aggregation to reduce memory consumption. The Flower client " -"serialization/deserialization has been rewritten from the ground up, " -"which results in significant speedups, especially when the client-side " -"training time is short." +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Flower 的速度比以往更快。所有源于 `FedAvg` 的策略现在都使用就地聚合,以减少内存消耗。Flower " -"客户端序列化/解序列化已从头开始重写,从而显著提高了速度,尤其是在客户端训练时间较短的情况下。" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:185 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Support Federated Learning with Apple MLX and Flower** " -"([#2693](https://github.com/adap/flower/pull/2693))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:187 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Flower has official support for federated learning using [Apple " -"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " -"example." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"通过新的 `quickstart-mlx` 代码示例,Flower 正式支持使用 [Apple MLX](https://ml-" -"explore.github.io/mlx)的联合学习。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:189 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce new XGBoost cyclic strategy** " -"([#2666](https://github.com/adap/flower/pull/2666), " -"[#2668](https://github.com/adap/flower/pull/2668))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:191 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"A new strategy called `FedXgbCyclic` supports a client-by-client style of" -" training (often called cyclic). The `xgboost-comprehensive` code example" -" shows how to use it in a full project. In addition to that, `xgboost-" -"comprehensive` now also supports simulation mode. With this, Flower " -"offers best-in-class XGBoost support." +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" msgstr "" -"名为 `FedXgbCyclic` 的新策略支持逐个客户端的训练风格(通常称为循环)。xgboost-comprehensive " -"\"代码示例展示了如何在一个完整的项目中使用它。除此之外,`xgboost-comprehensive` 现在还支持模拟模式。由此,Flower " -"提供了同类最佳的 XGBoost 支持。" +":py:obj:`ndarray_to_bytes " +"`\\ \\(ndarray\\)" -#: ../../source/ref-changelog.md:193 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.ndarray_to_bytes:1 of #, fuzzy -msgid "" -"**Support Python 3.11** " -"([#2394](https://github.com/adap/flower/pull/2394))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +msgid "Serialize NumPy array to bytes." 
+msgstr "将 NumPy ndarray 序列化为字节。" -#: ../../source/ref-changelog.md:195 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " -"ensure better support for users using more recent Python versions." -msgstr "框架测试现在可在 Python 3.8、3.9、3.10 和 3.11 上运行。这将确保为使用最新 Python 版本的用户提供更好的支持。" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" +msgstr "" +":py:obj:`ndarrays_to_parameters " +"`\\ " +"\\(ndarrays\\)" -#: ../../source/ref-changelog.md:197 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Update gRPC and ProtoBuf dependencies** " -"([#2814](https://github.com/adap/flower/pull/2814))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:199 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"The `grpcio` and `protobuf` dependencies were updated to their latest " -"versions for improved security and performance." 
-msgstr "为提高安全性和性能,\"grpcio \"和 \"protobuf \"依赖项已更新至最新版本。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:201 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce Docker image for Flower server** " -"([#2700](https://github.com/adap/flower/pull/2700), " -"[#2688](https://github.com/adap/flower/pull/2688), " -"[#2705](https://github.com/adap/flower/pull/2705), " -"[#2695](https://github.com/adap/flower/pull/2695), " -"[#2747](https://github.com/adap/flower/pull/2747), " -"[#2746](https://github.com/adap/flower/pull/2746), " -"[#2680](https://github.com/adap/flower/pull/2680), " -"[#2682](https://github.com/adap/flower/pull/2682), " -"[#2701](https://github.com/adap/flower/pull/2701))" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +":py:obj:`parameters_to_ndarrays " +"`\\ " +"\\(parameters\\)" -#: ../../source/ref-changelog.md:203 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedavg_android.FedAvgAndroid.parameters_to_ndarrays:1 +#: of +#, fuzzy +msgid "Convert parameters object to NumPy weights." 
+msgstr "将参数对象转换为 NumPy ndarrays。" + +#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst:2 +#, fuzzy +msgid "FedAvgM" +msgstr "DP-FedAvg" + +#: flwr.server.strategy.fedavgm.FedAvgM:3 of #, fuzzy +msgid "Implementation based on https://arxiv.org/abs/1909.06335" +msgstr "实施基于 https://arxiv.org/pdf/1909.06335.pdf" + +#: flwr.server.strategy.fedavgm.FedAvgM:25 of msgid "" -"The Flower server can now be run using an official Docker image. A new " -"how-to guide explains [how to run Flower using " -"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html). An official Flower client Docker image will follow." +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." +msgstr "服务器端优化中使用的服务器端学习率。默认为 1.0。" + +#: flwr.server.strategy.fedavgm.FedAvgM:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +msgstr "用于 FedAvgM 的服务器端动量因子。默认为 0.0。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy +msgid "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"现在可以使用官方 Docker 映像运行 Flower 服务器了。新的操作指南介绍了 [如何使用 Docker 运行 " -"Flower](https://flower.ai/docs/framework/how-to-run-flower-using-" -"docker.html)。Flower 客户端 Docker 官方镜像将随后发布。" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:205 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce** `flower-via-docker-compose` **example** " -"([#2626](https://github.com/adap/flower/pull/2626))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:207 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce** 
`quickstart-sklearn-tabular` **example** " -"([#2719](https://github.com/adap/flower/pull/2719))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:209 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Introduce** `custom-metrics` **example** " -"([#1958](https://github.com/adap/flower/pull/1958))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:211 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Update code examples to use Flower Datasets** " -"([#2450](https://github.com/adap/flower/pull/2450), " -"[#2456](https://github.com/adap/flower/pull/2456), " -"[#2318](https://github.com/adap/flower/pull/2318), " -"[#2712](https://github.com/adap/flower/pull/2712))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:213 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Several code examples were updated to use [Flower " -"Datasets](https://flower.ai/docs/datasets/)." 
-msgstr "更新了多个代码示例,以使用 [Flower Datasets](https://flower.ai/docs/datasets/) 。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:215 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#2381](https://github.com/adap/flower/pull/2381), " -"[#2805](https://github.com/adap/flower/pull/2805), " -"[#2782](https://github.com/adap/flower/pull/2782), " -"[#2806](https://github.com/adap/flower/pull/2806), " -"[#2829](https://github.com/adap/flower/pull/2829), " -"[#2825](https://github.com/adap/flower/pull/2825), " -"[#2816](https://github.com/adap/flower/pull/2816), " -"[#2726](https://github.com/adap/flower/pull/2726), " -"[#2659](https://github.com/adap/flower/pull/2659), " -"[#2655](https://github.com/adap/flower/pull/2655))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:217 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "Many Flower code examples received substantial updates." 
-msgstr "许多 \"Flower \"代码示例得到了大幅更新。" +msgid "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:312 -msgid "**Update Flower Baselines**" -msgstr "**更新 Flower Baselines**" +#: ../../source/ref-api/flwr.server.strategy.FedMedian.rst:2 +#, fuzzy +msgid "FedMedian" +msgstr "联邦医保" -#: ../../source/ref-changelog.md:221 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " -"[#2771](https://github.com/adap/flower/pull/2771))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:222 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:223 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." 
+msgstr "使用中位数汇总拟合结果。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgid "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:224 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" -msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:225 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgid "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" +msgstr "" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:226 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgid "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:228 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Improve documentation** " -"([#2674](https://github.com/adap/flower/pull/2674), " -"[#2480](https://github.com/adap/flower/pull/2480), " -"[#2826](https://github.com/adap/flower/pull/2826), " 
-"[#2727](https://github.com/adap/flower/pull/2727), " -"[#2761](https://github.com/adap/flower/pull/2761), " -"[#2900](https://github.com/adap/flower/pull/2900))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:230 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Improved testing and development infrastructure** " -"([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " 
-"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " -"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**改进测试和开发基础设施** ([#2797](https://github.com/adap/flower/pull/2797), " -"[#2676](https://github.com/adap/flower/pull/2676), " -"[#2644](https://github.com/adap/flower/pull/2644), " -"[#2656](https://github.com/adap/flower/pull/2656), " -"[#2848](https://github.com/adap/flower/pull/2848), " -"[#2675](https://github.com/adap/flower/pull/2675), " -"[#2735](https://github.com/adap/flower/pull/2735), " -"[#2767](https://github.com/adap/flower/pull/2767), " -"[#2732](https://github.com/adap/flower/pull/2732), " -"[#2744](https://github.com/adap/flower/pull/2744), " -"[#2681](https://github.com/adap/flower/pull/2681), " -"[#2699](https://github.com/adap/flower/pull/2699), " -"[#2745](https://github.com/adap/flower/pull/2745), " -"[#2734](https://github.com/adap/flower/pull/2734), " -"[#2731](https://github.com/adap/flower/pull/2731), " -"[#2652](https://github.com/adap/flower/pull/2652), " -"[#2720](https://github.com/adap/flower/pull/2720), " -"[#2721](https://github.com/adap/flower/pull/2721), " -"[#2717](https://github.com/adap/flower/pull/2717), " -"[#2864](https://github.com/adap/flower/pull/2864), " -"[#2694](https://github.com/adap/flower/pull/2694), " -"[#2709](https://github.com/adap/flower/pull/2709), " -"[#2658](https://github.com/adap/flower/pull/2658), " -"[#2796](https://github.com/adap/flower/pull/2796), " 
-"[#2692](https://github.com/adap/flower/pull/2692), " -"[#2657](https://github.com/adap/flower/pull/2657), " -"[#2813](https://github.com/adap/flower/pull/2813), " -"[#2661](https://github.com/adap/flower/pull/2661), " -"[#2398](https://github.com/adap/flower/pull/2398))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst:2 +#, fuzzy +msgid "FedOpt" +msgstr "FedOpt" + +#: flwr.server.strategy.fedopt.FedOpt:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "动量参数。默认为 0.0。" -#: ../../source/ref-changelog.md:232 +#: flwr.server.strategy.fedopt.FedOpt:35 of +msgid "Second moment parameter. Defaults to 0.0." +msgstr "第二动量参数。默认为 0.0。" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The Flower testing and development infrastructure has received " -"substantial updates. This makes Flower 1.7 the most tested release ever." -msgstr "Flower 测试和开发基础架构已得到大幅更新。这使得 Flower 1.7 成为有史以来经过最多测试的版本。" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: ../../source/ref-changelog.md:234 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Update dependencies** " -"([#2753](https://github.com/adap/flower/pull/2753), " -"[#2651](https://github.com/adap/flower/pull/2651), " -"[#2739](https://github.com/adap/flower/pull/2739), " -"[#2837](https://github.com/adap/flower/pull/2837), " -"[#2788](https://github.com/adap/flower/pull/2788), " -"[#2811](https://github.com/adap/flower/pull/2811), " -"[#2774](https://github.com/adap/flower/pull/2774), " -"[#2790](https://github.com/adap/flower/pull/2790), " -"[#2751](https://github.com/adap/flower/pull/2751), " -"[#2850](https://github.com/adap/flower/pull/2850), " -"[#2812](https://github.com/adap/flower/pull/2812), " 
-"[#2872](https://github.com/adap/flower/pull/2872), " -"[#2736](https://github.com/adap/flower/pull/2736), " -"[#2756](https://github.com/adap/flower/pull/2756), " -"[#2857](https://github.com/adap/flower/pull/2857), " -"[#2757](https://github.com/adap/flower/pull/2757), " -"[#2810](https://github.com/adap/flower/pull/2810), " -"[#2740](https://github.com/adap/flower/pull/2740), " -"[#2789](https://github.com/adap/flower/pull/2789))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:236 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**General improvements** " -"([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " -"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " 
-"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"**一般改进** ([#2803](https://github.com/adap/flower/pull/2803), " -"[#2847](https://github.com/adap/flower/pull/2847), " -"[#2877](https://github.com/adap/flower/pull/2877), " -"[#2690](https://github.com/adap/flower/pull/2690), " -"[#2889](https://github.com/adap/flower/pull/2889), " -"[#2874](https://github.com/adap/flower/pull/2874), " -"[#2819](https://github.com/adap/flower/pull/2819), " -"[#2689](https://github.com/adap/flower/pull/2689), " -"[#2457](https://github.com/adap/flower/pull/2457), " 
-"[#2870](https://github.com/adap/flower/pull/2870), " -"[#2669](https://github.com/adap/flower/pull/2669), " -"[#2876](https://github.com/adap/flower/pull/2876), " -"[#2885](https://github.com/adap/flower/pull/2885), " -"[#2858](https://github.com/adap/flower/pull/2858), " -"[#2867](https://github.com/adap/flower/pull/2867), " -"[#2351](https://github.com/adap/flower/pull/2351), " -"[#2886](https://github.com/adap/flower/pull/2886), " -"[#2860](https://github.com/adap/flower/pull/2860), " -"[#2828](https://github.com/adap/flower/pull/2828), " -"[#2869](https://github.com/adap/flower/pull/2869), " -"[#2875](https://github.com/adap/flower/pull/2875), " -"[#2733](https://github.com/adap/flower/pull/2733), " -"[#2488](https://github.com/adap/flower/pull/2488), " -"[#2646](https://github.com/adap/flower/pull/2646), " -"[#2879](https://github.com/adap/flower/pull/2879), " -"[#2821](https://github.com/adap/flower/pull/2821), " -"[#2855](https://github.com/adap/flower/pull/2855), " -"[#2800](https://github.com/adap/flower/pull/2800), " -"[#2807](https://github.com/adap/flower/pull/2807), " -"[#2801](https://github.com/adap/flower/pull/2801), " -"[#2804](https://github.com/adap/flower/pull/2804), " -"[#2851](https://github.com/adap/flower/pull/2851), " -"[#2787](https://github.com/adap/flower/pull/2787), " -"[#2852](https://github.com/adap/flower/pull/2852), " -"[#2672](https://github.com/adap/flower/pull/2672), " -"[#2759](https://github.com/adap/flower/pull/2759))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:240 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Deprecate** `start_numpy_client` " -"([#2563](https://github.com/adap/flower/pull/2563), " -"[#2718](https://github.com/adap/flower/pull/2718))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " 
-"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:242 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Until now, clients of type `NumPyClient` needed to be started via " -"`start_numpy_client`. In our efforts to consolidate framework APIs, we " -"have introduced changes, and now all client types should start via " -"`start_client`. To continue using `NumPyClient` clients, you simply need " -"to first call the `.to_client()` method and then pass returned `Client` " -"object to `start_client`. The examples and the documentation have been " -"updated accordingly." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"到目前为止,\"NumPyClient \"类型的客户端需要通过 \"start_numpy_client \"启动。为了整合框架 " -"API,我们引入了一些变化,现在所有客户端类型都应通过 `start_client` 启动。要继续使用 `NumPyClient` " -"客户端,只需首先调用 `.to_client()` 方法,然后将返回的 `Client` 对象传递给 " -"`start_client`。示例和文档已相应更新。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:244 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Deprecate legacy DP wrappers** " -"([#2749](https://github.com/adap/flower/pull/2749))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:246 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Legacy DP wrapper classes are deprecated, but still functional. This is " -"in preparation for an all-new pluggable version of differential privacy " -"support in Flower." 
-msgstr "传统的 DP 封装类已废弃,但仍可正常使用。这是为 Flower 中的全新可插拔差分隐私支持版本做准备。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:248 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Make optional arg** `--callable` **in** `flower-client` **a required " -"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`num_fit_clients `\\" +" \\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:250 +#: ../../source/ref-api/flwr.server.strategy.FedProx.rst:2 #, fuzzy +msgid "FedProx" +msgstr "FedProx" + +#: flwr.server.strategy.fedprox.FedProx:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "实施基于 https://arxiv.org/abs/1812.06127" + +#: flwr.server.strategy.fedprox.FedProx:5 of msgid "" -"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " -"([#2890](https://github.com/adap/flower/pull/2890))" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. 
A proximal term needs to be added to the loss " +"function during the training:" +msgstr "策略本身与 FedAvg 并无不同,客户端需要进行调整。在训练过程中,需要在损失函数中添加一个近端项:" + +#: flwr.server.strategy.fedprox.FedProx:9 of +msgid "" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" -#: ../../source/ref-changelog.md:252 -#, fuzzy +#: flwr.server.strategy.fedprox.FedProx:12 of msgid "" -"**Drop experimental** `Task` **fields** " -"([#2866](https://github.com/adap/flower/pull/2866), " -"[#2865](https://github.com/adap/flower/pull/2865))" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." +msgstr "其中,$w^t$ 是全局参数,$w$ 是优化函数的局部权重。" + +#: flwr.server.strategy.fedprox.FedProx:15 of +msgid "In PyTorch, for example, the loss would go from:" +msgstr "例如,在 PyTorch 中,损失将从:" + +#: flwr.server.strategy.fedprox.FedProx:21 of +msgid "To:" +msgstr "改为:" + +#: flwr.server.strategy.fedprox.FedProx:30 of +msgid "" +"With `global_params` being a copy of the parameters before the training " +"takes place." +msgstr "其中,\"global_params \"是训练前的参数副本。" + +#: flwr.server.strategy.fedprox.FedProx:65 of +msgid "" +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)."
msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +"优化中使用的近端项权重。0.0 使该策略等同于 " +"FedAvg,系数越大,使用的正则化就越多(也就是说,在训练过程中,客户端参数需要更接近服务器参数)。" -#: ../../source/ref-changelog.md:254 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Experimental fields `sa`, `legacy_server_message` and " -"`legacy_client_message` were removed from `Task` message. The removed " -"fields are superseded by the new `RecordSet` abstraction." +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"从 `Task` 消息中删除了试验性字段 `sa`、 `legacy_server_message` 和 " -"`legacy_client_message`。删除的字段已被新的 `RecordSet` 抽象所取代。" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:256 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Retire MXNet examples** " -"([#2724](https://github.com/adap/flower/pull/2724))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" +msgstr "" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:258 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"The development of the MXNet fremework has ended and the project is now " -"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " -"examples won't receive updates." 
+":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"MXNet fremework 的开发工作已经结束,该项目现已[归档于 " -"GitHub](https://github.com/apache/mxnet)。现有的 MXNet 示例不会收到更新。" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:260 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy -msgid "v1.6.0 (2023-11-28)" -msgstr "v1.4.0 (2023-04-21)" +msgid "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:266 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " -"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " -"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," -" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " -"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " -"`cnxdeveloper`, `k3nfalt` " +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:270 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add experimental support for Python 3.12** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"** 增加对 Python 3.12 的实验支持** " -"([#2565](https://github.com/adap/flower/pull/2565))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:272 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Add new XGBoost examples** " -"([#2612](https://github.com/adap/flower/pull/2612), " -"[#2554](https://github.com/adap/flower/pull/2554), " -"[#2617](https://github.com/adap/flower/pull/2617), " -"[#2618](https://github.com/adap/flower/pull/2618), " -"[#2619](https://github.com/adap/flower/pull/2619), " -"[#2567](https://github.com/adap/flower/pull/2567))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:274 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid 
"" -"We have added a new `xgboost-quickstart` example alongside a new " -"`xgboost-comprehensive` example that goes more in-depth." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"我们添加了一个新的 \"xgboost-quickstart \"示例和一个新的 \"xgboost-comprehensive " -"\"示例,后者更加深入。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "向客户发送近端因子mu" -#: ../../source/ref-changelog.md:276 +#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst:2 #, fuzzy -msgid "" -"**Add Vertical FL example** " -"([#2598](https://github.com/adap/flower/pull/2598))" -msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" +msgid "FedTrimmedAvg" +msgstr "server.strategy.FedTrimmedAvg" -#: ../../source/ref-changelog.md:278 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of #, fuzzy -msgid "" -"We had many questions about Vertical Federated Learning using Flower, so " -"we decided to add an simple example for it on the [Titanic " -"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " -"tutorial (in the README)." -msgstr "" -"我们收到了许多关于使用 Flower 进行垂直联合学习的问题,因此我们决定在 [Titanic " -"数据集](https://www.kaggle.com/competitions/titanic/data) 上添加一个简单的示例,并附上教程(在" -" README 中)。" +msgid "Implemented based on: https://arxiv.org/abs/1803.01498" +msgstr "实施基于 https://arxiv.org/abs/1802.07927。" -#: ../../source/ref-changelog.md:280 -msgid "" -"**Support custom** `ClientManager` **in** `start_driver()` " -"([#2292](https://github.com/adap/flower/pull/2292))" -msgstr "**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github.com/adap/flower/pull/2292))" +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:25 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
+msgstr "截取分布两个尾部的分数。默认为 0.2。" -#: ../../source/ref-changelog.md:282 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Update REST API to support create and delete nodes** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**更新 REST API 以支持创建和删除节点** " -"([#2283](https://github.com/adap/flower/pull/2283))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:284 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Update the Android SDK** " -"([#2187](https://github.com/adap/flower/pull/2187))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:286 -#, fuzzy -msgid "Add gRPC request-response capability to the Android SDK." -msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." 
+msgstr "使用修剪平均值汇总拟合结果。" -#: ../../source/ref-changelog.md:288 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Update the C++ SDK** " -"([#2537](https://github.com/adap/flower/pull/2537), " -"[#2528](https://github.com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" - -#: ../../source/ref-changelog.md:290 -msgid "Add gRPC request-response capability to the C++ SDK." -msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:292 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"**Make HTTPS the new default** " -"([#2591](https://github.com/adap/flower/pull/2591), " -"[#2636](https://github.com/adap/flower/pull/2636))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400)" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:294 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Flower is moving to HTTPS by default. The new `flower-server` requires " -"passing `--certificates`, but users can enable `--insecure` to use HTTP " -"for prototyping. 
The same applies to `flower-client`, which can either " -"use user-provided credentials or gRPC-bundled certificates to connect to " -"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " -"enable insecure HTTP connections." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"Flower 默认使用 HTTPS。新的 \"flower-server \"需要通过\"--证书\",但用户可以启用\"--不安全 \"来使用 " -"HTTP 进行原型开发。这同样适用于 `flower-client`,它可以使用用户提供的凭证或 gRPC 绑定证书连接到支持 HTTPS " -"的服务器,也可以通过传递 `--insecure`来启用不安全的 HTTP 连接。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:296 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"For backward compatibility, `start_client()` and `start_numpy_client()` " -"will still start in insecure mode by default. In a future release, " -"insecure connections will require user opt-in by passing `insecure=True`." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"为了向后兼容,`start_client()` 和 `start_numpy_client()` " -"默认仍以不安全模式启动。在未来的版本中,不安全连接将需要用户通过传递 `insecure=True` 进行选择。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:298 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " -"[#2390](https://github.com/adap/flower/pull/2390), " -"[#2493](https://github.com/adap/flower/pull/2493))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:300 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of #, fuzzy msgid "" -"Using the `client_fn`, Flower 
clients can interchangeably run as " -"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" -" `start_simulation`) without requiring changes to how the client class is" -" defined and instantiated. The `to_client()` function is introduced to " -"convert a `NumPyClient` to a `Client`." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " -"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:302 -msgid "" -"**Add new** `Bulyan` **strategy** " -"([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891))" -msgstr "" -"**添加新**\"Bulyan " -"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " -"[#1891](https://github.com/adap/flower/pull/1891)" +#: ../../source/ref-api/flwr.server.strategy.FedXgbBagging.rst:2 +#, fuzzy +msgid "FedXgbBagging" +msgstr "FedXgbBagging" -#: ../../source/ref-changelog.md:304 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " -"2018](https://arxiv.org/abs/1802.07927)" -msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" +msgstr "" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:306 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy -msgid "" -"**Add new** `XGB Bagging` **strategy** " 
-"([#2611](https://github.com/adap/flower/pull/2611))" -msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" +msgid "Aggregate evaluation metrics using average." +msgstr "采用加权平均法计算评估损失总额。" -#: ../../source/ref-changelog.md:308 ../../source/ref-changelog.md:310 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**Introduce `WorkloadState`** " -"([#2564](https://github.com/adap/flower/pull/2564), " -"[#2632](https://github.com/adap/flower/pull/2632))" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" +":py:obj:`aggregate_fit " +"`\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:314 -msgid "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" -msgstr "" -"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " -"[#2286](https://github.com/adap/flower/pull/2286), " -"[#2509](https://github.com/adap/flower/pull/2509))" +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_fit:1 of +#, fuzzy +msgid "Aggregate fit results using bagging." 
+msgstr "使用 Bulyan 技术汇总拟合结果。" -#: ../../source/ref-changelog.md:316 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " -"[#2400](https://github.com/adap/flower/pull/2400)" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:318 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " -"[#2507](https://github.com/adap/flower/pull/2507))" +":py:obj:`configure_fit " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:320 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " -"[#2508](https://github.com/adap/flower/pull/2508))" - -#: ../../source/ref-changelog.md:322 -msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" - -#: ../../source/ref-changelog.md:324 -msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" - -#: ../../source/ref-changelog.md:326 -msgid "MOON 
[#2421](https://github.com/adap/flower/pull/2421)" -msgstr "MOON [#2421](https://github.com/adap/flower/pull/2421)" - -#: ../../source/ref-changelog.md:328 -msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -msgstr "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" - -#: ../../source/ref-changelog.md:330 -msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" - -#: ../../source/ref-changelog.md:332 -msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -msgstr "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" - -#: ../../source/ref-changelog.md:334 -msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:336 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " -"[#2615](https://github.com/adap/flower/pull/2615))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:338 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General updates to Flower Examples** " -"([#2384](https://github.com/adap/flower/pull/2384), " -"[#2425](https://github.com/adap/flower/pull/2425), " -"[#2526](https://github.com/adap/flower/pull/2526), " -"[#2302](https://github.com/adap/flower/pull/2302), " -"[#2545](https://github.com/adap/flower/pull/2545))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"** 更新 C++ SDK** 
([#2537](https://github/com/adap/flower/pull/2537), " -"[#2528](https://github/com/adap/flower/pull/2528), " -"[#2523](https://github.com/adap/flower/pull/2523), " -"[#2522](https://github.com/adap/flower/pull/2522))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:340 +#: flwr.server.strategy.fedxgb_bagging.FedXgbBagging.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General updates to Flower Baselines** " -"([#2301](https://github.com/adap/flower/pull/2301), " -"[#2305](https://github.com/adap/flower/pull/2305), " -"[#2307](https://github.com/adap/flower/pull/2307), " -"[#2327](https://github.com/adap/flower/pull/2327), " -"[#2435](https://github.com/adap/flower/pull/2435), " -"[#2462](https://github.com/adap/flower/pull/2462), " -"[#2463](https://github.com/adap/flower/pull/2463), " -"[#2461](https://github.com/adap/flower/pull/2461), " -"[#2469](https://github.com/adap/flower/pull/2469), " -"[#2466](https://github.com/adap/flower/pull/2466), " -"[#2471](https://github.com/adap/flower/pull/2471), " -"[#2472](https://github.com/adap/flower/pull/2472), " -"[#2470](https://github.com/adap/flower/pull/2470))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," -" [#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446) " -"[#2561](https://github.com/adap/flower/pull/2561))" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:342 +#: 
../../source/ref-api/flwr.server.strategy.FedXgbCyclic.rst:2 #, fuzzy -msgid "" -"**General updates to the simulation engine** " -"([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448), " -"[#2294](https://github.com/adap/flower/pull/2294))" -msgstr "" -"**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " -"[#2447](https://github.com/adap/flower/pull/2447), " -"[#2448](https://github.com/adap/flower/pull/2448))" +msgid "FedXgbCyclic" +msgstr "FedXgbCyclic" -#: ../../source/ref-changelog.md:344 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General updates to Flower SDKs** " -"([#2288](https://github.com/adap/flower/pull/2288), " -"[#2429](https://github.com/adap/flower/pull/2429), " -"[#2555](https://github.com/adap/flower/pull/2555), " -"[#2543](https://github.com/adap/flower/pull/2543), " -"[#2544](https://github.com/adap/flower/pull/2544), " -"[#2597](https://github.com/adap/flower/pull/2597), " -"[#2623](https://github.com/adap/flower/pull/2623))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:346 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of #, fuzzy msgid "" -"**General improvements** " -"([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " 
-"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " -"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" msgstr "" -"**一般改进** ([#2309](https://github.com/adap/flower/pull/2309), " -"[#2310](https://github.com/adap/flower/pull/2310), " -"[#2313](https://github.com/adap/flower/pull/2313), " -"[#2316](https://github.com/adap/flower/pull/2316), " -"[#2317](https://github.com/adap/flower/pull/2317), " -"[#2349](https://github.com/adap/flower/pull/2349), " -"[#2360](https://github.com/adap/flower/pull/2360), " -"[#2402](https://github.com/adap/flower/pull/2402), " -"[#2446](https://github.com/adap/flower/pull/2446), " -"[#2561](https://github.com/adap/flower/pull/2561), " 
-"[#2273](https://github.com/adap/flower/pull/2273), " -"[#2267](https://github.com/adap/flower/pull/2267), " -"[#2274](https://github.com/adap/flower/pull/2274), " -"[#2275](https://github.com/adap/flower/pull/2275), " -"[#2432](https://github.com/adap/flower/pull/2432), " -"[#2251](https://github.com/adap/flower/pull/2251), " -"[#2321](https://github.com/adap/flower/pull/2321), " -"[#1936](https://github.com/adap/flower/pull/1936), " -"[#2408](https://github.com/adap/flower/pull/2408), " -"[#2413](https://github.com/adap/flower/pull/2413), " -"[#2401](https://github.com/adap/flower/pull/2401), " -"[#2531](https://github.com/adap/flower/pull/2531), " -"[#2534](https://github.com/adap/flower/pull/2534), " -"[#2535](https://github.com/adap/flower/pull/2535), " -"[#2521](https://github.com/adap/flower/pull/2521), " -"[#2553](https://github.com/adap/flower/pull/2553), " -"[#2596](https://github.com/adap/flower/pull/2596))" - -#: ../../source/ref-changelog.md:348 ../../source/ref-changelog.md:438 -#: ../../source/ref-changelog.md:502 ../../source/ref-changelog.md:556 -#: ../../source/ref-changelog.md:623 -msgid "Flower received many improvements under the hood, too many to list here." 
-msgstr "Flower 进行了许多改进,这里就不一一列举了。" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\," +" results\\, failures\\)" -#: ../../source/ref-changelog.md:352 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Remove support for Python 3.7** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**移除对 Python 3.7 的支持** " -"([#2280](https://github.com/adap/flower/pull/2280), " -"[#2299](https://github.com/adap/flower/pull/2299), " -"[#2304](https://github.com/adap/flower/pull/2304), " -"[#2306](https://github.com/adap/flower/pull/2306), " -"[#2355](https://github.com/adap/flower/pull/2355), " -"[#2356](https://github.com/adap/flower/pull/2356))" - -#: ../../source/ref-changelog.md:354 -msgid "" -"Python 3.7 support was deprecated in Flower 1.5, and this release removes" -" support. Flower now requires Python 3.8." 
-msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:356 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Remove experimental argument** `rest` **from** `start_client` " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**从** `start_client` 中移除** `rest` **实验参数 " -"([#2324](https://github.com/adap/flower/pull/2324))" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:358 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The (still experimental) argument `rest` was removed from `start_client` " -"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " -"experimental REST API instead." +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 " -"`transport=\"rest\"` 来选择使用试验性 REST API。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:360 -msgid "v1.5.0 (2023-08-31)" -msgstr "v1.5.0 (2023-08-31)" - -#: ../../source/ref-changelog.md:366 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " -"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " -"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " -"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:370 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Introduce new simulation engine** " -"([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " -"[#2221](https://github.com/adap/flower/pull/2221), " -"[#2248](https://github.com/adap/flower/pull/2248))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:372 +#: flwr.server.strategy.fedxgb_cyclic.FedXgbCyclic.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"The new simulation engine has been rewritten from the ground up, yet it " -"remains fully backwards compatible. It offers much improved stability and" -" memory handling, especially when working with GPUs. Simulations " -"transparently adapt to different settings to scale simulation in CPU-" -"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " -"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" + +#: ../../source/ref-api/flwr.server.strategy.FedXgbNnAvg.rst:2 +#, fuzzy +msgid "FedXgbNnAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:374 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:5 of +#, fuzzy msgid "" -"Comprehensive documentation includes a new [how-to run " -"simulations](https://flower.ai/docs/framework/how-to-run-" -"simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +"This strategy is deprecated, but a copy of it is available in Flower " +"Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost." 
msgstr "" -"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" -"run-simulations.html) guide, new [simulation-" -"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " -"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" -"tensorflow.html) notebooks, and a new [YouTube tutorial " -"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" +"该策略已被弃用,但在 Flower Baselines: " +"https://github.com/adap/flower/tree/main/baselines/hfedxgboost 中有其副本。" -#: ../../source/ref-changelog.md:376 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Restructure Flower Docs** " -"([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " 
-"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"**重构 Flower 文档** ([#1824](https://github.com/adap/flower/pull/1824), " -"[#1865](https://github.com/adap/flower/pull/1865), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1887](https://github.com/adap/flower/pull/1887), " -"[#1919](https://github.com/adap/flower/pull/1919), " -"[#1922](https://github.com/adap/flower/pull/1922), " -"[#1920](https://github.com/adap/flower/pull/1920), " -"[#1923](https://github.com/adap/flower/pull/1923), " -"[#1924](https://github.com/adap/flower/pull/1924), " -"[#1962](https://github.com/adap/flower/pull/1962), " -"[#2006](https://github.com/adap/flower/pull/2006), " -"[#2133](https://github.com/adap/flower/pull/2133), " -"[#2203](https://github.com/adap/flower/pull/2203), " -"[#2215](https://github.com/adap/flower/pull/2215), " -"[#2122](https://github.com/adap/flower/pull/2122), " -"[#2223](https://github.com/adap/flower/pull/2223), " -"[#2219](https://github.com/adap/flower/pull/2219), " -"[#2232](https://github.com/adap/flower/pull/2232), " -"[#2233](https://github.com/adap/flower/pull/2233), " -"[#2234](https://github.com/adap/flower/pull/2234), " -"[#2235](https://github.com/adap/flower/pull/2235), " -"[#2237](https://github.com/adap/flower/pull/2237), " -"[#2238](https://github.com/adap/flower/pull/2238), " -"[#2242](https://github.com/adap/flower/pull/2242), " -"[#2231](https://github.com/adap/flower/pull/2231), " -"[#2243](https://github.com/adap/flower/pull/2243), " -"[#2227](https://github.com/adap/flower/pull/2227))" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:378 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of 
#, fuzzy msgid "" -"Much effort went into a completely restructured Flower docs experience. " -"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " -"divided into Flower Framework, Flower Baselines, Flower Android SDK, " -"Flower iOS SDK, and code example projects." +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" msgstr "" -"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " -"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" +":py:obj:`aggregate_fit " +"`\\ \\(server\\_round\\, " +"results\\, failures\\)" -#: ../../source/ref-changelog.md:380 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**介绍 Flower Swift SDK** " -"([#1858](https://github.com/adap/flower/pull/1858), " -"[#1897](https://github.com/adap/flower/pull/1897))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:382 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"This is the first preview release of the Flower Swift SDK. Flower support" -" on iOS is improving, and alongside the Swift SDK and code example, there" -" is now also an iOS quickstart tutorial." 
+":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" msgstr "" -"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " -"和代码示例外,现在还有 iOS 快速入门教程。" +":py:obj:`configure_fit " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:384 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**介绍Flower Android SDK** " -"([#2131](https://github.com/adap/flower/pull/2131))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:386 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"This is the first preview release of the Flower Kotlin SDK. Flower " -"support on Android is improving, and alongside the Kotlin SDK and code " -"example, there is now also an Android quickstart tutorial." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " -"和代码示例,现在还有 Android 快速入门教程。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:388 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce new end-to-end testing infrastructure** " -"([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " -"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " -"[#2071](https://github.com/adap/flower/pull/2071), " -"[#2072](https://github.com/adap/flower/pull/2072), " -"[#2068](https://github.com/adap/flower/pull/2068), " -"[#2067](https://github.com/adap/flower/pull/2067), " -"[#2069](https://github.com/adap/flower/pull/2069), " -"[#2073](https://github.com/adap/flower/pull/2073), " -"[#2070](https://github.com/adap/flower/pull/2070), " -"[#2074](https://github.com/adap/flower/pull/2074), " 
-"[#2082](https://github.com/adap/flower/pull/2082), " -"[#2084](https://github.com/adap/flower/pull/2084), " -"[#2093](https://github.com/adap/flower/pull/2093), " -"[#2109](https://github.com/adap/flower/pull/2109), " -"[#2095](https://github.com/adap/flower/pull/2095), " -"[#2140](https://github.com/adap/flower/pull/2140), " -"[#2137](https://github.com/adap/flower/pull/2137), " -"[#2165](https://github.com/adap/flower/pull/2165))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:390 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"A new testing infrastructure ensures that new changes stay compatible " -"with existing framework integrations or strategies." -msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" - -#: ../../source/ref-changelog.md:392 -msgid "**Deprecate Python 3.7**" -msgstr "** 过时的 Python 3.7**" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:394 -msgid "" -"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" -" Python 3.7 is now deprecated and will be removed in an upcoming release." -msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" +#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst:2 +#, fuzzy +msgid "FedYogi" +msgstr "FedYogi" + +#: flwr.server.strategy.fedyogi.FedYogi:32 of +#, fuzzy +msgid "Server-side learning rate. Defaults to 1e-2." +msgstr "服务器端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedyogi.FedYogi:34 of +#, fuzzy +msgid "Client-side learning rate. Defaults to 0.0316." +msgstr "客户端学习率。默认为 1e-1。" + +#: flwr.server.strategy.fedyogi.FedYogi:40 of +#, fuzzy +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-3." 
+msgstr "控制算法的适应度。默认为 1e-9。" -#: ../../source/ref-changelog.md:396 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add new** `FedTrimmedAvg` **strategy** " -"([#1769](https://github.com/adap/flower/pull/1769), " -"[#1853](https://github.com/adap/flower/pull/1853))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769)," -" [#1853](https://github.com/adap/flower/pull/1853)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:398 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " -"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:400 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce start_driver** " -"([#1697](https://github.com/adap/flower/pull/1697))" -msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:402 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"In addition to `start_server` and using the raw Driver API, there is a " -"new `start_driver` function that allows for running `start_server` " -"scripts as a Flower driver with only a single-line code change. 
Check out" -" the `mt-pytorch` code example to see a working example using " -"`start_driver`." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " -"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " -"`start_driver` 的工作示例。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:404 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add parameter aggregation to** `mt-pytorch` **code example** " -"([#1785](https://github.com/adap/flower/pull/1785))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"为 `mt-pytorch` **代码示例**添加参数聚合 " -"([#1785](https://github.com/adap/flower/pull/1785))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:406 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The `mt-pytorch` example shows how to aggregate parameters when writing a" -" driver script. The included `driver.py` and `server.py` have been " -"aligned to demonstrate both the low-level way and the high-level way of " -"building server-side logic." 
+":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " -"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:408 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Migrate experimental REST API to Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**将实验性 REST API 移植到 Starlette** " -"([2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:410 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The (experimental) REST API used to be implemented in " -"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" -" use [Starlette](https://www.starlette.io/) directly." +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " -"[Starlette](https://www.starlette.io/) 。" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:412 +#: ../../source/ref-api/flwr.server.strategy.Krum.rst:2 +#, fuzzy +msgid "Krum" +msgstr "Krum" + +#: flwr.server.strategy.krum.Krum:3 of +#, fuzzy +msgid "Implementation based on https://arxiv.org/abs/1703.02757" +msgstr "实施基于 https://arxiv.org/abs/2304.07537。" + +#: flwr.server.strategy.krum.Krum:17 of msgid "" -"Please note: The REST request-response API is still experimental and will" -" likely change significantly over time." -msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." 
+msgstr "求平均值前保留的客户端数量(MultiKrum)。默认值为 0,在这种情况下会应用经典 Krum。" -#: ../../source/ref-changelog.md:414 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce experimental gRPC request-response API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" msgstr "" -"**引入实验性 gRPC 请求-响应 API** " -"([#1867](https://github.com/adap/flower/pull/1867), " -"[#1901](https://github.com/adap/flower/pull/1901)" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\, " +"results\\, ...\\)" -#: ../../source/ref-changelog.md:416 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"In addition to the existing gRPC API (based on bidirectional streaming) " -"and the experimental REST API, there is now a new gRPC API that uses a " -"request-response model to communicate with client nodes." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " -"应用程序接口,它使用请求-响应模型与客户端节点通信。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" + +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." +msgstr "使用 Krum 汇总拟合结果。" -#: ../../source/ref-changelog.md:418 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Please note: The gRPC request-response API is still experimental and will" -" likely change significantly over time." 
-msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" +msgstr "" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\, " +"parameters\\, ...\\)" -#: ../../source/ref-changelog.md:420 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Replace the experimental** `start_client(rest=True)` **with the new** " -"`start_client(transport=\"rest\")` " -"([#1880](https://github.com/adap/flower/pull/1880))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**用新的** `start_client(transport=\"rest\")` 替换实验性** " -"`start_client(rest=True)` " -"([#1880](https://github.com/adap/flower/pull/1880))" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:422 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"The (experimental) `start_client` argument `rest` was deprecated in " -"favour of a new argument `transport`. `start_client(transport=\"rest\")` " -"will yield the same behaviour as `start_client(rest=True)` did before. " -"All code should migrate to the new argument `transport`. The deprecated " -"argument `rest` will be removed in a future release." 
+":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" -" `transport`。过时的参数 `rest` 将在今后的版本中删除。" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:424 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Add a new gRPC option** " -"([#2197](https://github.com/adap/flower/pull/2197))" -msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:426 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" -" option set to 0 by default. This prevents the clients from sending " -"keepalive pings when there is no outstanding stream." +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " -"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:428 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Improve example notebooks** " -"([#2005](https://github.com/adap/flower/pull/2005))" -msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" +msgstr "" +":py:obj:`num_fit_clients `\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:430 -msgid "There's a new 30min Federated Learning PyTorch tutorial!" -msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" 
+#: ../../source/ref-api/flwr.server.strategy.QFedAvg.rst:2 +#, fuzzy +msgid "QFedAvg" +msgstr "DP-FedAvg" -#: ../../source/ref-changelog.md:432 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" msgstr "" -"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " -"[#1873](https://github.com/adap/flower/pull/1873), " -"[#1981](https://github.com/adap/flower/pull/1981), " -"[#1988](https://github.com/adap/flower/pull/1988), " -"[#1984](https://github.com/adap/flower/pull/1984), " -"[#1982](https://github.com/adap/flower/pull/1982), " -"[#2112](https://github.com/adap/flower/pull/2112), " -"[#2144](https://github.com/adap/flower/pull/2144), " -"[#2174](https://github.com/adap/flower/pull/2174), " -"[#2225](https://github.com/adap/flower/pull/2225), " -"[#2183](https://github.com/adap/flower/pull/2183))" +":py:obj:`aggregate_evaluate " +"`\\ \\(server\\_round\\," +" results\\, ...\\)" -#: ../../source/ref-changelog.md:434 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"Many examples have received significant updates, including simplified " -"advanced-tensorflow and advanced-pytorch examples, improved macOS " -"compatibility of TensorFlow examples, and code examples for simulation. 
A" -" major upgrade is that all code examples now have a `requirements.txt` " -"(in addition to `pyproject.toml`)." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " -"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " -"\"requirements.txt\"(除 \"pyproject.toml \"外)。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:436 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**General improvements** " -"([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" msgstr "" -"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " -"[#1866](https://github.com/adap/flower/pull/1866), " -"[#1884](https://github.com/adap/flower/pull/1884), " -"[#1837](https://github.com/adap/flower/pull/1837), " -"[#1477](https://github.com/adap/flower/pull/1477), " -"[#2171](https://github.com/adap/flower/pull/2171))" - -#: ../../source/ref-changelog.md:444 -msgid "v1.4.0 (2023-04-21)" -msgstr "v1.4.0 (2023-04-21)" +":py:obj:`configure_evaluate " +"`\\ \\(server\\_round\\," +" parameters\\, ...\\)" -#: ../../source/ref-changelog.md:450 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. 
Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " -"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " -"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " -"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " -"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:454 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " -"example)** ([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " -"([#1694](https://github.com/adap/flower/pull/1694), " -"[#1709](https://github.com/adap/flower/pull/1709), " -"[#1715](https://github.com/adap/flower/pull/1715), " -"[#1717](https://github.com/adap/flower/pull/1717), " -"[#1763](https://github.com/adap/flower/pull/1763), " -"[#1795](https://github.com/adap/flower/pull/1795))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:456 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"XGBoost is a tree-based ensemble 
machine learning algorithm that uses " -"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" -" " -"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," -" and a [code example](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart) that demonstrates the usage of this new strategy in " -"an XGBoost project." +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" msgstr "" -"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " -"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" -"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:458 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"**Introduce iOS SDK (preview)** " -"([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " -"[#1764](https://github.com/adap/flower/pull/1764))" +":py:obj:`num_evaluation_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:460 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1::1 of +#, fuzzy msgid "" -"This is a major update for anyone wanting to implement Federated Learning" -" on iOS mobile devices. We now have a swift iOS SDK present under " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" that will facilitate greatly the app creating process. To showcase its " -"use, the [iOS " -"example](https://github.com/adap/flower/tree/main/examples/ios) has also " -"been updated!" 
+":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" msgstr "" -"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " -"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" -" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " -"示例](https://github.com/adap/flower/tree/main/examples/ios)!" +":py:obj:`num_fit_clients " +"`\\ " +"\\(num\\_available\\_clients\\)" -#: ../../source/ref-changelog.md:462 -msgid "" -"**Introduce new \"What is Federated Learning?\" tutorial** " -"([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721))" -msgstr "" -"**引入新的 " -"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " -"[#1721](https://github.com/adap/flower/pull/1721)" +#: ../../source/ref-api/flwr.server.strategy.Strategy.rst:2 +#, fuzzy +msgid "Strategy" +msgstr "Krum 策略。" -#: ../../source/ref-changelog.md:464 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" -"what-is-federated-learning.html) in our documentation explains the basics" -" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" -" Learning to start their journey with Flower. Forward it to anyone who's " -"interested in Federated Learning!" +":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" msgstr "" -"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" -"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " -"之旅。请转发给对联邦学习感兴趣的人!" 
+":py:obj:`aggregate_evaluate " +"`\\ " +"\\(server\\_round\\, results\\, ...\\)" -#: ../../source/ref-changelog.md:466 -msgid "" -"**Introduce new Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679))" -msgstr "" -"**引入新的 Flower Baseline: FedProx MNIST** " -"([#1513](https://github.com/adap/flower/pull/1513), " -"[#1680](https://github.com/adap/flower/pull/1680), " -"[#1681](https://github.com/adap/flower/pull/1681), " -"[#1679](https://github.com/adap/flower/pull/1679)" +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +msgid "Aggregate evaluation results." +msgstr "聚合评估结果。" -#: ../../source/ref-changelog.md:468 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"This new baseline replicates the MNIST+CNN task from the paper [Federated" -" Optimization in Heterogeneous Networks (Li et al., " -"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," -" which aims at making convergence more robust in heterogeneous settings." +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" msgstr "" -"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " -"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " -"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" +":py:obj:`aggregate_fit `\\ " +"\\(server\\_round\\, results\\, failures\\)" -#: ../../source/ref-changelog.md:470 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." 
+msgstr "汇总训练结果。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Introduce new Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"**引入新的 Flower Baseline: FedAvg FEMNIST** " -"([#1655](https://github.com/adap/flower/pull/1655))" +":py:obj:`configure_evaluate " +"`\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:472 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"This new baseline replicates an experiment evaluating the performance of " -"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " -"Benchmark for Federated Settings (Caldas et al., " -"2018)](https://arxiv.org/abs/1812.01097)." +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" msgstr "" -"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " -"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" +":py:obj:`configure_fit `\\ " +"\\(server\\_round\\, parameters\\, ...\\)" -#: ../../source/ref-changelog.md:474 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"**Introduce (experimental) REST API** " -"([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " -"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" msgstr "" -"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " -"[#1690](https://github.com/adap/flower/pull/1690), " 
-"[#1695](https://github.com/adap/flower/pull/1695), " -"[#1712](https://github.com/adap/flower/pull/1712), " -"[#1802](https://github.com/adap/flower/pull/1802), " -"[#1770](https://github.com/adap/flower/pull/1770), " -"[#1733](https://github.com/adap/flower/pull/1733))" +":py:obj:`evaluate `\\ " +"\\(server\\_round\\, parameters\\)" -#: ../../source/ref-changelog.md:476 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." +msgstr "评估当前的模型参数。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: of +#, fuzzy msgid "" -"A new REST API has been introduced as an alternative to the gRPC-based " -"communication stack. In this initial version, the REST API only supports " -"anonymous clients." -msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" +msgstr "" +":py:obj:`initialize_parameters " +"`\\ " +"\\(client\\_manager\\)" -#: ../../source/ref-changelog.md:478 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1::1 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." +msgstr "初始化(全局)模型参数。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of msgid "" -"Please note: The REST API is still experimental and will likely change " -"significantly over time." -msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." 
+msgstr "" +"从先前选定和配置的客户端进行的成功更新。每一对`(ClientProxy, " +"FitRes)`都是来自先前选定客户端的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,`failures`中都应该有一个`Exception`。" -#: ../../source/ref-changelog.md:480 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." +msgstr "服务器等待客户端更新时发生的异常。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of msgid "" -"**Improve the (experimental) Driver API** " -"([#1663](https://github.com/adap/flower/pull/1663), " -"[#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." +msgstr "**aggregation_result** -- 汇总的评估结果。聚合通常使用某种加权平均值。" + +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." 
msgstr "" -"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," -" [#1666](https://github.com/adap/flower/pull/1666), " -"[#1667](https://github.com/adap/flower/pull/1667), " -"[#1664](https://github.com/adap/flower/pull/1664), " -"[#1675](https://github.com/adap/flower/pull/1675), " -"[#1676](https://github.com/adap/flower/pull/1676), " -"[#1693](https://github.com/adap/flower/pull/1693), " -"[#1662](https://github.com/adap/flower/pull/1662), " -"[#1794](https://github.com/adap/flower/pull/1794))" +"来自先前选定和配置的客户端的成功更新。每一对`(ClientProxy, " +"FitRes)`都构成先前选定的客户端之一的一次成功更新。但并非所有先前选定的客户机都一定包含在此列表中:客户机可能会退出,不提交结果。对于每个没有提交更新的客户端,\"失败" +" \"中都应该有一个 \"异常\"。" -#: ../../source/ref-changelog.md:482 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of msgid "" -"The Driver API is still an experimental feature, but this release " -"introduces some major upgrades. One of the main improvements is the " -"introduction of an SQLite database to store server state on disk (instead" -" of in-memory). Another improvement is that tasks (instructions or " -"results) that have been delivered will now be deleted. This greatly " -"improves the memory efficiency of a long-running Flower server." +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." 
msgstr "" -"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " -"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " -"服务器的内存效率。" +"**parameters** -- 如果返回参数,那么服务器将把这些参数作为新的全局模型参数(即用本方法返回的参数替换之前的参数)。如果返回 " +"\"无\"(例如,因为只有失败而没有可行的结果),那么服务器将不再更新之前的模型参数,本轮收到的更新将被丢弃,全局模型参数保持不变。" -#: ../../source/ref-changelog.md:484 +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of msgid "" -"**Fix spilling issues related to Ray during simulations** " -"([#1698](https://github.com/adap/flower/pull/1698))" -msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." +msgstr "该函数可用于对模型参数进行集中(即服务器端)评估。" -#: ../../source/ref-changelog.md:486 +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of msgid "" -"While running long simulations, `ray` was sometimes spilling huge amounts" -" of data that would make the training unable to continue. This is now " -"fixed! 🎉" -msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." +msgstr "**evaluation_result** -- 评估结果,通常是一个元组,包含损失值和一个字典,字典中包含特定任务的指标(如准确率)。" -#: ../../source/ref-changelog.md:488 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of msgid "" -"**Add new example using** `TabNet` **and Flower** " -"([#1725](https://github.com/adap/flower/pull/1725))" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." 
+msgstr "**parameters** -- 如果返回参数,服务器将把这些参数视为初始全局模型参数。" + +#: ../../source/ref-api/flwr.server.workflow.rst:2 +#, fuzzy +msgid "workflow" +msgstr "工作流程" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy +msgid "" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" msgstr "" -"** 添加使用** `TabNet` ** 的新示例** " -"([#1725](https://github.com/adap/flower/pull/1725))" +":py:obj:`DefaultWorkflow `\\ " +"\\(\\[fit\\_workflow\\, ...\\]\\)" -#: ../../source/ref-changelog.md:490 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.default_workflows.DefaultWorkflow:1 of +#, fuzzy +msgid "Default workflow in Flower." +msgstr "Flower 中的默认工作流程。" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy msgid "" -"TabNet is a powerful and flexible framework for training machine learning" -" models on tabular data. We now have a federated example using Flower: " -"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" -"/quickstart-tabnet)." +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" msgstr "" -"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" -"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" -"tabnet)。" +":py:obj:`SecAggPlusWorkflow `\\ " +"\\(num\\_shares\\, ...\\[\\, ...\\]\\)" -#: ../../source/ref-changelog.md:492 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:1 +#: of +#, fuzzy +msgid "The workflow for the SecAgg+ protocol." 
+msgstr "SecAgg+ 协议的工作流程。" + +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#, fuzzy msgid "" -"**Add new how-to guide for monitoring simulations** " -"([#1649](https://github.com/adap/flower/pull/1649))" -msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" +msgstr "" +":py:obj:`SecAggWorkflow `\\ " +"\\(reconstruction\\_threshold\\, \\*\\)" -#: ../../source/ref-changelog.md:494 +#: ../../source/ref-api/flwr.server.workflow.rst:24::1 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#, fuzzy +msgid "The workflow for the SecAgg protocol." +msgstr "SecAgg 协议的工作流程。" + +#: ../../source/ref-api/flwr.server.workflow.DefaultWorkflow.rst:2 +#, fuzzy +msgid "DefaultWorkflow" +msgstr "工作流程" + +#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst:2 +#, fuzzy +msgid "SecAggPlusWorkflow" +msgstr "工作流程" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:3 +#: of +#, fuzzy msgid "" -"We now have a documentation guide to help users monitor their performance" -" during simulations." -msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" +"The SecAgg+ protocol ensures the secure summation of integer vectors " +"owned by multiple parties, without accessing any individual integer " +"vector. This workflow allows the server to compute the weighted average " +"of model parameters across all clients, ensuring individual contributions" +" remain private. This is achieved by clients sending both, a weighting " +"factor and a weighted version of the locally updated parameters, both of " +"which are masked for privacy. Specifically, each client uploads \"[w, w *" +" params]\" with masks, where weighting factor 'w' is the number of " +"examples ('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. 
The server then aggregates " +"these contributions to compute the weighted average of model parameters." +msgstr "" +"SecAgg+ " +"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," +" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " +"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" + +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:14 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:14 +#: of +msgid "The protocol involves four main stages:" +msgstr "" -#: ../../source/ref-changelog.md:496 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:16 +#: of msgid "" -"**Add training metrics to** `History` **object during simulations** " -"([#1696](https://github.com/adap/flower/pull/1696))" +"'setup': Send SecAgg+ configuration to clients and collect their public " +"keys." msgstr "" -"**在模拟过程中为***`历史`***对象添加训练指标*** " -"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:498 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:17 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:17 +#: of msgid "" -"The `fit_metrics_aggregation_fn` can be used to aggregate training " -"metrics, but previous releases did not save the results in the `History` " -"object. This is now the case!" +"'share keys': Broadcast public keys among clients and collect encrypted " +"secret key shares." msgstr "" -"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 \"History " -"\"对象中。现在可以了!" 
-#: ../../source/ref-changelog.md:500 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:19 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:19 +#: of +#, fuzzy msgid "" -"**General improvements** " -"([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " -"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " 
-"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" -msgstr "" -"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " -"[#1646](https://github.com/adap/flower/pull/1646), " -"[#1647](https://github.com/adap/flower/pull/1647), " -"[#1471](https://github.com/adap/flower/pull/1471), " -"[#1648](https://github.com/adap/flower/pull/1648), " -"[#1651](https://github.com/adap/flower/pull/1651), " -"[#1652](https://github.com/adap/flower/pull/1652), " -"[#1653](https://github.com/adap/flower/pull/1653), " -"[#1659](https://github.com/adap/flower/pull/1659), " -"[#1665](https://github.com/adap/flower/pull/1665), " -"[#1670](https://github.com/adap/flower/pull/1670), " -"[#1672](https://github.com/adap/flower/pull/1672), " -"[#1677](https://github.com/adap/flower/pull/1677), " -"[#1684](https://github.com/adap/flower/pull/1684), " -"[#1683](https://github.com/adap/flower/pull/1683), " -"[#1686](https://github.com/adap/flower/pull/1686), " -"[#1682](https://github.com/adap/flower/pull/1682), " -"[#1685](https://github.com/adap/flower/pull/1685), " 
-"[#1692](https://github.com/adap/flower/pull/1692), " -"[#1705](https://github.com/adap/flower/pull/1705), " -"[#1708](https://github.com/adap/flower/pull/1708), " -"[#1711](https://github.com/adap/flower/pull/1711), " -"[#1713](https://github.com/adap/flower/pull/1713), " -"[#1714](https://github.com/adap/flower/pull/1714), " -"[#1718](https://github.com/adap/flower/pull/1718), " -"[#1716](https://github.com/adap/flower/pull/1716), " -"[#1723](https://github.com/adap/flower/pull/1723), " -"[#1735](https://github.com/adap/flower/pull/1735), " -"[#1678](https://github.com/adap/flower/pull/1678), " -"[#1750](https://github.com/adap/flower/pull/1750), " -"[#1753](https://github.com/adap/flower/pull/1753), " -"[#1736](https://github.com/adap/flower/pull/1736), " -"[#1766](https://github.com/adap/flower/pull/1766), " -"[#1760](https://github.com/adap/flower/pull/1760), " -"[#1775](https://github.com/adap/flower/pull/1775), " -"[#1776](https://github.com/adap/flower/pull/1776), " -"[#1777](https://github.com/adap/flower/pull/1777), " -"[#1779](https://github.com/adap/flower/pull/1779), " -"[#1784](https://github.com/adap/flower/pull/1784), " -"[#1773](https://github.com/adap/flower/pull/1773), " -"[#1755](https://github.com/adap/flower/pull/1755), " -"[#1789](https://github.com/adap/flower/pull/1789), " -"[#1788](https://github.com/adap/flower/pull/1788), " -"[#1798](https://github.com/adap/flower/pull/1798), " -"[#1799](https://github.com/adap/flower/pull/1799), " -"[#1739](https://github.com/adap/flower/pull/1739), " -"[#1800](https://github.com/adap/flower/pull/1800), " -"[#1804](https://github.com/adap/flower/pull/1804), " -"[#1805](https://github.com/adap/flower/pull/1805))" - -#: ../../source/ref-changelog.md:508 -msgid "v1.3.0 (2023-02-06)" -msgstr "v1.3.0 (2023-02-06)" - -#: ../../source/ref-changelog.md:514 -msgid "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -msgstr "" -"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " -"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +"'collect masked vectors': Forward encrypted secret key shares to target " +"clients and collect masked model parameters." +msgstr "收集屏蔽向量\": 向目标客户端转发加密密钥共享,并收集屏蔽模型参数。" -#: ../../source/ref-changelog.md:518 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:21 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:21 +#: of +#, fuzzy msgid "" -"**Add support for** `workload_id` **and** `group_id` **in Driver API** " -"([#1595](https://github.com/adap/flower/pull/1595))" -msgstr "" -"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " -"([#1595](https://github.com/adap/flower/pull/1595))" +"'unmask': Collect secret key shares to decrypt and aggregate the model " +"parameters." +msgstr "解密\": 收集密钥共享,解密并汇总模型参数。" -#: ../../source/ref-changelog.md:520 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:23 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:23 +#: of +#, fuzzy msgid "" -"The (experimental) Driver API now supports a `workload_id` that can be " -"used to identify which workload a task belongs to. It also supports a new" -" `group_id` that can be used, for example, to indicate the current " -"training round. Both the `workload_id` and `group_id` enable client nodes" -" to decide whether they want to handle a task or not." -msgstr "" -"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 " -"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` " -"客户端节点可以决定是否要处理某个任务。" +"Only the aggregated model parameters are exposed and passed to " +"`Strategy.aggregate_fit`, ensuring individual data privacy." 
+msgstr "只有聚合模型参数才会公开并传递给 `Strategy.aggregate_fit`,从而确保个人数据隐私。" -#: ../../source/ref-changelog.md:522 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:26 +#: of +#, fuzzy msgid "" -"**Make Driver API and Fleet API address configurable** " -"([#1637](https://github.com/adap/flower/pull/1637))" +"The number of shares into which each client's private key is split under " +"the SecAgg+ protocol. If specified as a float, it represents the " +"proportion of all selected clients, and the number of shares will be set " +"dynamically in the run time. A private key can be reconstructed from " +"these shares, allowing for the secure aggregation of model updates. Each " +"client sends one share to each of its neighbors while retaining one." msgstr "" -"**使Driver API 和Fleet " -"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))" +"在 SecAgg+ " +"协议下,每个客户的私钥被分成的份数。如果指定为浮点数,则代表所有选定客户的比例,份额数将在运行时动态设置。私钥可以从这些份额中重建,从而实现模型更新的安全聚合。每个客户端向其每个邻居发送一份,同时保留一份。" -#: ../../source/ref-changelog.md:524 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:26 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:33 +#: of +#, fuzzy msgid "" -"The (experimental) long-running Flower server (Driver API and Fleet API) " -"can now configure the server address of both Driver API (via `--driver-" -"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" -msgstr "" -"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 " -"`--driver-api-address`)和 Fleet API(通过 `-fleet-api-address`)的服务器地址:" +"The minimum number of shares required to reconstruct a client's private " +"key, or, if specified as a float, it represents the proportion of the " +"total number of shares needed for reconstruction. This threshold ensures " +"privacy by allowing for the recovery of contributions from dropped " +"clients during aggregation, without compromising individual client data." 
+msgstr "重建客户私钥所需的最小份数,如果指定为浮动,则表示重建所需的份数占总份数的比例。这个阈值允许在聚合过程中恢复掉线客户的贡献,从而确保隐私,而不会泄露单个客户的数据。" -#: ../../source/ref-changelog.md:526 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:32 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:39 +#: of #, fuzzy msgid "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" -msgstr "" -"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " -"\"0.0.0.0:8086\"`" - -#: ../../source/ref-changelog.md:528 -msgid "Both IPv4 and IPv6 addresses are supported." -msgstr "支持 IPv4 和 IPv6 地址。" +"The maximum value of the weight that can be assigned to any single " +"client's update during the weighted average calculation on the server " +"side, e.g., in the FedAvg algorithm." +msgstr "在服务器端进行加权平均计算(如 FedAvg 算法)时,可分配给任何单个客户端更新的权重的最大值。" -#: ../../source/ref-changelog.md:530 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:36 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:43 +#: of +#, fuzzy msgid "" -"**Add new example of Federated Learning using fastai and Flower** " -"([#1598](https://github.com/adap/flower/pull/1598))" -msgstr "" -"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " -"([#1598](https://github.com/adap/flower/pull/1598))" +"The range within which model parameters are clipped before quantization. " +"This parameter ensures each model parameter is bounded within " +"[-clipping_range, clipping_range], facilitating quantization." +msgstr "量化前模型参数的裁剪范围。该参数可确保每个模型参数都在 [-clipping_range, clipping_range] 范围内,便于量化。" -#: ../../source/ref-changelog.md:532 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:40 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:47 +#: of +#, fuzzy msgid "" -"A new code example (`quickstart-fastai`) demonstrates federated learning " -"with [fastai](https://www.fast.ai/) and Flower. 
You can find it here: " -"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" -"/quickstart-fastai)." -msgstr "" -"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " -"Flower 的联邦学习。您可以在这里找到它: [quickstart-" -"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" -"fastai)。" +"The size of the range into which floating-point model parameters are " +"quantized, mapping each parameter to an integer in [0, " +"quantization_range-1]. This facilitates cryptographic operations on the " +"model updates." +msgstr "浮点模型参数量化范围的大小,将每个参数映射为 [0, quantization_range-1] 中的整数。这有助于对模型更新进行加密操作。" -#: ../../source/ref-changelog.md:534 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:44 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:51 +#: of +#, fuzzy msgid "" -"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" -" versions of Android** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"The range of values from which random mask entries are uniformly sampled " +"([0, modulus_range-1]). `modulus_range` must be less than 4294967296. " +"Please use 2**n values for `modulus_range` to prevent overflow issues." msgstr "" -"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " -"([#1603](https://github.com/adap/flower/pull/1603))" +"对随机掩码条目进行均匀采样的数值范围([0, modulus_range-1])。modulus_range \"必须小于 " +"4294967296。为防止出现溢出问题,请为 `modulus_range` 使用 2**n 的值。" -#: ../../source/ref-changelog.md:536 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:48 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:55 +#: of +#, fuzzy msgid "" -"The Android code example has received a substantial update: the project " -"is compatible with Flower 1.0 (and later), the UI received a full " -"refresh, and the project is updated to be compatible with newer Android " -"tooling." 
-msgstr "" -"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" -" 工具。" +"The timeout duration in seconds. If specified, the workflow will wait for" +" replies for this duration each time. If `None`, there is no time limit " +"and the workflow will wait until replies for all messages are received." +msgstr "超时时间(秒)。如果指定,工作流将在每次等待回复的时间内等待回复。如果指定为 \"无\",则没有时间限制,工作流程将一直等待到收到所有信息的回复。" -#: ../../source/ref-changelog.md:538 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:62 +#: of +#, fuzzy msgid "" -"**Add new `FedProx` strategy** " -"([#1619](https://github.com/adap/flower/pull/1619))" -msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" +"Generally, higher `num_shares` means more robust to dropouts while " +"increasing the computational costs; higher `reconstruction_threshold` " +"means better privacy guarantees but less tolerance to dropouts." +msgstr "一般来说,\"份额数 \"越高,意味着对丢弃的鲁棒性越强,同时计算成本也会增加;\"重构阈值 \"越高,意味着隐私保证越好,但对丢弃的容忍度越低。" -#: ../../source/ref-changelog.md:540 -msgid "" -"This " -"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" -" is almost identical to " -"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," -" but helps users replicate what is described in this " -"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " -"parameter called `proximal_mu` to regularize the local models with " -"respect to the global models." 
-msgstr "" -"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" -" `proximal_mu`的参数,使局部模型与全局模型正则化。" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:59 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:65 +#: of +#, fuzzy +msgid "Too large `max_weight` may compromise the precision of the quantization." +msgstr "过大的 `max_weight` 可能会影响量化的精度。" -#: ../../source/ref-changelog.md:542 -msgid "" -"**Add new metrics to telemetry events** " -"([#1640](https://github.com/adap/flower/pull/1640))" -msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:60 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:66 +#: of +#, fuzzy +msgid "`modulus_range` must be 2**n and larger than `quantization_range`." +msgstr "modulus_range \"必须为 2**n,且大于 \"quantization_range\"。" -#: ../../source/ref-changelog.md:544 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:67 +#: of +#, fuzzy msgid "" -"An updated event structure allows, for example, the clustering of events " -"within the same workload." -msgstr "例如,更新后的事件结构可以将同一工作负载中的事件集中在一起。" +"When `num_shares` is a float, it is interpreted as the proportion of all " +"selected clients, and hence the number of shares will be determined in " +"the runtime. This allows for dynamic adjustment based on the total number" +" of participating clients." 
+msgstr "当 `num_shares` 为浮点数时,它被解释为所有选定客户端的比例,因此份额数将在运行时确定。这样就可以根据参与客户端的总数进行动态调整。" -#: ../../source/ref-changelog.md:546 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:70 +#: of +#, fuzzy msgid "" -"**Add new custom strategy tutorial section** " -"[#1623](https://github.com/adap/flower/pull/1623)" -msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" +"Similarly, when `reconstruction_threshold` is a float, it is interpreted " +"as the proportion of the number of shares needed for the reconstruction " +"of a private key. This feature enables flexibility in setting the " +"security threshold relative to the number of distributed shares." +msgstr "" +"同样,当 `reconstruction_threshold` " +"为浮点数时,它被解释为重建私钥所需的份额数比例。这一功能使我们可以根据分发的份额数灵活设置安全阈值。" -#: ../../source/ref-changelog.md:548 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow:74 +#: of +#, fuzzy msgid "" -"The Flower tutorial now has a new section that covers implementing a " -"custom strategy from scratch: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"`num_shares`, `reconstruction_threshold`, and the quantization parameters" +" (`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg+" +" protocol." 
msgstr "" -"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +"份额数\"、\"重建阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模数范围\")在平衡 SecAgg+ " +"协议的隐私性、稳健性和效率方面发挥着关键作用。" -#: ../../source/ref-changelog.md:550 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"**Add new custom serialization tutorial section** " -"([#1622](https://github.com/adap/flower/pull/1622))" -msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" +msgstr "" +":py:obj:`collect_masked_vectors_stage " +"`\\" +" \\(driver\\, ...\\)" -#: ../../source/ref-changelog.md:552 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy +msgid "Execute the 'collect masked vectors' stage." 
+msgstr "执行 \"收集屏蔽向量 \"阶段。" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"The Flower tutorial now has a new section that covers custom " -"serialization: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" -"/tutorial-customize-the-client-pytorch.ipynb)" +":py:obj:`setup_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" -#: ../../source/ref-changelog.md:554 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.setup_stage:1 +#: of +#, fuzzy +msgid "Execute the 'setup' stage." 
+msgstr "执行 \"设置 \"阶段。" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"**General improvements** " -"([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github.com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github.com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github.com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " 
-"[#1586](https://github.com/adap/flower/pull/1586))" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" msgstr "" -"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), " -"[#1634](https://github.com/adap/flower/pull/1634), " -"[#1636](https://github.com/adap/flower/pull/1636), " -"[#1635](https://github.com/adap/flower/pull/1635), " -"[#1633](https://github.com/adap/flower/pull/1633), " -"[#1632](https://github.com/adap/flower/pull/1632), " -"[#1631](https://github.com/adap/flower/pull/1631), " -"[#1630](https://github.com/adap/flower/pull/1630), " -"[#1627](https://github. com/adap/flower/pull/1627), " -"[#1593](https://github.com/adap/flower/pull/1593), " -"[#1616](https://github.com/adap/flower/pull/1616), " -"[#1615](https://github.com/adap/flower/pull/1615), " -"[#1607](https://github.com/adap/flower/pull/1607), " -"[#1609](https://github.com/adap/flower/pull/1609), " -"[#1608](https://github.com/adap/flower/pull/1608), " -"[#1603](https://github.com/adap/flower/pull/1603), " -"[#1590](https://github. com/adap/flower/pull/1590), " -"[#1580](https://github.com/adap/flower/pull/1580), " -"[#1599](https://github.com/adap/flower/pull/1599), " -"[#1600](https://github.com/adap/flower/pull/1600), " -"[#1601](https://github.com/adap/flower/pull/1601), " -"[#1597](https://github.com/adap/flower/pull/1597), " -"[#1595](https://github.com/adap/flower/pull/1595), " -"[#1591](https://github.com/adap/flower/pull/1591), " -"[#1588](https://github. 
com/adap/flower/pull/1588), " -"[#1589](https://github.com/adap/flower/pull/1589), " -"[#1587](https://github.com/adap/flower/pull/1587), " -"[#1573](https://github.com/adap/flower/pull/1573), " -"[#1581](https://github.com/adap/flower/pull/1581), " -"[#1578](https://github.com/adap/flower/pull/1578), " -"[#1574](https://github.com/adap/flower/pull/1574), " -"[#1572](https://github.com/adap/flower/pull/1572), " -"[#1586](https://github.com/adap/flower/pull/1586))" +":py:obj:`share_keys_stage " +"`\\ " +"\\(driver\\, context\\, state\\)" -#: ../../source/ref-changelog.md:558 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.share_keys_stage:1 +#: of +#, fuzzy +msgid "Execute the 'share keys' stage." +msgstr "执行 \"共享密钥 \"阶段。" + +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"**Updated documentation** " -"([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " -"[#1628](https://github.com/adap/flower/pull/1628), " -"[#1620](https://github.com/adap/flower/pull/1620), " -"[#1618](https://github.com/adap/flower/pull/1618), " -"[#1617](https://github.com/adap/flower/pull/1617), " -"[#1613](https://github.com/adap/flower/pull/1613), " -"[#1614](https://github.com/adap/flower/pull/1614)))" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" -#: 
../../source/ref-changelog.md:560 ../../source/ref-changelog.md:627 -msgid "" -"As usual, the documentation has improved quite a bit. It is another step " -"in our effort to make the Flower documentation the best documentation of " -"any project. Stay tuned and as always, feel free to provide feedback!" -msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.unmask_stage:1 +#: of +#, fuzzy +msgid "Execute the 'unmask' stage." +msgstr "执行 \"解除屏蔽 \"阶段。" -#: ../../source/ref-changelog.md:566 -msgid "v1.2.0 (2023-01-13)" -msgstr "v1.2.0 (2023-01-13)" +#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst:2 +#, fuzzy +msgid "SecAggWorkflow" +msgstr "工作流程" -#: ../../source/ref-changelog.md:572 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:1 of +#, fuzzy msgid "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"Bases: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`" msgstr "" -"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." -" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +"基础: " +":py:class:`~flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow`." 
-#: ../../source/ref-changelog.md:576 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:3 of +#, fuzzy msgid "" -"**Introduce new Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" -msgstr "" -"**引入新的 Flower Baseline: FedAvg MNIST** " -"([#1497](https://github.com/adap/flower/pull/1497), " -"[#1552](https://github.com/adap/flower/pull/1552))" - -#: ../../source/ref-changelog.md:578 -msgid "" -"Over the coming weeks, we will be releasing a number of new reference " -"implementations useful especially to FL newcomers. They will typically " -"revisit well known papers from the literature, and be suitable for " -"integration in your own application or for experimentation, in order to " -"deepen your knowledge of FL in general. Today's release is the first in " -"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" +"The SecAgg protocol ensures the secure summation of integer vectors owned" +" by multiple parties, without accessing any individual integer vector. " +"This workflow allows the server to compute the weighted average of model " +"parameters across all clients, ensuring individual contributions remain " +"private. This is achieved by clients sending both, a weighting factor and" +" a weighted version of the locally updated parameters, both of which are " +"masked for privacy. Specifically, each client uploads \"[w, w * params]\"" +" with masks, where weighting factor 'w' is the number of examples " +"('num_examples') and 'params' represents the model parameters " +"('parameters') from the client's `FitRes`. The server then aggregates " +"these contributions to compute the weighted average of model parameters." 
msgstr "" -"在未来几周内,我们将发布一些新的参考,特别是对 FL " -"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " -"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" -"pack-fedavg-mnist-cnn/)" - -#: ../../source/ref-changelog.md:580 -msgid "" -"**Improve GPU support in simulations** " -"([#1555](https://github.com/adap/flower/pull/1555))" -msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" +"SecAgg " +"协议可确保对多方拥有的整数向量进行安全求和,而不会访问任何单个整数向量。该工作流程允许服务器计算所有客户端模型参数的加权平均值,确保个人贡献保持私密。这可以通过客户端同时发送加权因子和本地更新参数的加权版本来实现,为了保护隐私,两者都会被屏蔽。具体来说,每个客户端都会上传带掩码的\"[w," +" w * params]\",其中加权因子 \"w \"是示例数(\"num_examples\"),\"params \"代表客户端 " +"\"FitRes \"中的模型参数(\"parameters\")。然后,服务器会汇总这些贡献,计算模型参数的加权平均值。" -#: ../../source/ref-changelog.md:582 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:16 of msgid "" -"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" -" to improve GPU support. The update includes some of the hard-earned " -"lessons from scaling simulations in GPU cluster environments. New " -"defaults make running GPU-based simulations substantially more robust." +"'setup': Send SecAgg configuration to clients and collect their public " +"keys." msgstr "" -"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " -"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" -#: ../../source/ref-changelog.md:584 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:55 of +#, fuzzy msgid "" -"**Improve GPU support in Jupyter Notebook tutorials** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" -msgstr "" -"**改进 Jupyter Notebook 教程中的 GPU 支持** " -"([#1527](https://github.com/adap/flower/pull/1527), " -"[#1558](https://github.com/adap/flower/pull/1558))" +"Each client's private key is split into N shares under the SecAgg " +"protocol, where N is the number of selected clients." 
+msgstr "根据 SecAgg 协议,每个客户的私人密钥被分成 N 份,其中 N 是所选客户的数量。" -#: ../../source/ref-changelog.md:586 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:57 of +#, fuzzy msgid "" -"Some users reported that Jupyter Notebooks have not always been easy to " -"use on GPU instances. We listened and made improvements to all of our " -"Jupyter notebooks! Check out the updated notebooks here:" -msgstr "" -"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " -"笔记本进行了改进!点击这里查看更新后的笔记本:" +"Generally, higher `reconstruction_threshold` means better privacy " +"guarantees but less tolerance to dropouts." +msgstr "一般来说,\"重建阈值 \"越高,隐私保证就越好,但对丢包的容忍度就越低。" -#: ../../source/ref-changelog.md:588 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:61 of +#, fuzzy msgid "" -"[An Introduction to Federated Learning](https://flower.ai/docs/framework" -"/tutorial-get-started-with-flower-pytorch.html)" +"When `reconstruction_threshold` is a float, it is interpreted as the " +"proportion of the number of all selected clients needed for the " +"reconstruction of a private key. This feature enables flexibility in " +"setting the security threshold relative to the number of selected " +"clients." msgstr "" -"[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" -"flower-pytorch.html)" +"当 `reconstruction_threshold` " +"为浮点数时,它被解释为重建私钥所需的所有选定客户端数量的比例。此功能可根据所选客户端的数量灵活设置安全阈值。" -#: ../../source/ref-changelog.md:589 +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow:65 of +#, fuzzy msgid "" -"[Strategies in Federated Learning](https://flower.ai/docs/framework" -"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +"`reconstruction_threshold`, and the quantization parameters " +"(`clipping_range`, `quantization_range`, `modulus_range`) play critical " +"roles in balancing privacy, robustness, and efficiency within the SecAgg " +"protocol." 
msgstr "" -"[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" -"learning-strategy-pytorch.html)" +"重构阈值 \"和量化参数(\"裁剪范围\"、\"量化范围\"、\"模量范围\")在 SecAgg " +"协议中平衡隐私性、鲁棒性和效率方面起着至关重要的作用。" -#: ../../source/ref-changelog.md:590 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" -"-strategy-from-scratch-pytorch.html)" +":py:obj:`collect_masked_vectors_stage " +"`\\ " +"\\(driver\\, ...\\)" msgstr "" -"[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" -"scratch-pytorch.html)" +":py:obj:`collect_masked_vectors_stage " +"`\\(driver\\," +" ...\\)" -#: ../../source/ref-changelog.md:591 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-" -"customize-the-client-pytorch.html)" +":py:obj:`setup_stage `\\" +" \\(driver\\, context\\, state\\)" msgstr "" -"[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-" -"the-client-pytorch.html)" +":py:obj:`setup_stage " +"`\\(driver\\, context\\," +" state\\)" -#: ../../source/ref-changelog.md:593 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"**Introduce optional telemetry** " -"([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584))" +":py:obj:`share_keys_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " -"[#1544](https://github.com/adap/flower/pull/1544), " -"[#1584](https://github.com/adap/flower/pull/1584)" +"py:obj:`share_keys_stage " +"`\\(driver\\, " +"context\\, state\\)" -#: 
../../source/ref-changelog.md:595 +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow.collect_masked_vectors_stage:1::1 +#: of +#, fuzzy msgid "" -"After a [request for " -"feedback](https://github.com/adap/flower/issues/1534) from the community," -" the Flower open-source project introduces optional collection of " -"*anonymous* usage metrics to make well-informed decisions to improve " -"Flower. Doing this enables the Flower team to understand how Flower is " -"used and what challenges users might face." +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" msgstr "" -"在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " -"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" +":py:obj:`unmask_stage " +"`\\ \\(driver\\, " +"context\\, state\\)" -#: ../../source/ref-changelog.md:597 -msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users who do not want to share anonymous usage metrics. " -"[Read more.](https://flower.ai/docs/telemetry.html)." 
-msgstr "" -"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " -"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" +#: ../../source/ref-api/flwr.simulation.rst:2 +#, fuzzy +msgid "simulation" +msgstr "运行模拟" -#: ../../source/ref-changelog.md:599 +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -"**Introduce (experimental) Driver API** " -"([#1520](https://github.com/adap/flower/pull/1520), " -"[#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" msgstr "" -"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," -" [#1525](https://github.com/adap/flower/pull/1525), " -"[#1545](https://github.com/adap/flower/pull/1545), " -"[#1546](https://github.com/adap/flower/pull/1546), " -"[#1550](https://github.com/adap/flower/pull/1550), " -"[#1551](https://github.com/adap/flower/pull/1551), " -"[#1567](https://github.com/adap/flower/pull/1567))" +":py:obj:`run_simulation `\\ " +"\\(server\\_app\\, client\\_app\\, ...\\)" + +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.run_simulation.run_simulation:1 of +#, fuzzy +msgid "Run a Flower App using the Simulation Engine." +msgstr "使用模拟引擎运行花朵应用程序。" -#: ../../source/ref-changelog.md:601 +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -"Flower now has a new (experimental) Driver API which will enable fully " -"programmable, async, and multi-tenant Federated Learning and Federated " -"Analytics applications. Phew, that's a lot! 
Going forward, the Driver API" -" will be the abstraction that many upcoming features will be built on - " -"and you can start building those things now, too." +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" -"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " -"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " -"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" +":py:obj:`start_simulation `\\ \\(\\*\\," +" client\\_fn\\[\\, ...\\]\\)" -#: ../../source/ref-changelog.md:603 -msgid "" -"The Driver API also enables a new execution mode in which the server runs" -" indefinitely. Multiple individual workloads can run concurrently and " -"start and stop their execution independent of the server. This is " -"especially useful for users who want to deploy Flower in production." +#: ../../source/ref-api/flwr.simulation.rst:18::1 +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" -"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" -" Flower 的用户来说尤其有用。" -#: ../../source/ref-changelog.md:605 -msgid "" -"To learn more, check out the `mt-pytorch` code example. We look forward " -"to you feedback!" -msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" +#: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 +#, fuzzy +msgid "run\\_simulation" +msgstr "运行模拟" -#: ../../source/ref-changelog.md:607 +#: flwr.simulation.run_simulation.run_simulation:3 of +#, fuzzy msgid "" -"Please note: *The Driver API is still experimental and will likely change" -" significantly over time.*" -msgstr "请注意:Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*" +"The `ServerApp` to be executed. It will send messages to different " +"`ClientApp` instances running on different (virtual) SuperNodes." 
+msgstr "要执行的 `ServerApp`。它将向运行在不同(虚拟)超级节点上的不同 `ClientApp`实例发送消息。" -#: ../../source/ref-changelog.md:609 +#: flwr.simulation.run_simulation.run_simulation:6 of +#, fuzzy msgid "" -"**Add new Federated Analytics with Pandas example** " -"([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535))" -msgstr "" -"** 添加新的使用 Pandas " -"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), " -"[#1535](https://github.com/adap/flower/pull/1535)" +"The `ClientApp` to be executed by each of the SuperNodes. It will receive" +" messages sent by the `ServerApp`." +msgstr "由每个超级节点执行的 `ClientApp`。它将接收由 `ServerApp` 发送的信息。" -#: ../../source/ref-changelog.md:611 +#: flwr.simulation.run_simulation.run_simulation:9 of +#, fuzzy msgid "" -"A new code example (`quickstart-pandas`) demonstrates federated analytics" -" with Pandas and Flower. You can find it here: [quickstart-" -"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" -"pandas)." -msgstr "" -"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " -"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" -"/quickstart-pandas)。" +"Number of nodes that run a ClientApp. They can be sampled by a Driver in " +"the ServerApp and receive a Message describing what the ClientApp should " +"perform." +msgstr "运行 ClientApp 的节点数。它们可被 ServerApp 中的驱动程序采样,并接收描述 ClientApp 应执行的操作的信息。" + +#: flwr.simulation.run_simulation.run_simulation:12 of +#, fuzzy +msgid "A simulation backend that runs `ClientApp`s." +msgstr "运行 \"客户端应用程序 \"的模拟后台。" -#: ../../source/ref-changelog.md:613 +#: flwr.simulation.run_simulation.run_simulation:14 of msgid "" -"**Add new strategies: Krum and MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" +"'A dictionary to configure a backend. Separate dictionaries to configure " +"different elements of backend. 
Supported top-level keys are `init_args` " +"for values parsed to initialisation of backend, `client_resources` to " +"define the resources for clients, and `actor` to define the actor " +"parameters. Values supported in are those included by " +"`flwr.common.typing.ConfigsRecordValues`." msgstr "" -"**添加新策略: Krum 和 MultiKrum** " -"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/ref-changelog.md:615 +#: flwr.simulation.run_simulation.run_simulation:21 of +#, fuzzy msgid "" -"Edoardo, a computer science student at the Sapienza University of Rome, " -"contributed a new `Krum` strategy that enables users to easily use Krum " -"and MultiKrum in their workloads." +"A boolean to indicate whether to enable GPU growth on the main thread. " +"This is desirable if you make use of a TensorFlow model on your " +"`ServerApp` while having your `ClientApp` running on the same GPU. " +"Without enabling this, you might encounter an out-of-memory error because" +" TensorFlow, by default, allocates all GPU memory. Read more about how " +"`tf.config.experimental.set_memory_growth()` works in the TensorFlow " +"documentation: https://www.tensorflow.org/api/stable." 
msgstr "" -"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " -"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" +"布尔值,用于指示是否在主线程上启用 GPU 增长。如果您在 \"ServerApp \"上使用 TensorFlow 模型,同时让 " +"\"ClientApp \"在同一 GPU 上运行,则最好启用此选项。如果不启用此功能,您可能会遇到内存不足的错误,因为 TensorFlow " +"默认会分配所有 GPU 内存。有关 `tf.config.experimental.set_memory_growth()` " +"如何工作的更多信息,请参阅 TensorFlow 文档:https://www.tensorflow.org/api/stable。" -#: ../../source/ref-changelog.md:617 +#: flwr.simulation.run_simulation.run_simulation:28 of +#, fuzzy msgid "" -"**Update C++ example to be compatible with Flower v1.2.0** " -"([#1495](https://github.com/adap/flower/pull/1495))" -msgstr "" -"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " -"([#1495](https://github.com/adap/flower/pull/1495))" +"When disabled, only INFO, WARNING and ERROR log messages will be shown. " +"If enabled, DEBUG-level logs will be displayed." +msgstr "启用后,将只显示 INFO、WARNING 和 ERROR 日志信息。启用后,将显示 DEBUG 级日志。" -#: ../../source/ref-changelog.md:619 -msgid "" -"The C++ code example has received a substantial update to make it " -"compatible with the latest version of Flower." 
-msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" +#: ../../source/ref-api/flwr.simulation.start_simulation.rst:2 +#, fuzzy +msgid "start\\_simulation" +msgstr "start_simulation" -#: ../../source/ref-changelog.md:621 -msgid "" -"**General improvements** " -"([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github.com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" -msgstr "" -"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " -"[#1504](https://github.com/adap/flower/pull/1504), " -"[#1506](https://github.com/adap/flower/pull/1506), " -"[#1514](https://github.com/adap/flower/pull/1514), " -"[#1522](https://github.com/adap/flower/pull/1522), " -"[#1523](https://github.com/adap/flower/pull/1523), " -"[#1526](https://github. 
com/adap/flower/pull/1526), " -"[#1528](https://github.com/adap/flower/pull/1528), " -"[#1547](https://github.com/adap/flower/pull/1547), " -"[#1549](https://github.com/adap/flower/pull/1549), " -"[#1560](https://github.com/adap/flower/pull/1560), " -"[#1564](https://github.com/adap/flower/pull/1564), " -"[#1566](https://github.com/adap/flower/pull/1566))" +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "更新日志" -#: ../../source/ref-changelog.md:625 +#: ../../source/ref-changelog.md:3 +#, fuzzy +msgid "v1.11.1 (2024-09-11)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" +msgstr "感谢我们的贡献者" + +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"**Updated documentation** " -"([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " 
-"[#1515](https://github.com/adap/flower/pull/1515))" -msgstr "" -"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " -"[#1496](https://github.com/adap/flower/pull/1496), " -"[#1500](https://github.com/adap/flower/pull/1500), " -"[#1503](https://github.com/adap/flower/pull/1503), " -"[#1505](https://github.com/adap/flower/pull/1505), " -"[#1524](https://github.com/adap/flower/pull/1524), " -"[#1518](https://github.com/adap/flower/pull/1518), " -"[#1519](https://github.com/adap/flower/pull/1519), " -"[#1515](https://github.com/adap/flower/pull/1515))" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" +msgstr "在此,我们要特别感谢所有为 Flower 的新版本做出贡献的人员(按 `git shortlog` 顺序排列):" -#: ../../source/ref-changelog.md:629 +#: ../../source/ref-changelog.md:9 +#, fuzzy msgid "" -"One highlight is the new [first time contributor " -"guide](https://flower.ai/docs/first-time-contributors.html): if you've " -"never contributed on GitHub before, this is the perfect place to start!" +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -"其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" -"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:635 -msgid "v1.1.0 (2022-10-31)" -msgstr "v1.1.0 (2022-10-31)" +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "可选的改进措施" -#: ../../source/ref-changelog.md:639 +#: ../../source/ref-changelog.md:13 +#, fuzzy msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made the new version of Flower possible (in `git shortlog` order):" -msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" +msgstr "" +"**使** `get_parameters` **可配置** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/ref-changelog.md:641 +#: ../../source/ref-changelog.md:15 +#, fuzzy msgid "" -"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" -msgstr "" -"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " -"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " -"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " -"`danielnugraha`, `edogab33`" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:645 +#: ../../source/ref-changelog.md:17 +#, fuzzy msgid "" -"**Introduce Differential Privacy wrappers (preview)** " -"([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" -msgstr "" -"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " -"[#1460](https://github.com/adap/flower/pull/1460))" +"**Adjust framework name in templates docstrings** " +"([#4127](https://github.com/adap/flower/pull/4127))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-changelog.md:647 +#: ../../source/ref-changelog.md:19 +#, fuzzy msgid "" -"The first (experimental) preview of pluggable Differential Privacy " -"wrappers enables easy configuration and usage of differential privacy " -"(DP). The pluggable DP wrappers enable framework-agnostic **and** " -"strategy-agnostic usage of both client-side DP and server-side DP. Head " -"over to the Flower docs, a new explainer goes into more detail." 
+"**Update** `flwr new` **Hugging Face template** " +"([#4169](https://github.com/adap/flower/pull/4169))" msgstr "" -"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" -" Flower 文档,新的解释器会提供更多细节。" +"**新的Hugging Face Transformers代码示例** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/ref-changelog.md:649 +#: ../../source/ref-changelog.md:21 +#, fuzzy msgid "" -"**New iOS CoreML code example** " -"([#1289](https://github.com/adap/flower/pull/1289))" +"**Fix** `flwr new` **FlowerTune template** " +"([#4123](https://github.com/adap/flower/pull/4123))" msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-changelog.md:651 +#: ../../source/ref-changelog.md:23 +#, fuzzy msgid "" -"Flower goes iOS! A massive new code example shows how Flower clients can " -"be built for iOS. The code example contains both Flower iOS SDK " -"components that can be used for many tasks, and one task example running " -"on CoreML." 
+"**Add buffer time after** `ServerApp` **thread initialization** " +"([#4119](https://github.com/adap/flower/pull/4119))" msgstr "" -"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " -"SDK 组件,以及在 CoreML 上运行的一个任务示例。" +"**在模拟过程中为***`历史`***对象添加训练指标*** " +"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/ref-changelog.md:653 +#: ../../source/ref-changelog.md:25 +#, fuzzy msgid "" -"**New FedMedian strategy** " -"([#1461](https://github.com/adap/flower/pull/1461))" -msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" +"**Handle unsuitable resources for simulation** " +"([#4143](https://github.com/adap/flower/pull/4143))" +msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/ref-changelog.md:655 +#: ../../source/ref-changelog.md:27 +#, fuzzy msgid "" -"The new `FedMedian` strategy implements Federated Median (FedMedian) by " -"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +"**Update example READMEs** " +"([#4117](https://github.com/adap/flower/pull/4117))" msgstr "" -"新的 \"FedMedian \"战略实现了[Yin " -"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:657 +#: ../../source/ref-changelog.md:29 +#, fuzzy msgid "" -"**Log** `Client` **exceptions in Virtual Client Engine** " -"([#1493](https://github.com/adap/flower/pull/1493))" -msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" +"**Update SuperNode authentication docs** " +"([#4160](https://github.com/adap/flower/pull/4160))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-changelog.md:659 -msgid "" -"All `Client` exceptions happening in the VCE are now logged by default " -"and not just exposed to the configured `Strategy` (via the `failures` " -"argument)." 
-msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" +#: ../../source/ref-changelog.md:31 ../../source/ref-changelog.md:111 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:323 +#: ../../source/ref-changelog.md:397 ../../source/ref-changelog.md:472 +#: ../../source/ref-changelog.md:584 ../../source/ref-changelog.md:674 +#: ../../source/ref-changelog.md:738 ../../source/ref-changelog.md:796 +#: ../../source/ref-changelog.md:865 ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:946 ../../source/ref-changelog.md:1102 +#: ../../source/ref-changelog.md:1173 ../../source/ref-changelog.md:1210 +#: ../../source/ref-changelog.md:1253 +msgid "Incompatible changes" +msgstr "不兼容的更改" -#: ../../source/ref-changelog.md:661 -msgid "" -"**Improve Virtual Client Engine internals** " -"([#1401](https://github.com/adap/flower/pull/1401), " -"[#1453](https://github.com/adap/flower/pull/1453))" -msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" +#: ../../source/ref-changelog.md:35 +#, fuzzy +msgid "v1.11.0 (2024-08-30)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:663 +#: ../../source/ref-changelog.md:41 +#, fuzzy msgid "" -"Some internals of the Virtual Client Engine have been revamped. The VCE " -"now uses Ray 2.0 under the hood, the value type of the `client_resources`" -" dictionary changed to `float` to allow fractions of resources to be " -"allocated." +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`," +" `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert " +"Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` " msgstr "" -"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " -"\"float\",以允许分配分数资源。" - -#: ../../source/ref-changelog.md:665 -msgid "" -"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " -"Client Engine**" -msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:667 -msgid "" -"The Virtual Client Engine now has full support for optional `Client` (and" -" `NumPyClient`) methods." -msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" +#: ../../source/ref-changelog.md:43 ../../source/ref-changelog.md:147 +#: ../../source/ref-changelog.md:245 ../../source/ref-changelog.md:345 +#: ../../source/ref-changelog.md:409 ../../source/ref-changelog.md:502 +#: ../../source/ref-changelog.md:602 ../../source/ref-changelog.md:686 +#: ../../source/ref-changelog.md:750 ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:877 ../../source/ref-changelog.md:1006 +#: ../../source/ref-changelog.md:1048 ../../source/ref-changelog.md:1115 +#: ../../source/ref-changelog.md:1181 ../../source/ref-changelog.md:1226 +#: ../../source/ref-changelog.md:1265 ../../source/ref-changelog.md:1298 +#: ../../source/ref-changelog.md:1348 +msgid "What's new?" +msgstr "有什么新内容?" 
-#: ../../source/ref-changelog.md:669 +#: ../../source/ref-changelog.md:45 msgid "" -"**Provide type information to packages using** `flwr` " -"([#1377](https://github.com/adap/flower/pull/1377))" +"**Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** " +"([#4006](https://github.com/adap/flower/pull/4006), " +"[#3945](https://github.com/adap/flower/pull/3945), " +"[#3999](https://github.com/adap/flower/pull/3999), " +"[#4027](https://github.com/adap/flower/pull/4027), " +"[#3851](https://github.com/adap/flower/pull/3851), " +"[#3946](https://github.com/adap/flower/pull/3946), " +"[#4003](https://github.com/adap/flower/pull/4003), " +"[#4029](https://github.com/adap/flower/pull/4029), " +"[#3942](https://github.com/adap/flower/pull/3942), " +"[#3957](https://github.com/adap/flower/pull/3957), " +"[#4020](https://github.com/adap/flower/pull/4020), " +"[#4044](https://github.com/adap/flower/pull/4044), " +"[#3852](https://github.com/adap/flower/pull/3852), " +"[#4019](https://github.com/adap/flower/pull/4019), " +"[#4031](https://github.com/adap/flower/pull/4031), " +"[#4036](https://github.com/adap/flower/pull/4036), " +"[#4049](https://github.com/adap/flower/pull/4049), " +"[#4017](https://github.com/adap/flower/pull/4017), " +"[#3943](https://github.com/adap/flower/pull/3943), " +"[#3944](https://github.com/adap/flower/pull/3944), " +"[#4011](https://github.com/adap/flower/pull/4011), " +"[#3619](https://github.com/adap/flower/pull/3619))" msgstr "" -"**使用** `flwr`向软件包提供类型信息 " -"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/ref-changelog.md:671 +#: ../../source/ref-changelog.md:47 msgid "" -"The package `flwr` is now bundled with a `py.typed` file indicating that " -"the package is typed. This enables typing support for projects or " -"packages that use `flwr` by enabling them to improve their code using " -"static type checkers like `mypy`." +"Dynamic code updates are here! 
`flwr run` can now ship and install the " +"latest version of your `ServerApp` and `ClientApp` to an already-running " +"federation (SuperLink and SuperNodes)." msgstr "" -"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " -"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" -#: ../../source/ref-changelog.md:673 +#: ../../source/ref-changelog.md:49 msgid "" -"**Updated code example** " -"([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" +"How does it work? `flwr run` bundles your Flower app into a single FAB " +"(Flower App Bundle) file. It then ships this FAB file, via the SuperExec," +" to both the SuperLink and those SuperNodes that need it. This allows you" +" to keep SuperExec, SuperLink and SuperNodes running as permanent " +"infrastructure, and then ship code updates (including completely new " +"projects!) dynamically." msgstr "" -"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " -"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:675 -msgid "" -"The code examples covering scikit-learn and PyTorch Lightning have been " -"updated to work with the latest version of Flower." -msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" +#: ../../source/ref-changelog.md:51 +msgid "`flwr run` is all you need." 
+msgstr "" -#: ../../source/ref-changelog.md:677 +#: ../../source/ref-changelog.md:53 +#, fuzzy msgid "" -"**Updated documentation** " -"([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github.com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**Introduce isolated** `ClientApp` **execution** " +"([#3970](https://github.com/adap/flower/pull/3970), " +"[#3976](https://github.com/adap/flower/pull/3976), " +"[#4002](https://github.com/adap/flower/pull/4002), " +"[#4001](https://github.com/adap/flower/pull/4001), " +"[#4034](https://github.com/adap/flower/pull/4034), " +"[#4037](https://github.com/adap/flower/pull/4037), " +"[#3977](https://github.com/adap/flower/pull/3977), " +"[#4042](https://github.com/adap/flower/pull/4042), " +"[#3978](https://github.com/adap/flower/pull/3978), " +"[#4039](https://github.com/adap/flower/pull/4039), " +"[#4033](https://github.com/adap/flower/pull/4033), " +"[#3971](https://github.com/adap/flower/pull/3971), " +"[#4035](https://github.com/adap/flower/pull/4035), " +"[#3973](https://github.com/adap/flower/pull/3973), " +"[#4032](https://github.com/adap/flower/pull/4032))" msgstr "" -"**更新文档** 
([#1355](https://github.com/adap/flower/pull/1355), " -"[#1558](https://github.com/adap/flower/pull/1558), " -"[#1379](https://github.com/adap/flower/pull/1379), " -"[#1380](https://github.com/adap/flower/pull/1380), " -"[#1381](https://github.com/adap/flower/pull/1381), " -"[#1332](https://github.com/adap/flower/pull/1332), " -"[#1391](https://github.com/adap/flower/pull/1391), " -"[#1403](https://github.com/adap/flower/pull/1403), " -"[#1364](https://github. com/adap/flower/pull/1364), " -"[#1409](https://github.com/adap/flower/pull/1409), " -"[#1419](https://github.com/adap/flower/pull/1419), " -"[#1444](https://github.com/adap/flower/pull/1444), " -"[#1448](https://github.com/adap/flower/pull/1448), " -"[#1417](https://github.com/adap/flower/pull/1417), " -"[#1449](https://github.com/adap/flower/pull/1449), " -"[#1465](https://github.com/adap/flower/pull/1465), " -"[#1467](https://github.com/adap/flower/pull/1467))" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github. com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/ref-changelog.md:679 +#: ../../source/ref-changelog.md:55 msgid "" -"There have been so many documentation updates that it doesn't even make " -"sense to list them individually." -msgstr "文档更新的数量之多,甚至没有必要逐一列出。" +"The SuperNode can now run your `ClientApp` in a fully isolated way. 
In an" +" enterprise deployment, this allows you to set strict limits on what the " +"`ClientApp` can and cannot do." +msgstr "" -#: ../../source/ref-changelog.md:681 -msgid "" -"**Restructured documentation** " -"([#1387](https://github.com/adap/flower/pull/1387))" -msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" +#: ../../source/ref-changelog.md:57 +msgid "`flower-supernode` supports three `--isolation` modes:" +msgstr "" -#: ../../source/ref-changelog.md:683 +#: ../../source/ref-changelog.md:59 msgid "" -"The documentation has been restructured to make it easier to navigate. " -"This is just the first step in a larger effort to make the Flower " -"documentation the best documentation of any project ever. Stay tuned!" -msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" +"Unset: The SuperNode runs the `ClientApp` in the same process (as in " +"previous versions of Flower). This is the default mode." +msgstr "" -#: ../../source/ref-changelog.md:685 +#: ../../source/ref-changelog.md:60 msgid "" -"**Open in Colab button** " -"([#1389](https://github.com/adap/flower/pull/1389))" -msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" +"`--isolation=subprocess`: The SuperNode starts a subprocess to run the " +"`ClientApp`." +msgstr "" -#: ../../source/ref-changelog.md:687 +#: ../../source/ref-changelog.md:61 msgid "" -"The four parts of the Flower Federated Learning Tutorial now come with a " -"new `Open in Colab` button. No need to install anything on your local " -"machine, you can now use and learn about Flower in your browser, it's " -"only a single click away." +"`--isolation=process`: The SuperNode expects an externally-managed " +"process to run the `ClientApp`. This external process is not managed by " +"the SuperNode, so it has to be started beforehand and terminated " +"manually. The common way to use this isolation mode is via the new " +"`flwr/clientapp` Docker image." 
msgstr "" -"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " -"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" -#: ../../source/ref-changelog.md:689 +#: ../../source/ref-changelog.md:63 +#, fuzzy msgid "" -"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," -" [#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475))" +"**Improve Docker support for enterprise deployments** " +"([#4050](https://github.com/adap/flower/pull/4050), " +"[#4090](https://github.com/adap/flower/pull/4090), " +"[#3784](https://github.com/adap/flower/pull/3784), " +"[#3998](https://github.com/adap/flower/pull/3998), " +"[#4094](https://github.com/adap/flower/pull/4094), " +"[#3722](https://github.com/adap/flower/pull/3722))" msgstr "" -"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " -"[#1470](https://github.com/adap/flower/pull/1470), " -"[#1472](https://github.com/adap/flower/pull/1472), " -"[#1473](https://github.com/adap/flower/pull/1473), " -"[#1474](https://github.com/adap/flower/pull/1474), " -"[#1475](https://github.com/adap/flower/pull/1475)))" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:691 +#: ../../source/ref-changelog.md:65 msgid "" -"The Flower Federated Learning Tutorial has two brand-new parts covering " -"custom strategies (still WIP) and the distinction between `Client` and " -"`NumPyClient`. The existing parts one and two have also been improved " -"(many small changes and fixes)." 
+"Flower 1.11 ships many Docker improvements that are especially useful for" +" enterprise deployments:" msgstr "" -"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " -"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" - -#: ../../source/ref-changelog.md:697 -msgid "v1.0.0 (2022-07-28)" -msgstr "v1.0.0 (2022-07-28)" - -#: ../../source/ref-changelog.md:699 -msgid "Highlights" -msgstr "亮点" -#: ../../source/ref-changelog.md:701 -msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" -msgstr "稳定的**虚拟客户端引擎**(可通过`start_simulation`访问)" - -#: ../../source/ref-changelog.md:702 -msgid "All `Client`/`NumPyClient` methods are now optional" -msgstr "所有 `Client`/`NumPyClient` 方法现在都是可选的了" - -#: ../../source/ref-changelog.md:703 -msgid "Configurable `get_parameters`" -msgstr "可配置的`get_parameters`" +#: ../../source/ref-changelog.md:67 +msgid "`flwr/supernode` comes with a new Alpine Docker image." +msgstr "" -#: ../../source/ref-changelog.md:704 +#: ../../source/ref-changelog.md:68 msgid "" -"Tons of small API cleanups resulting in a more coherent developer " -"experience" -msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" +"`flwr/clientapp` is a new image to be used with the `--isolation=process`" +" option. In this mode, SuperNode and `ClientApp` run in two different " +"Docker containers. `flwr/supernode` (preferably the Alpine version) runs " +"the long-running SuperNode with `--isolation=process`. `flwr/clientapp` " +"runs the `ClientApp`. This is the recommended way to deploy Flower in " +"enterprise settings." +msgstr "" -#: ../../source/ref-changelog.md:708 +#: ../../source/ref-changelog.md:69 msgid "" -"We would like to give our **special thanks** to all the contributors who " -"made Flower 1.0 possible (in reverse [GitHub " -"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +"New all-in-one Docker Compose enables you to easily start a full Flower " +"Deployment Engine on a single machine." 
msgstr "" -"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " -"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" -#: ../../source/ref-changelog.md:710 +#: ../../source/ref-changelog.md:70 msgid "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " 
-"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." +"Completely new Docker documentation: " +"https://flower.ai/docs/framework/docker/index.html" msgstr "" -"[@rtaiello](https://github.com/rtaiello), " -"[@g-pichler](https://github.com/g-pichler), [@rob-" -"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" -"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " -"[@nfnt](https://github.com/nfnt), " -"[@tatiana-s](https://github.com/tatiana-s), " -"[@TParcollet](https://github.com/TParcollet), " -"[@vballoli](https://github.com/vballoli), " -"[@negedng](https://github.com/negedng), " -"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " -"[@hei411](https://github.com/hei411), " -"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " -"[@AmitChaulwar](https://github.com/AmitChaulwar), " -"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" -"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " -"[@lbhm](https://github.com/lbhm), " -"[@sishtiaq](https://github.com/sishtiaq), " -"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" -"/Jueun-Park), [@architjen](https://github.com/architjen), " -"[@PratikGarai](https://github.com/PratikGarai), " -"[@mrinaald](https://github.com/mrinaald), " -"[@zliel](https://github.com/zliel), " -"[@MeiruiJiang](https://github.com/MeiruiJiang), " -"[@sancarlim](https://github.com/sancarlim), " -"[@gubertoli](https://github.com/gubertoli), " -"[@Vingt100](https://github.com/Vingt100), " -"[@MakGulati](https://github.com/MakGulati), " -"[@cozek](https://github.com/cozek), " -"[@jafermarq](https://github.com/jafermarq), " -"[@sisco0](https://github.com/sisco0), " -"[@akhilmathurs](https://github.com/akhilmathurs), " -"[@CanTuerk](https://github.com/CanTuerk), " -"[@mariaboerner1987](https://github.com/mariaboerner1987), " -"[@pedropgusmao](https://github.com/pedropgusmao), " 
-"[@tanertopal](https://github.com/tanertopal), " -"[@danieljanes](https://github.com/danieljanes)." -#: ../../source/ref-changelog.md:714 +#: ../../source/ref-changelog.md:72 +#, fuzzy msgid "" -"**All arguments must be passed as keyword arguments** " -"([#1338](https://github.com/adap/flower/pull/1338))" -msgstr "** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" +"**Improve SuperNode authentication** " +"([#4043](https://github.com/adap/flower/pull/4043), " +"[#4047](https://github.com/adap/flower/pull/4047), " +"[#4074](https://github.com/adap/flower/pull/4074))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:716 +#: ../../source/ref-changelog.md:74 msgid "" -"Pass all arguments as keyword arguments, positional arguments are not " -"longer supported. Code that uses positional arguments (e.g., " -"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " -"for each positional argument (e.g., " -"`start_client(server_address=\"127.0.0.1:8080\", " -"client=FlowerClient())`)." +"SuperNode auth has been improved in several ways, including improved " +"logging, improved testing, and improved error handling." 
msgstr "" -"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " -"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," -" client=FlowerClient())`)。" -#: ../../source/ref-changelog.md:718 +#: ../../source/ref-changelog.md:76 +#, fuzzy msgid "" -"**Introduce configuration object** `ServerConfig` **in** `start_server` " -"**and** `start_simulation` " -"([#1317](https://github.com/adap/flower/pull/1317))" +"**Update** `flwr new` **templates** " +"([#3933](https://github.com/adap/flower/pull/3933), " +"[#3894](https://github.com/adap/flower/pull/3894), " +"[#3930](https://github.com/adap/flower/pull/3930), " +"[#3931](https://github.com/adap/flower/pull/3931), " +"[#3997](https://github.com/adap/flower/pull/3997), " +"[#3979](https://github.com/adap/flower/pull/3979), " +"[#3965](https://github.com/adap/flower/pull/3965), " +"[#4013](https://github.com/adap/flower/pull/4013), " +"[#4064](https://github.com/adap/flower/pull/4064))" msgstr "" -"**在*** `start_server` ***和*** `start_simulation` 中引入配置对象*** " -"`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:720 +#: ../../source/ref-changelog.md:78 msgid "" -"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " -"600.0}`, `start_server` and `start_simulation` now expect a configuration" -" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" -" arguments that as the previous config dict, but it makes writing type-" -"safe code easier and the default parameters values more transparent." 
+"All `flwr new` templates have been updated to show the latest recommended" +" use of Flower APIs." msgstr "" -"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " -"`start_simulation`现在用一个类型为 " -"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " -"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" -#: ../../source/ref-changelog.md:722 +#: ../../source/ref-changelog.md:80 +#, fuzzy msgid "" -"**Rename built-in strategy parameters for clarity** " -"([#1334](https://github.com/adap/flower/pull/1334))" -msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" +"**Improve Simulation Engine** " +"([#4095](https://github.com/adap/flower/pull/4095), " +"[#3913](https://github.com/adap/flower/pull/3913), " +"[#4059](https://github.com/adap/flower/pull/4059), " +"[#3954](https://github.com/adap/flower/pull/3954), " +"[#4071](https://github.com/adap/flower/pull/4071), " +"[#3985](https://github.com/adap/flower/pull/3985), " +"[#3988](https://github.com/adap/flower/pull/3988))" +msgstr "" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:724 +#: ../../source/ref-changelog.md:82 msgid "" -"The following built-in strategy parameters were renamed to improve " -"readability and consistency with other API's:" -msgstr "以下内置策略参数已重新命名,以提高可读性并与其他 API 保持一致:" - -#: ../../source/ref-changelog.md:726 -msgid "`fraction_eval` --> `fraction_evaluate`" -msgstr "`fraction_eval` --> `fraction_evaluate`" - -#: ../../source/ref-changelog.md:727 -msgid "`min_eval_clients` --> `min_evaluate_clients`" -msgstr "`min_eval_clients` --> `min_evaluate_clients`" - -#: ../../source/ref-changelog.md:728 -msgid "`eval_fn` 
--> `evaluate_fn`" -msgstr "`eval_fn` --> `evaluate_fn`" +"The Flower Simulation Engine comes with several updates, including " +"improved run config support, verbose logging, simulation backend " +"configuration via `flwr run`, and more." +msgstr "" -#: ../../source/ref-changelog.md:730 +#: ../../source/ref-changelog.md:84 +#, fuzzy msgid "" -"**Update default arguments of built-in strategies** " -"([#1278](https://github.com/adap/flower/pull/1278))" -msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" +"**Improve** `RecordSet` " +"([#4052](https://github.com/adap/flower/pull/4052), " +"[#3218](https://github.com/adap/flower/pull/3218), " +"[#4016](https://github.com/adap/flower/pull/4016))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:732 +#: ../../source/ref-changelog.md:86 msgid "" -"All built-in strategies now use `fraction_fit=1.0` and " -"`fraction_evaluate=1.0`, which means they select *all* currently " -"available clients for training and evaluation. Projects that relied on " -"the previous default values can get the previous behaviour by " -"initializing the strategy in the following way:" +"`RecordSet` is the core object to exchange model parameters, " +"configuration values and metrics between `ClientApp` and `ServerApp`. " +"This release ships several smaller improvements to `RecordSet` and " +"related `*Record` types." 
msgstr "" -"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 " -"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" -#: ../../source/ref-changelog.md:734 -msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" - -#: ../../source/ref-changelog.md:736 +#: ../../source/ref-changelog.md:88 +#, fuzzy msgid "" -"**Add** `server_round` **to** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**Update documentation** " +"([#3972](https://github.com/adap/flower/pull/3972), " +"[#3925](https://github.com/adap/flower/pull/3925), " +"[#4061](https://github.com/adap/flower/pull/4061), " +"[#3984](https://github.com/adap/flower/pull/3984), " +"[#3917](https://github.com/adap/flower/pull/3917), " +"[#3900](https://github.com/adap/flower/pull/3900), " +"[#4066](https://github.com/adap/flower/pull/4066), " +"[#3765](https://github.com/adap/flower/pull/3765), " +"[#4021](https://github.com/adap/flower/pull/4021), " +"[#3906](https://github.com/adap/flower/pull/3906), " +"[#4063](https://github.com/adap/flower/pull/4063), " +"[#4076](https://github.com/adap/flower/pull/4076), " +"[#3920](https://github.com/adap/flower/pull/3920), " +"[#3916](https://github.com/adap/flower/pull/3916))" msgstr "" -"**添加*** `server_round` ***到*** `Strategy.evaluate` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " 
+"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" + +#: ../../source/ref-changelog.md:90 +msgid "" +"Many parts of the documentation, including the main tutorial, have been " +"migrated to show new Flower APIs and other new Flower features like the " +"improved Docker support." +msgstr "" + +#: ../../source/ref-changelog.md:92 +msgid "" +"**Migrate code example to use new Flower APIs** " +"([#3758](https://github.com/adap/flower/pull/3758), " +"[#3701](https://github.com/adap/flower/pull/3701), " +"[#3919](https://github.com/adap/flower/pull/3919), " +"[#3918](https://github.com/adap/flower/pull/3918), " +"[#3934](https://github.com/adap/flower/pull/3934), " +"[#3893](https://github.com/adap/flower/pull/3893), " +"[#3833](https://github.com/adap/flower/pull/3833), " +"[#3922](https://github.com/adap/flower/pull/3922), " +"[#3846](https://github.com/adap/flower/pull/3846), " +"[#3777](https://github.com/adap/flower/pull/3777), " +"[#3874](https://github.com/adap/flower/pull/3874), " +"[#3873](https://github.com/adap/flower/pull/3873), " +"[#3935](https://github.com/adap/flower/pull/3935), " +"[#3754](https://github.com/adap/flower/pull/3754), " +"[#3980](https://github.com/adap/flower/pull/3980), " +"[#4089](https://github.com/adap/flower/pull/4089), " +"[#4046](https://github.com/adap/flower/pull/4046), " +"[#3314](https://github.com/adap/flower/pull/3314), " +"[#3316](https://github.com/adap/flower/pull/3316), " +"[#3295](https://github.com/adap/flower/pull/3295), " +"[#3313](https://github.com/adap/flower/pull/3313))" +msgstr "" + +#: ../../source/ref-changelog.md:94 +msgid "Many code examples have been migrated to use new Flower APIs." 
+msgstr "" + +#: ../../source/ref-changelog.md:96 +msgid "" +"**Update Flower framework, framework internals and quality " +"infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), " +"[#4053](https://github.com/adap/flower/pull/4053), " +"[#4098](https://github.com/adap/flower/pull/4098), " +"[#4067](https://github.com/adap/flower/pull/4067), " +"[#4105](https://github.com/adap/flower/pull/4105), " +"[#4048](https://github.com/adap/flower/pull/4048), " +"[#4107](https://github.com/adap/flower/pull/4107), " +"[#4069](https://github.com/adap/flower/pull/4069), " +"[#3915](https://github.com/adap/flower/pull/3915), " +"[#4101](https://github.com/adap/flower/pull/4101), " +"[#4108](https://github.com/adap/flower/pull/4108), " +"[#3914](https://github.com/adap/flower/pull/3914), " +"[#4068](https://github.com/adap/flower/pull/4068), " +"[#4041](https://github.com/adap/flower/pull/4041), " +"[#4040](https://github.com/adap/flower/pull/4040), " +"[#3986](https://github.com/adap/flower/pull/3986), " +"[#4026](https://github.com/adap/flower/pull/4026), " +"[#3961](https://github.com/adap/flower/pull/3961), " +"[#3975](https://github.com/adap/flower/pull/3975), " +"[#3983](https://github.com/adap/flower/pull/3983), " +"[#4091](https://github.com/adap/flower/pull/4091), " +"[#3982](https://github.com/adap/flower/pull/3982), " +"[#4079](https://github.com/adap/flower/pull/4079), " +"[#4073](https://github.com/adap/flower/pull/4073), " +"[#4060](https://github.com/adap/flower/pull/4060), " +"[#4106](https://github.com/adap/flower/pull/4106), " +"[#4080](https://github.com/adap/flower/pull/4080), " +"[#3974](https://github.com/adap/flower/pull/3974), " +"[#3996](https://github.com/adap/flower/pull/3996), " +"[#3991](https://github.com/adap/flower/pull/3991), " +"[#3981](https://github.com/adap/flower/pull/3981), " +"[#4093](https://github.com/adap/flower/pull/4093), " +"[#4100](https://github.com/adap/flower/pull/4100), " 
+"[#3939](https://github.com/adap/flower/pull/3939), " +"[#3955](https://github.com/adap/flower/pull/3955), " +"[#3940](https://github.com/adap/flower/pull/3940), " +"[#4038](https://github.com/adap/flower/pull/4038))" +msgstr "" + +#: ../../source/ref-changelog.md:98 ../../source/ref-changelog.md:205 +msgid "" +"As always, many parts of the Flower framework and quality infrastructure " +"were improved and updated." +msgstr "" + +#: ../../source/ref-changelog.md:100 ../../source/ref-changelog.md:217 +#: ../../source/ref-changelog.md:309 ../../source/ref-changelog.md:1292 +msgid "Deprecations" +msgstr "停用" -#: ../../source/ref-changelog.md:738 +#: ../../source/ref-changelog.md:102 +#, fuzzy msgid "" -"The `Strategy` method `evaluate` now receives the current round of " -"federated learning/evaluation as the first parameter." -msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。" +"**Deprecate accessing `Context` via `Client.context`** " +"([#3797](https://github.com/adap/flower/pull/3797))" +msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:104 msgid "" -"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" +"Now that both `client_fn` and `server_fn` receive a `Context` object, " +"accessing `Context` via `Client.context` is deprecated. `Client.context` " +"will be removed in a future release. 
If you need to access `Context` in " +"your `Client` implementation, pass it manually when creating the `Client`" +" instance in `client_fn`:" msgstr "" -"**将*** `server_round` **和*** `config` **参数添加到*** `evaluate_fn` " -"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/ref-changelog.md:742 +#: ../../source/ref-changelog.md:113 +#, fuzzy msgid "" -"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " -"three parameters: (1) The current round of federated learning/evaluation " -"(`server_round`), (2) the model parameters to evaluate (`parameters`), " -"and (3) a config dictionary (`config`)." +"**Update CLIs to accept an app directory instead of** `ClientApp` **and**" +" `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), " +"[#4077](https://github.com/adap/flower/pull/4077), " +"[#3850](https://github.com/adap/flower/pull/3850))" msgstr "" -"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " -"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:115 msgid "" -"**Rename** `rnd` **to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" +"The CLI commands `flower-supernode` and `flower-server-app` now accept an" +" app directory as argument (instead of references to a `ClientApp` or " +"`ServerApp`). An app directory is any directory containing a " +"`pyproject.toml` file (with the appropriate Flower config fields set). " +"The easiest way to generate a compatible project structure is to use " +"`flwr new`." 
msgstr "" -"**重新命名** `rnd` ** to** `server_round` " -"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:117 +#, fuzzy msgid "" -"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " -"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " -"current round of federated learning/evaluation as their first parameter. " -"To improve reaability and avoid confusion with *random*, this parameter " -"has been renamed from `rnd` to `server_round`." +"**Disable** `flower-client-app` **CLI command** " +"([#4022](https://github.com/adap/flower/pull/4022))" msgstr "" -"几个 Flower " -"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可重复性并避免与" -" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:748 -msgid "" -"**Move** `flwr.dataset` **to** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" +#: ../../source/ref-changelog.md:119 +msgid "`flower-client-app` has been disabled. Use `flower-supernode` instead." msgstr "" -"**移动*** `flwr.dataset` **到*** `flwr_baselines` " -"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/ref-changelog.md:750 -msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." 
-msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" +#: ../../source/ref-changelog.md:121 +#, fuzzy +msgid "" +"**Use spaces instead of commas for separating config args** " +"([#4000](https://github.com/adap/flower/pull/4000))" +msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/ref-changelog.md:752 +#: ../../source/ref-changelog.md:123 msgid "" -"**Remove experimental strategies** " -"([#1280](https://github.com/adap/flower/pull/1280))" -msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" +"When passing configs (run config, node config) to Flower, you now need to" +" separate key-value pairs using spaces instead of commas. For example:" +msgstr "" -#: ../../source/ref-changelog.md:754 +#: ../../source/ref-changelog.md:129 +msgid "Previously, you could pass configs using commas, like this:" +msgstr "" + +#: ../../source/ref-changelog.md:135 +#, fuzzy msgid "" -"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " -"`FedFSv1`)." -msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1`)。" +"**Remove** `flwr example` **CLI command** " +"([#4084](https://github.com/adap/flower/pull/4084))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:756 +#: ../../source/ref-changelog.md:137 msgid "" -"**Rename** `Weights` **to** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"The experimental `flwr example` CLI command has been removed. Use `flwr " +"new` to generate a project and then run it using `flwr run`." 
msgstr "" -"**重新命名** `Weights` **到** `NDArrays` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/ref-changelog.md:758 +#: ../../source/ref-changelog.md:139 +#, fuzzy +msgid "v1.10.0 (2024-07-24)" +msgstr "v1.0.0 (2022-07-28)" + +#: ../../source/ref-changelog.md:145 +#, fuzzy msgid "" -"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " -"capture what this type is all about." -msgstr "flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含义。" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. " +"Beutel`, `Daniel Nata Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, " +"`Ikko Eltociear Ashimine`, `Javier`, `Jiahao Tan`, `Mohammad Naseri`, " +"`Robert Steiner`, `Sebastian van der Voort`, `Taner Topal`, `Yan Gao` " +msgstr "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:760 +#: ../../source/ref-changelog.md:149 +#, fuzzy msgid "" -"**Remove antiquated** `force_final_distributed_eval` **from** " -"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Introduce** `flwr run` **(beta)** " +"([#3810](https://github.com/adap/flower/pull/3810), " +"[#3826](https://github.com/adap/flower/pull/3826), " +"[#3880](https://github.com/adap/flower/pull/3880), " +"[#3807](https://github.com/adap/flower/pull/3807), " +"[#3800](https://github.com/adap/flower/pull/3800), " +"[#3814](https://github.com/adap/flower/pull/3814), " +"[#3811](https://github.com/adap/flower/pull/3811), " +"[#3809](https://github.com/adap/flower/pull/3809), " +"[#3819](https://github.com/adap/flower/pull/3819))" msgstr "" -"**从** 
`start_server` 中移除过时的** `force_final_distributed_eval` " -"([#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:762 +#: ../../source/ref-changelog.md:151 msgid "" -"The `start_server` parameter `force_final_distributed_eval` has long been" -" a historic artefact, in this release it is finally gone for good." +"Flower 1.10 ships the first beta release of the new `flwr run` command. " +"`flwr run` can run different projects using `flwr run path/to/project`, " +"it enables you to easily switch between different federations using `flwr" +" run . federation` and it runs your Flower project using either local " +"simulation or the new (experimental) SuperExec service. This allows " +"Flower to scale federatated learning from fast local simulation to large-" +"scale production deployment, seamlessly. All projects generated with " +"`flwr new` are immediately runnable using `flwr run`. Give it a try: use " +"`flwr new` to generate a project and then run it using `flwr run`." 
msgstr "" -"start_server \"参数 \"force_final_distributed_eval " -"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" -#: ../../source/ref-changelog.md:764 +#: ../../source/ref-changelog.md:153 +#, fuzzy msgid "" -"**Make** `get_parameters` **configurable** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**Introduce run config** " +"([#3751](https://github.com/adap/flower/pull/3751), " +"[#3750](https://github.com/adap/flower/pull/3750), " +"[#3845](https://github.com/adap/flower/pull/3845), " +"[#3824](https://github.com/adap/flower/pull/3824), " +"[#3746](https://github.com/adap/flower/pull/3746), " +"[#3728](https://github.com/adap/flower/pull/3728), " +"[#3730](https://github.com/adap/flower/pull/3730), " +"[#3725](https://github.com/adap/flower/pull/3725), " +"[#3729](https://github.com/adap/flower/pull/3729), " +"[#3580](https://github.com/adap/flower/pull/3580), " +"[#3578](https://github.com/adap/flower/pull/3578), " +"[#3576](https://github.com/adap/flower/pull/3576), " +"[#3798](https://github.com/adap/flower/pull/3798), " +"[#3732](https://github.com/adap/flower/pull/3732), " +"[#3815](https://github.com/adap/flower/pull/3815))" msgstr "" -"**使** `get_parameters` **可配置** " -"([#1242](https://github.com/adap/flower/pull/1242))" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:766 +#: ../../source/ref-changelog.md:155 msgid "" -"The `get_parameters` method now accepts a configuration dictionary, just " -"like `get_properties`, `fit`, and `evaluate`." 
+"The new run config feature allows you to run your Flower project in " +"different configurations without having to change a single line of code. " +"You can now build a configurable `ServerApp` and `ClientApp` that read " +"configuration values at runtime. This enables you to specify config " +"values like `learning-rate=0.01` in `pyproject.toml` (under the " +"`[tool.flwr.app.config]` key). These config values can then be easily " +"overridden via `flwr run --run-config learning-rate=0.02`, and read from " +"`Context` using `lr = context.run_config[\"learning-rate\"]`. Create a " +"new project using `flwr new` to see run config in action." msgstr "" -"现在,\"get_parameters \"方法与 \"get_properties\"、\"fit \"和 \"evaluate " -"\"一样,都接受配置字典。" -#: ../../source/ref-changelog.md:768 +#: ../../source/ref-changelog.md:157 +#, fuzzy msgid "" -"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " -"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +"**Generalize** `client_fn` **signature to** `client_fn(context: Context) " +"-> Client` ([#3779](https://github.com/adap/flower/pull/3779), " +"[#3697](https://github.com/adap/flower/pull/3697), " +"[#3694](https://github.com/adap/flower/pull/3694), " +"[#3696](https://github.com/adap/flower/pull/3696))" msgstr "" -"**用新的** `config` 参数** 替换** `num_rounds` ** in** `start_simulation` ** " -"([#1281](https://github.com/adap/flower/pull/1281))" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:770 +#: ../../source/ref-changelog.md:159 msgid "" -"The `start_simulation` function now accepts a configuration dictionary " -"`config` instead of the `num_rounds` integer. 
This improves the " -"consistency between `start_simulation` and `start_server` and makes " -"transitioning between the two easier." +"The `client_fn` signature has been generalized to `client_fn(context: " +"Context) -> Client`. It now receives a `Context` object instead of the " +"(now depreacated) `cid: str`. `Context` allows accessing `node_id`, " +"`node_config` and `run_config`, among other things. This enables you to " +"build a configurable `ClientApp` that leverages the new run config " +"system." msgstr "" -"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " -"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" -#: ../../source/ref-changelog.md:774 +#: ../../source/ref-changelog.md:161 msgid "" -"**Support Python 3.10** " -"([#1320](https://github.com/adap/flower/pull/1320))" -msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" +"The previous signature `client_fn(cid: str)` is now deprecated and " +"support for it will be removed in a future release. Use " +"`client_fn(context: Context) -> Client` everywhere." +msgstr "" -#: ../../source/ref-changelog.md:776 +#: ../../source/ref-changelog.md:163 +#, fuzzy msgid "" -"The previous Flower release introduced experimental support for Python " -"3.10, this release declares Python 3.10 support as stable." 
-msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" +"**Introduce new** `server_fn(context)` " +"([#3773](https://github.com/adap/flower/pull/3773), " +"[#3796](https://github.com/adap/flower/pull/3796), " +"[#3771](https://github.com/adap/flower/pull/3771))" +msgstr "" +"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584)" -#: ../../source/ref-changelog.md:778 +#: ../../source/ref-changelog.md:165 msgid "" -"**Make all** `Client` **and** `NumPyClient` **methods optional** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" +"In addition to the new `client_fn(context:Context)`, a new " +"`server_fn(context: Context) -> ServerAppComponents` can now be passed to" +" `ServerApp` (instead of passing, for example, `Strategy`, directly). " +"This enables you to leverage the full `Context` on the server-side to " +"build a configurable `ServerApp`." msgstr "" -"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " -"([#1260](https://github.com/adap/flower/pull/1260), " -"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/ref-changelog.md:780 +#: ../../source/ref-changelog.md:167 +#, fuzzy msgid "" -"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " -"`fit`, and `evaluate` are all optional. This enables writing clients that" -" implement, for example, only `fit`, but no other method. No need to " -"implement `evaluate` when using centralized evaluation!" 
+"**Relaunch all** `flwr new` **templates** " +"([#3877](https://github.com/adap/flower/pull/3877), " +"[#3821](https://github.com/adap/flower/pull/3821), " +"[#3587](https://github.com/adap/flower/pull/3587), " +"[#3795](https://github.com/adap/flower/pull/3795), " +"[#3875](https://github.com/adap/flower/pull/3875), " +"[#3859](https://github.com/adap/flower/pull/3859), " +"[#3760](https://github.com/adap/flower/pull/3760))" msgstr "" -"`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " -"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " -"`evaluate`!" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:782 +#: ../../source/ref-changelog.md:169 msgid "" -"**Enable passing a** `Server` **instance to** `start_simulation` " -"([#1281](https://github.com/adap/flower/pull/1281))" +"All `flwr new` templates have been significantly updated to showcase new " +"Flower features and best practices. This includes using `flwr run` and " +"the new run config feature. You can now easily create a new project using" +" `flwr new` and, after following the instructions to install it, `flwr " +"run` it." msgstr "" -"**启用向** `start_simulation` 传递** `Server` 实例 " -"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:784 +#: ../../source/ref-changelog.md:171 +#, fuzzy msgid "" -"Similar to `start_server`, `start_simulation` now accepts a full `Server`" -" instance. This enables users to heavily customize the execution of " -"eperiments and opens the door to running, for example, async FL using the" -" Virtual Client Engine." 
+"**Introduce** `flower-supernode` **(preview)** " +"([#3353](https://github.com/adap/flower/pull/3353))" msgstr "" -"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " -"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:786 +#: ../../source/ref-changelog.md:173 msgid "" -"**Update code examples** " -"([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" +"The new `flower-supernode` CLI is here to replace `flower-client-app`. " +"`flower-supernode` brings full multi-app support to the Flower client-" +"side. It also allows to pass `--node-config` to the SuperNode, which is " +"accessible in your `ClientApp` via `Context` (using the new " +"`client_fn(context: Context)` signature)." msgstr "" -"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " -"[#1286](https://github.com/adap/flower/pull/1286), " -"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/ref-changelog.md:788 +#: ../../source/ref-changelog.md:175 +#, fuzzy msgid "" -"Many code examples received small or even large maintenance updates, " -"among them are" -msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" - -#: ../../source/ref-changelog.md:790 -msgid "`scikit-learn`" -msgstr "`scikit-learn`" +"**Introduce node config** " +"([#3782](https://github.com/adap/flower/pull/3782), " +"[#3780](https://github.com/adap/flower/pull/3780), " +"[#3695](https://github.com/adap/flower/pull/3695), " +"[#3886](https://github.com/adap/flower/pull/3886))" +msgstr "" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:791 -msgid 
"`simulation_pytorch`" -msgstr "`simulation_pytorch`" +#: ../../source/ref-changelog.md:177 +msgid "" +"A new node config feature allows you to pass a static configuration to " +"the SuperNode. This configuration is read-only and available to every " +"`ClientApp` running on that SuperNode. A `ClientApp` can access the node " +"config via `Context` (`context.node_config`)." +msgstr "" -#: ../../source/ref-changelog.md:792 -msgid "`quickstart_pytorch`" -msgstr "`quickstart_pytorch`" +#: ../../source/ref-changelog.md:179 +msgid "" +"**Introduce SuperExec (experimental)** " +"([#3605](https://github.com/adap/flower/pull/3605), " +"[#3723](https://github.com/adap/flower/pull/3723), " +"[#3731](https://github.com/adap/flower/pull/3731), " +"[#3589](https://github.com/adap/flower/pull/3589), " +"[#3604](https://github.com/adap/flower/pull/3604), " +"[#3622](https://github.com/adap/flower/pull/3622), " +"[#3838](https://github.com/adap/flower/pull/3838), " +"[#3720](https://github.com/adap/flower/pull/3720), " +"[#3606](https://github.com/adap/flower/pull/3606), " +"[#3602](https://github.com/adap/flower/pull/3602), " +"[#3603](https://github.com/adap/flower/pull/3603), " +"[#3555](https://github.com/adap/flower/pull/3555), " +"[#3808](https://github.com/adap/flower/pull/3808), " +"[#3724](https://github.com/adap/flower/pull/3724), " +"[#3658](https://github.com/adap/flower/pull/3658), " +"[#3629](https://github.com/adap/flower/pull/3629))" +msgstr "" -#: ../../source/ref-changelog.md:793 -msgid "`quickstart_simulation`" -msgstr "`quickstart_simulation`" +#: ../../source/ref-changelog.md:181 +msgid "" +"This is the first experimental release of Flower SuperExec, a new service" +" that executes your runs. It's not ready for production deployment just " +"yet, but don't hesitate to give it a try if you're interested." 
+msgstr "" -#: ../../source/ref-changelog.md:794 -msgid "`quickstart_tensorflow`" -msgstr "`quickstart_tensorflow`" +#: ../../source/ref-changelog.md:183 +#, fuzzy +msgid "" +"**Add new federated learning with tabular data example** " +"([#3568](https://github.com/adap/flower/pull/3568))" +msgstr "" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:795 -msgid "`advanced_tensorflow`" -msgstr "`advanced_tensorflow`" +#: ../../source/ref-changelog.md:185 +msgid "" +"A new code example exemplifies a federated learning setup using the " +"Flower framework on the Adult Census Income tabular dataset." +msgstr "" -#: ../../source/ref-changelog.md:797 +#: ../../source/ref-changelog.md:187 +#, fuzzy msgid "" -"**Remove the obsolete simulation example** " -"([#1328](https://github.com/adap/flower/pull/1328))" -msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" +"**Create generic adapter layer (preview)** " +"([#3538](https://github.com/adap/flower/pull/3538), " +"[#3536](https://github.com/adap/flower/pull/3536), " +"[#3540](https://github.com/adap/flower/pull/3540))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:799 +#: ../../source/ref-changelog.md:189 msgid "" -"Removes the obsolete `simulation` example and renames " -"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " -"naming of `simulation_pytorch`" +"A new generic gRPC adapter layer allows 3rd-party frameworks to integrate" +" with Flower in a transparent way. This makes Flower more modular and " +"allows for integration into other federated learning solutions and " +"platforms." 
msgstr "" -"删除过时的 \"simulation \"示例,并将 \"quickstart_simulation \"重命名为 " -"\"simulation_tensorflow\",使其与 \"simulation_pytorch \"的命名一致" -#: ../../source/ref-changelog.md:801 +#: ../../source/ref-changelog.md:191 +#, fuzzy msgid "" -"**Update documentation** " -"([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " -"[#1307](https://github.com/adap/flower/pull/1307))" +"**Refactor Flower Simulation Engine** " +"([#3581](https://github.com/adap/flower/pull/3581), " +"[#3471](https://github.com/adap/flower/pull/3471), " +"[#3804](https://github.com/adap/flower/pull/3804), " +"[#3468](https://github.com/adap/flower/pull/3468), " +"[#3839](https://github.com/adap/flower/pull/3839), " +"[#3806](https://github.com/adap/flower/pull/3806), " +"[#3861](https://github.com/adap/flower/pull/3861), " +"[#3543](https://github.com/adap/flower/pull/3543), " +"[#3472](https://github.com/adap/flower/pull/3472), " +"[#3829](https://github.com/adap/flower/pull/3829), " +"[#3469](https://github.com/adap/flower/pull/3469))" msgstr "" -"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " -"[#1209](https://github.com/adap/flower/pull/1209), " -"[#1251](https://github.com/adap/flower/pull/1251), " -"[#1257](https://github.com/adap/flower/pull/1257), " -"[#1267](https://github.com/adap/flower/pull/1267), " -"[#1268](https://github.com/adap/flower/pull/1268), " -"[#1300](https://github.com/adap/flower/pull/1300), " -"[#1304](https://github.com/adap/flower/pull/1304), " -"[#1305](https://github.com/adap/flower/pull/1305), " 
-"[#1307](https://github.com/adap/flower/pull/1307))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:803 +#: ../../source/ref-changelog.md:193 msgid "" -"One substantial documentation update fixes multiple smaller rendering " -"issues, makes titles more succinct to improve navigation, removes a " -"deprecated library, updates documentation dependencies, includes the " -"`flwr.common` module in the API reference, includes support for markdown-" -"based documentation, migrates the changelog from `.rst` to `.md`, and " -"fixes a number of smaller details!" +"The Simulation Engine was significantly refactored. This results in " +"faster and more stable simulations. It is also the foundation for " +"upcoming changes that aim to provide the next level of performance and " +"configurability in federated learning simulations." msgstr "" -"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " -"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " -"`.md`,并修复了一些较小的细节!" 
-#: ../../source/ref-changelog.md:805 ../../source/ref-changelog.md:860 -#: ../../source/ref-changelog.md:929 ../../source/ref-changelog.md:968 -msgid "**Minor updates**" -msgstr "**小规模更新**" +#: ../../source/ref-changelog.md:195 +#, fuzzy +msgid "" +"**Optimize Docker containers** " +"([#3591](https://github.com/adap/flower/pull/3591))" +msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" -#: ../../source/ref-changelog.md:807 +#: ../../source/ref-changelog.md:197 msgid "" -"Add round number to fit and evaluate log messages " -"([#1266](https://github.com/adap/flower/pull/1266))" -msgstr "添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/flower/pull/1266))" +"Flower Docker containers were optimized and updated to use that latest " +"Flower framework features." +msgstr "" -#: ../../source/ref-changelog.md:808 +#: ../../source/ref-changelog.md:199 +#, fuzzy msgid "" -"Add secure gRPC connection to the `advanced_tensorflow` code example " -"([#847](https://github.com/adap/flower/pull/847))" +"**Improve logging** ([#3776](https://github.com/adap/flower/pull/3776), " +"[#3789](https://github.com/adap/flower/pull/3789))" msgstr "" -"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 " -"([#847](https://github.com/adap/flower/pull/847))" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:809 +#: ../../source/ref-changelog.md:201 msgid "" -"Update developer tooling " -"([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310))" +"Improved logging aims to be more concise and helpful to show you the " +"details you actually care about." 
msgstr "" -"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " -"[#1276](https://github.com/adap/flower/pull/1276), " -"[#1301](https://github.com/adap/flower/pull/1301), " -"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:810 +#: ../../source/ref-changelog.md:203 +#, fuzzy msgid "" -"Rename ProtoBuf messages to improve consistency " -"([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259))" +"**Refactor framework internals** " +"([#3621](https://github.com/adap/flower/pull/3621), " +"[#3792](https://github.com/adap/flower/pull/3792), " +"[#3772](https://github.com/adap/flower/pull/3772), " +"[#3805](https://github.com/adap/flower/pull/3805), " +"[#3583](https://github.com/adap/flower/pull/3583), " +"[#3825](https://github.com/adap/flower/pull/3825), " +"[#3597](https://github.com/adap/flower/pull/3597), " +"[#3802](https://github.com/adap/flower/pull/3802), " +"[#3569](https://github.com/adap/flower/pull/3569))" msgstr "" -"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), " -"[#1258](https://github.com/adap/flower/pull/1258), " -"[#1259](https://github.com/adap/flower/pull/1259)" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:812 -msgid "v0.19.0 (2022-05-18)" -msgstr "v0.19.0 (2022-05-18)" +#: ../../source/ref-changelog.md:207 +#, fuzzy +msgid "Documentation improvements" +msgstr "可选的改进措施" -#: ../../source/ref-changelog.md:816 +#: ../../source/ref-changelog.md:209 +#, fuzzy msgid "" -"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " 
-"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" -msgstr "" -"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " -"([#919](https://github.com/adap/flower/pull/919), " -"[#1127](https://github.com/adap/flower/pull/1127), " -"[#914](https://github.com/adap/flower/pull/914))" +"**Add 🇰🇷 Korean translations** " +"([#3680](https://github.com/adap/flower/pull/3680))" +msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/ref-changelog.md:818 +#: ../../source/ref-changelog.md:211 #, fuzzy msgid "" -"The first preview release of Flower Baselines has arrived! We're " -"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " -"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " -"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " -"With this first preview release we're also inviting the community to " -"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" -"contribute-baselines.html)." 
+"**Update translations** " +"([#3586](https://github.com/adap/flower/pull/3586), " +"[#3679](https://github.com/adap/flower/pull/3679), " +"[#3570](https://github.com/adap/flower/pull/3570), " +"[#3681](https://github.com/adap/flower/pull/3681), " +"[#3617](https://github.com/adap/flower/pull/3617), " +"[#3674](https://github.com/adap/flower/pull/3674), " +"[#3671](https://github.com/adap/flower/pull/3671), " +"[#3572](https://github.com/adap/flower/pull/3572), " +"[#3631](https://github.com/adap/flower/pull/3631))" msgstr "" -"Flower Baselines 的第一个预览版已经发布!我们通过实现 " -"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " -"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" -"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" -"/contributing-baselines.html)。" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:820 +#: ../../source/ref-changelog.md:213 +#, fuzzy msgid "" -"**C++ client SDK (preview) and code example** " -"([#1111](https://github.com/adap/flower/pull/1111))" -msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" +"**Update documentation** " +"([#3864](https://github.com/adap/flower/pull/3864), " +"[#3688](https://github.com/adap/flower/pull/3688), " +"[#3562](https://github.com/adap/flower/pull/3562), " +"[#3641](https://github.com/adap/flower/pull/3641), " +"[#3384](https://github.com/adap/flower/pull/3384), " +"[#3634](https://github.com/adap/flower/pull/3634), " +"[#3823](https://github.com/adap/flower/pull/3823), " +"[#3793](https://github.com/adap/flower/pull/3793), " +"[#3707](https://github.com/adap/flower/pull/3707))" +msgstr "" 
+"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:822 +#: ../../source/ref-changelog.md:215 msgid "" -"Preview support for Flower clients written in C++. The C++ preview " -"includes a Flower client SDK and a quickstart code example that " -"demonstrates a simple C++ client using the SDK." +"Updated documentation includes new install instructions for different " +"shells, a new Flower Code Examples documentation landing page, new `flwr`" +" CLI docs and an updated federated XGBoost code example." msgstr "" -"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " -"演示了一个简单的 C++ 客户端。" -#: ../../source/ref-changelog.md:824 -msgid "" -"**Add experimental support for Python 3.10 and Python 3.11** " -"([#1135](https://github.com/adap/flower/pull/1135))" +#: ../../source/ref-changelog.md:219 +msgid "**Deprecate** `client_fn(cid: str)`" msgstr "" -"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " -"([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/ref-changelog.md:826 +#: ../../source/ref-changelog.md:221 msgid "" -"Python 3.10 is the latest stable release of Python and Python 3.11 is due" -" to be released in October. This Flower release adds experimental support" -" for both Python versions." +"`client_fn` used to have a signature `client_fn(cid: str) -> Client`. " +"This signature is now deprecated. Use the new signature " +"`client_fn(context: Context) -> Client` instead. The new argument " +"`context` allows accessing `node_id`, `node_config`, `run_config` and " +"other `Context` features. 
When running using the simulation engine (or " +"using `flower-supernode` with a custom `--node-config partition-id=...`)," +" `context.node_config[\"partition-id\"]` will return an `int` partition " +"ID that can be used with Flower Datasets to load a different partition of" +" the dataset on each simulated or deployed SuperNode." msgstr "" -"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " -"Python 版本的实验支持。" -#: ../../source/ref-changelog.md:828 +#: ../../source/ref-changelog.md:223 msgid "" -"**Aggregate custom metrics through user-provided functions** " -"([#1144](https://github.com/adap/flower/pull/1144))" -msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" +"**Deprecate passing** `Server/ServerConfig/Strategy/ClientManager` **to**" +" `ServerApp` **directly**" +msgstr "" -#: ../../source/ref-changelog.md:830 +#: ../../source/ref-changelog.md:225 msgid "" -"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" -" customize the strategy. Built-in strategies support two new arguments, " -"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " -"allow passing custom metric aggregation functions." +"Creating `ServerApp` using `ServerApp(config=config, strategy=strategy)` " +"is now deprecated. Instead of passing " +"`Server/ServerConfig/Strategy/ClientManager` to `ServerApp` directly, " +"pass them wrapped in a `server_fn(context: Context) -> " +"ServerAppComponents` function, like this: " +"`ServerApp(server_fn=server_fn)`. `ServerAppComponents` can hold " +"references to `Server/ServerConfig/Strategy/ClientManager`. In addition " +"to that, `server_fn` allows you to access `Context` (for example, to read" +" the `run_config`)." 
msgstr "" -"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " -"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" -#: ../../source/ref-changelog.md:832 +#: ../../source/ref-changelog.md:229 +#, fuzzy msgid "" -"**User-configurable round timeout** " -"([#1162](https://github.com/adap/flower/pull/1162))" -msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" +"**Remove support for `client_ids` in `start_simulation`** " +"([#3699](https://github.com/adap/flower/pull/3699))" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/ref-changelog.md:834 +#: ../../source/ref-changelog.md:231 msgid "" -"A new configuration value allows the round timeout to be set for " -"`start_server` and `start_simulation`. If the `config` dictionary " -"contains a `round_timeout` key (with a `float` value in seconds), the " -"server will wait *at least* `round_timeout` seconds before it closes the " -"connection." +"The (rarely used) feature that allowed passing custom `client_ids` to the" +" `start_simulation` function was removed. This removal is part of a " +"bigger effort to refactor the simulation engine and unify how the Flower " +"internals work in simulation and deployment." 
msgstr "" -"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " -"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" -#: ../../source/ref-changelog.md:836 +#: ../../source/ref-changelog.md:233 +#, fuzzy msgid "" -"**Enable both federated evaluation and centralized evaluation to be used " -"at the same time in all built-in strategies** " -"([#1091](https://github.com/adap/flower/pull/1091))" -msgstr "" -"**允许在所有内置策略中同时使用联邦评价和集中评估** " -"([#1091](https://github.com/adap/flower/pull/1091))" +"**Remove `flower-driver-api` and `flower-fleet-api`** " +"([#3418](https://github.com/adap/flower/pull/3418))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-changelog.md:838 +#: ../../source/ref-changelog.md:235 msgid "" -"Built-in strategies can now perform both federated evaluation (i.e., " -"client-side) and centralized evaluation (i.e., server-side) in the same " -"round. Federated evaluation can be disabled by setting `fraction_eval` to" -" `0.0`." +"The two deprecated CLI commands `flower-driver-api` and `flower-fleet-" +"api` were removed in an effort to streamline the SuperLink developer " +"experience. Use `flower-superlink` instead." msgstr "" -"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " -"`0.0`来禁用联邦评估。" -#: ../../source/ref-changelog.md:840 +#: ../../source/ref-changelog.md:237 +#, fuzzy +msgid "v1.9.0 (2024-06-10)" +msgstr "v1.3.0 (2023-02-06)" + +#: ../../source/ref-changelog.md:243 +#, fuzzy msgid "" -"**Two new Jupyter Notebook tutorials** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
" +"Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Mahdi Beitollahi`," +" `Robert Steiner`, `Taner Topal`, `Yan Gao`, `bapic`, `mohammadnaseri` " msgstr "" -"**两本新的 Jupyter Notebook 教程** " -"([#1141](https://github.com/adap/flower/pull/1141))" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:842 +#: ../../source/ref-changelog.md:247 +#, fuzzy msgid "" -"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " -"basic and intermediate Flower features:" -msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" +"**Introduce built-in authentication (preview)** " +"([#2946](https://github.com/adap/flower/pull/2946), " +"[#3388](https://github.com/adap/flower/pull/3388), " +"[#2948](https://github.com/adap/flower/pull/2948), " +"[#2917](https://github.com/adap/flower/pull/2917), " +"[#3386](https://github.com/adap/flower/pull/3386), " +"[#3308](https://github.com/adap/flower/pull/3308), " +"[#3001](https://github.com/adap/flower/pull/3001), " +"[#3409](https://github.com/adap/flower/pull/3409), " +"[#2999](https://github.com/adap/flower/pull/2999), " +"[#2979](https://github.com/adap/flower/pull/2979), " +"[#3389](https://github.com/adap/flower/pull/3389), " +"[#3503](https://github.com/adap/flower/pull/3503), " +"[#3366](https://github.com/adap/flower/pull/3366), " +"[#3357](https://github.com/adap/flower/pull/3357))" +msgstr "" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " 
+"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:844 +#: ../../source/ref-changelog.md:249 msgid "" -"*An Introduction to Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" +"Flower 1.9 introduces the first build-in version of client node " +"authentication. In previous releases, users often wrote glue code to " +"connect Flower to external authentication systems. With this release, the" +" SuperLink can authenticate SuperNodes using a built-in authentication " +"system. A new [how-to guide](https://flower.ai/docs/framework/how-to-" +"authenticate-supernodes.html) and a new [code " +"example](https://github.com/adap/flower/tree/main/examples/flower-" +"authentication) help you to get started." msgstr "" -"*联邦学习简介*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" -"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:846 +#: ../../source/ref-changelog.md:251 msgid "" -"*Using Strategies in Federated Learning*: [Open in " -"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" +"This is the first preview release of the Flower-native authentication " +"system. Many additional features are on the roadmap for upcoming Flower " +"releases - stay tuned." 
msgstr "" -"*在联邦学习中使用策略*: [在 Colab " -"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" -"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/ref-changelog.md:848 +#: ../../source/ref-changelog.md:253 +#, fuzzy msgid "" -"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"**Introduce end-to-end Docker support** " +"([#3483](https://github.com/adap/flower/pull/3483), " +"[#3266](https://github.com/adap/flower/pull/3266), " +"[#3390](https://github.com/adap/flower/pull/3390), " +"[#3283](https://github.com/adap/flower/pull/3283), " +"[#3285](https://github.com/adap/flower/pull/3285), " +"[#3391](https://github.com/adap/flower/pull/3391), " +"[#3403](https://github.com/adap/flower/pull/3403), " +"[#3458](https://github.com/adap/flower/pull/3458), " +"[#3533](https://github.com/adap/flower/pull/3533), " +"[#3453](https://github.com/adap/flower/pull/3453), " +"[#3486](https://github.com/adap/flower/pull/3486), " +"[#3290](https://github.com/adap/flower/pull/3290))" msgstr "" -"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " -"([#1076](https://github.com/adap/flower/pull/1076))" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:850 +#: ../../source/ref-changelog.md:255 msgid "" -"The new `FedAvgM` strategy implements Federated Averaging with Server " -"Momentum \\[Hsu et al., 2019\\]." -msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu et al., 2019\\]." +"Full Flower Next Docker support is here! 
With the release of Flower 1.9, " +"Flower provides stable Docker images for the Flower SuperLink, the Flower" +" SuperNode, and the Flower `ServerApp`. This set of images enables you to" +" run all Flower components in Docker. Check out the new [how-to " +"guide](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html) to get stated." +msgstr "" -#: ../../source/ref-changelog.md:852 +#: ../../source/ref-changelog.md:257 +#, fuzzy msgid "" -"**New advanced PyTorch code example** " -"([#1007](https://github.com/adap/flower/pull/1007))" -msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" +"**Re-architect Flower Next simulation engine** " +"([#3307](https://github.com/adap/flower/pull/3307), " +"[#3355](https://github.com/adap/flower/pull/3355), " +"[#3272](https://github.com/adap/flower/pull/3272), " +"[#3273](https://github.com/adap/flower/pull/3273), " +"[#3417](https://github.com/adap/flower/pull/3417), " +"[#3281](https://github.com/adap/flower/pull/3281), " +"[#3343](https://github.com/adap/flower/pull/3343), " +"[#3326](https://github.com/adap/flower/pull/3326))" +msgstr "" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:854 +#: ../../source/ref-changelog.md:259 msgid "" -"A new code example (`advanced_pytorch`) demonstrates advanced Flower " -"concepts with PyTorch." -msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower 概念。" +"Flower Next simulations now use a new in-memory `Driver` that improves " +"the reliability of simulations, especially in notebook environments. 
This" +" is a significant step towards a complete overhaul of the Flower Next " +"simulation architecture." +msgstr "" -#: ../../source/ref-changelog.md:856 +#: ../../source/ref-changelog.md:261 +#, fuzzy msgid "" -"**New JAX code example** " -"([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143))" +"**Upgrade simulation engine** " +"([#3354](https://github.com/adap/flower/pull/3354), " +"[#3378](https://github.com/adap/flower/pull/3378), " +"[#3262](https://github.com/adap/flower/pull/3262), " +"[#3435](https://github.com/adap/flower/pull/3435), " +"[#3501](https://github.com/adap/flower/pull/3501), " +"[#3482](https://github.com/adap/flower/pull/3482), " +"[#3494](https://github.com/adap/flower/pull/3494))" msgstr "" -"**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " -"[#1143](https://github.com/adap/flower/pull/1143)" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-changelog.md:858 +#: ../../source/ref-changelog.md:263 msgid "" -"A new code example (`jax_from_centralized_to_federated`) shows federated " -"learning with JAX and Flower." -msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" +"The Flower Next simulation engine comes with improved and configurable " +"logging. The Ray-based simulation backend in Flower 1.9 was updated to " +"use Ray 2.10." 
+msgstr "" -#: ../../source/ref-changelog.md:862 +#: ../../source/ref-changelog.md:265 +#, fuzzy msgid "" -"New option to keep Ray running if Ray was already initialized in " -"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" -msgstr "" -"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " -"运行([#1177](https://github.com/adap/flower/pull/1177))" +"**Introduce FedPFT baseline** " +"([#3268](https://github.com/adap/flower/pull/3268))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:863 +#: ../../source/ref-changelog.md:267 msgid "" -"Add support for custom `ClientManager` as a `start_simulation` parameter " -"([#1171](https://github.com/adap/flower/pull/1171))" +"FedPFT allows you to perform one-shot Federated Learning by leveraging " +"widely available foundational models, dramatically reducing communication" +" costs while delivering high performing models. This is work led by Mahdi" +" Beitollahi from Huawei Noah's Ark Lab (Montreal, Canada). 
Read all the " +"details in their paper: \"Parametric Feature Transfer: One-shot Federated" +" Learning with Foundation Models\" " +"([arxiv](https://arxiv.org/abs/2402.01862))" msgstr "" -"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " -"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/ref-changelog.md:864 +#: ../../source/ref-changelog.md:269 +#, fuzzy msgid "" -"New documentation for [implementing " -"strategies](https://flower.ai/docs/framework/how-to-implement-" -"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175))" +"**Launch additional** `flwr new` **templates for Apple MLX, Hugging Face " +"Transformers, scikit-learn and TensorFlow** " +"([#3291](https://github.com/adap/flower/pull/3291), " +"[#3139](https://github.com/adap/flower/pull/3139), " +"[#3284](https://github.com/adap/flower/pull/3284), " +"[#3251](https://github.com/adap/flower/pull/3251), " +"[#3376](https://github.com/adap/flower/pull/3376), " +"[#3287](https://github.com/adap/flower/pull/3287))" msgstr "" -"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" -" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " -"[#1175](https://github.com/adap/flower/pull/1175)" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/ref-changelog.md:865 +#: ../../source/ref-changelog.md:271 msgid "" -"New mobile-friendly documentation theme " -"([#1174](https://github.com/adap/flower/pull/1174))" -msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" +"The `flwr` CLI's `flwr new` command is starting to become everone's " +"favorite way of creating new 
Flower projects. This release introduces " +"additional `flwr new` templates for Apple MLX, Hugging Face Transformers," +" scikit-learn and TensorFlow. In addition to that, existing templates " +"also received updates." +msgstr "" -#: ../../source/ref-changelog.md:866 +#: ../../source/ref-changelog.md:273 +#, fuzzy msgid "" -"Limit version range for (optional) `ray` dependency to include only " -"compatible releases (`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"**Refine** `RecordSet` **API** " +"([#3209](https://github.com/adap/flower/pull/3209), " +"[#3331](https://github.com/adap/flower/pull/3331), " +"[#3334](https://github.com/adap/flower/pull/3334), " +"[#3335](https://github.com/adap/flower/pull/3335), " +"[#3375](https://github.com/adap/flower/pull/3375), " +"[#3368](https://github.com/adap/flower/pull/3368))" msgstr "" -"限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " -"([#1205](https://github.com/adap/flower/pull/1205))" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:870 +#: ../../source/ref-changelog.md:275 msgid "" -"**Remove deprecated support for Python 3.6** " -"([#871](https://github.com/adap/flower/pull/871))" -msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" +"`RecordSet` is part of the Flower Next low-level API preview release. In " +"Flower 1.9, `RecordSet` received a number of usability improvements that " +"make it easier to build `RecordSet`-based `ServerApp`s and `ClientApp`s." 
+msgstr "" -#: ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:277 +#, fuzzy msgid "" -"**Remove deprecated KerasClient** " -"([#857](https://github.com/adap/flower/pull/857))" -msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" - -#: ../../source/ref-changelog.md:872 -msgid "" -"**Remove deprecated no-op extra installs** " -"([#973](https://github.com/adap/flower/pull/973))" -msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" - -#: ../../source/ref-changelog.md:873 -msgid "" -"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " -"([#869](https://github.com/adap/flower/pull/869))" +"**Beautify logging** ([#3379](https://github.com/adap/flower/pull/3379), " +"[#3430](https://github.com/adap/flower/pull/3430), " +"[#3461](https://github.com/adap/flower/pull/3461), " +"[#3360](https://github.com/adap/flower/pull/3360), " +"[#3433](https://github.com/adap/flower/pull/3433))" msgstr "" -"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " -"([#869](https://github.com/adap/flower/pull/869))" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-changelog.md:874 +#: ../../source/ref-changelog.md:279 msgid "" -"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " -"([#1107](https://github.com/adap/flower/pull/1107))" +"Logs received a substantial update. Not only are logs now much nicer to " +"look at, but they are also more configurable." 
msgstr "" -"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** " -"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/ref-changelog.md:875 +#: ../../source/ref-changelog.md:281 +#, fuzzy msgid "" -"**Remove deprecated DefaultStrategy strategy** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**Improve reliability** " +"([#3564](https://github.com/adap/flower/pull/3564), " +"[#3561](https://github.com/adap/flower/pull/3561), " +"[#3566](https://github.com/adap/flower/pull/3566), " +"[#3462](https://github.com/adap/flower/pull/3462), " +"[#3225](https://github.com/adap/flower/pull/3225), " +"[#3514](https://github.com/adap/flower/pull/3514), " +"[#3535](https://github.com/adap/flower/pull/3535), " +"[#3372](https://github.com/adap/flower/pull/3372))" msgstr "" -"**删除过时的 DefaultStrategy 策略** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:876 +#: ../../source/ref-changelog.md:283 msgid "" -"**Remove deprecated support for eval_fn accuracy return value** " -"([#1142](https://github.com/adap/flower/pull/1142))" +"Flower 1.9 includes reliability improvements across many parts of the " +"system. One example is a much improved SuperNode shutdown procedure." 
msgstr "" -"**删除已过时的对 eval_fn 返回值准确性的支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/ref-changelog.md:877 +#: ../../source/ref-changelog.md:285 +#, fuzzy msgid "" -"**Remove deprecated support for passing initial parameters as NumPy " -"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +"**Update Swift and C++ SDKs** " +"([#3321](https://github.com/adap/flower/pull/3321), " +"[#2763](https://github.com/adap/flower/pull/2763))" msgstr "" -"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " -"([#1142](https://github.com/adap/flower/pull/1142))" - -#: ../../source/ref-changelog.md:879 -msgid "v0.18.0 (2022-02-28)" -msgstr "v0.18.0 (2022-02-28)" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/ref-changelog.md:883 +#: ../../source/ref-changelog.md:287 msgid "" -"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " -"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" +"In the C++ SDK, communication-related code is now separate from main " +"client logic. A new abstract class `Communicator` has been introduced " +"alongside a gRPC implementation of it." msgstr "" -"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " -"([#866](https://github.com/adap/flower/pull/866), " -"[#872](https://github.com/adap/flower/pull/872), " -"[#833](https://github.com/adap/flower/pull/833), " -"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/ref-changelog.md:885 +#: ../../source/ref-changelog.md:289 msgid "" -"Simulations (using the Virtual Client Engine through `start_simulation`) " -"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " -"installing Flower with the `simulation` extra (`pip install " -"'flwr[simulation]'`)." 
+"**Improve testing, tooling and CI/CD infrastructure** " +"([#3294](https://github.com/adap/flower/pull/3294), " +"[#3282](https://github.com/adap/flower/pull/3282), " +"[#3311](https://github.com/adap/flower/pull/3311), " +"[#2878](https://github.com/adap/flower/pull/2878), " +"[#3333](https://github.com/adap/flower/pull/3333), " +"[#3255](https://github.com/adap/flower/pull/3255), " +"[#3349](https://github.com/adap/flower/pull/3349), " +"[#3400](https://github.com/adap/flower/pull/3400), " +"[#3401](https://github.com/adap/flower/pull/3401), " +"[#3399](https://github.com/adap/flower/pull/3399), " +"[#3346](https://github.com/adap/flower/pull/3346), " +"[#3398](https://github.com/adap/flower/pull/3398), " +"[#3397](https://github.com/adap/flower/pull/3397), " +"[#3347](https://github.com/adap/flower/pull/3347), " +"[#3502](https://github.com/adap/flower/pull/3502), " +"[#3387](https://github.com/adap/flower/pull/3387), " +"[#3542](https://github.com/adap/flower/pull/3542), " +"[#3396](https://github.com/adap/flower/pull/3396), " +"[#3496](https://github.com/adap/flower/pull/3496), " +"[#3465](https://github.com/adap/flower/pull/3465), " +"[#3473](https://github.com/adap/flower/pull/3473), " +"[#3484](https://github.com/adap/flower/pull/3484), " +"[#3521](https://github.com/adap/flower/pull/3521), " +"[#3363](https://github.com/adap/flower/pull/3363), " +"[#3497](https://github.com/adap/flower/pull/3497), " +"[#3464](https://github.com/adap/flower/pull/3464), " +"[#3495](https://github.com/adap/flower/pull/3495), " +"[#3478](https://github.com/adap/flower/pull/3478), " +"[#3271](https://github.com/adap/flower/pull/3271))" msgstr "" -"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " -"`simulation` (`pip install 'flwr[simulation]'`)后,模拟(通过 `start_simulation` " -"使用虚拟客户端引擎)现在可以更流畅地运行。" -#: ../../source/ref-changelog.md:887 +#: ../../source/ref-changelog.md:291 msgid "" -"**New Jupyter Notebook code example** " 
-"([#833](https://github.com/adap/flower/pull/833))" +"As always, the Flower tooling, testing, and CI/CD infrastructure has " +"received many updates." msgstr "" -"**新的 Jupyter Notebook 代码示例** " -"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/ref-changelog.md:889 +#: ../../source/ref-changelog.md:293 msgid "" -"A new code example (`quickstart_simulation`) demonstrates Flower " -"simulations using the Virtual Client Engine through Jupyter Notebook " -"(incl. Google Colab)." +"**Improve documentation** " +"([#3530](https://github.com/adap/flower/pull/3530), " +"[#3539](https://github.com/adap/flower/pull/3539), " +"[#3425](https://github.com/adap/flower/pull/3425), " +"[#3520](https://github.com/adap/flower/pull/3520), " +"[#3286](https://github.com/adap/flower/pull/3286), " +"[#3516](https://github.com/adap/flower/pull/3516), " +"[#3523](https://github.com/adap/flower/pull/3523), " +"[#3545](https://github.com/adap/flower/pull/3545), " +"[#3498](https://github.com/adap/flower/pull/3498), " +"[#3439](https://github.com/adap/flower/pull/3439), " +"[#3440](https://github.com/adap/flower/pull/3440), " +"[#3382](https://github.com/adap/flower/pull/3382), " +"[#3559](https://github.com/adap/flower/pull/3559), " +"[#3432](https://github.com/adap/flower/pull/3432), " +"[#3278](https://github.com/adap/flower/pull/3278), " +"[#3371](https://github.com/adap/flower/pull/3371), " +"[#3519](https://github.com/adap/flower/pull/3519), " +"[#3267](https://github.com/adap/flower/pull/3267), " +"[#3204](https://github.com/adap/flower/pull/3204), " +"[#3274](https://github.com/adap/flower/pull/3274))" msgstr "" -"新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " -"Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" - -#: ../../source/ref-changelog.md:891 -msgid "" -"**Client properties (feature preview)** " -"([#795](https://github.com/adap/flower/pull/795))" -msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" - -#: 
../../source/ref-changelog.md:893 -msgid "" -"Clients can implement a new method `get_properties` to enable server-side" -" strategies to query client properties." -msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" - -#: ../../source/ref-changelog.md:895 -msgid "" -"**Experimental Android support with TFLite** " -"([#865](https://github.com/adap/flower/pull/865))" -msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/ref-changelog.md:897 +#: ../../source/ref-changelog.md:295 msgid "" -"Android support has finally arrived in `main`! Flower is both client-" -"agnostic and framework-agnostic by design. One can integrate arbitrary " -"client platforms and with this release, using Flower on Android has " -"become a lot easier." +"As always, the Flower documentation has received many updates. Notable " +"new pages include:" msgstr "" -"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " -"Flower 就变得更容易了。" -#: ../../source/ref-changelog.md:899 +#: ../../source/ref-changelog.md:297 msgid "" -"The example uses TFLite on the client side, along with a new " -"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " -"still experimental, but they are a first step towards a fully-fledged " -"Android SDK and a unified `FedAvg` implementation that integrated the new" -" functionality from `FedAvgAndroid`." 
+"[How-to upgrate to Flower Next (Flower Next migration " +"guide)](https://flower.ai/docs/framework/how-to-upgrade-to-flower-" +"next.html)" msgstr "" -"该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " -"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " -"`FedAvg`实现迈出的第一步。" -#: ../../source/ref-changelog.md:901 +#: ../../source/ref-changelog.md:299 +#, fuzzy msgid "" -"**Make gRPC keepalive time user-configurable and decrease default " -"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +"[How-to run Flower using Docker](https://flower.ai/docs/framework/how-to-" +"run-flower-using-docker.html)" msgstr "" -"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " -"([#1069](https://github.com/adap/flower/pull/1069))" +"`TensorFlow快速入门 (教程) `_" -#: ../../source/ref-changelog.md:903 +#: ../../source/ref-changelog.md:301 msgid "" -"The default gRPC keepalive time has been reduced to increase the " -"compatibility of Flower with more cloud environments (for example, " -"Microsoft Azure). Users can configure the keepalive time to customize the" -" gRPC stack based on specific requirements." 
+"[Flower Mods reference](https://flower.ai/docs/framework/ref-" +"api/flwr.client.mod.html#module-flwr.client.mod)" msgstr "" -"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " -"keepalive 时间,自定义 gRPC 堆栈。" -#: ../../source/ref-changelog.md:905 +#: ../../source/ref-changelog.md:303 +#, fuzzy msgid "" -"**New differential privacy example using Opacus and PyTorch** " -"([#805](https://github.com/adap/flower/pull/805))" +"**General updates to Flower Examples** " +"([#3205](https://github.com/adap/flower/pull/3205), " +"[#3226](https://github.com/adap/flower/pull/3226), " +"[#3211](https://github.com/adap/flower/pull/3211), " +"[#3252](https://github.com/adap/flower/pull/3252), " +"[#3427](https://github.com/adap/flower/pull/3427), " +"[#3410](https://github.com/adap/flower/pull/3410), " +"[#3426](https://github.com/adap/flower/pull/3426), " +"[#3228](https://github.com/adap/flower/pull/3228), " +"[#3342](https://github.com/adap/flower/pull/3342), " +"[#3200](https://github.com/adap/flower/pull/3200), " +"[#3202](https://github.com/adap/flower/pull/3202), " +"[#3394](https://github.com/adap/flower/pull/3394), " +"[#3488](https://github.com/adap/flower/pull/3488), " +"[#3329](https://github.com/adap/flower/pull/3329), " +"[#3526](https://github.com/adap/flower/pull/3526), " +"[#3392](https://github.com/adap/flower/pull/3392), " +"[#3474](https://github.com/adap/flower/pull/3474), " +"[#3269](https://github.com/adap/flower/pull/3269))" msgstr "" -"**使用 Opacus 和 PyTorch 的新差分隐私示例** " -"([#805](https://github.com/adap/flower/pull/805))" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " 
+"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/ref-changelog.md:907 -msgid "" -"A new code example (`opacus`) demonstrates differentially-private " -"federated learning with Opacus, PyTorch, and Flower." -msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" +#: ../../source/ref-changelog.md:305 +#, fuzzy +msgid "As always, Flower code examples have received many updates." +msgstr "许多 \"Flower \"代码示例得到了大幅更新。" -#: ../../source/ref-changelog.md:909 +#: ../../source/ref-changelog.md:307 msgid "" -"**New Hugging Face Transformers code example** " -"([#863](https://github.com/adap/flower/pull/863))" -msgstr "" -"**新的Hugging Face Transformers代码示例** " -"([#863](https://github.com/adap/flower/pull/863))" - -#: ../../source/ref-changelog.md:911 -msgid "" -"A new code example (`quickstart_huggingface`) demonstrates usage of " -"Hugging Face Transformers with Flower." -msgstr "新的代码示例(`quickstart_huggingface`)证明了结合Flower和Hugging Face Transformers的实用性。" - -#: ../../source/ref-changelog.md:913 -msgid "" -"**New MLCube code example** " -"([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" -msgstr "" -"**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " -"[#1034](https://github.com/adap/flower/pull/1034), " -"[#1065](https://github.com/adap/flower/pull/1065), " -"[#1090](https://github.com/adap/flower/pull/1090))" - -#: ../../source/ref-changelog.md:915 -msgid "" -"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " -"with Flower." 
-msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" - -#: ../../source/ref-changelog.md:917 -msgid "" -"**SSL-enabled server and client** " -"([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" +"**General improvements** " +"([#3532](https://github.com/adap/flower/pull/3532), " +"[#3318](https://github.com/adap/flower/pull/3318), " +"[#3565](https://github.com/adap/flower/pull/3565), " +"[#3296](https://github.com/adap/flower/pull/3296), " +"[#3305](https://github.com/adap/flower/pull/3305), " +"[#3246](https://github.com/adap/flower/pull/3246), " +"[#3224](https://github.com/adap/flower/pull/3224), " +"[#3475](https://github.com/adap/flower/pull/3475), " +"[#3297](https://github.com/adap/flower/pull/3297), " +"[#3317](https://github.com/adap/flower/pull/3317), " +"[#3429](https://github.com/adap/flower/pull/3429), " +"[#3196](https://github.com/adap/flower/pull/3196), " +"[#3534](https://github.com/adap/flower/pull/3534), " +"[#3240](https://github.com/adap/flower/pull/3240), " +"[#3365](https://github.com/adap/flower/pull/3365), " +"[#3407](https://github.com/adap/flower/pull/3407), " +"[#3563](https://github.com/adap/flower/pull/3563), " +"[#3344](https://github.com/adap/flower/pull/3344), " +"[#3330](https://github.com/adap/flower/pull/3330), " +"[#3436](https://github.com/adap/flower/pull/3436), " +"[#3300](https://github.com/adap/flower/pull/3300), " +"[#3327](https://github.com/adap/flower/pull/3327), " +"[#3254](https://github.com/adap/flower/pull/3254), " +"[#3253](https://github.com/adap/flower/pull/3253), " +"[#3419](https://github.com/adap/flower/pull/3419), " +"[#3289](https://github.com/adap/flower/pull/3289), " +"[#3208](https://github.com/adap/flower/pull/3208), " 
+"[#3245](https://github.com/adap/flower/pull/3245), " +"[#3319](https://github.com/adap/flower/pull/3319), " +"[#3203](https://github.com/adap/flower/pull/3203), " +"[#3423](https://github.com/adap/flower/pull/3423), " +"[#3352](https://github.com/adap/flower/pull/3352), " +"[#3292](https://github.com/adap/flower/pull/3292), " +"[#3261](https://github.com/adap/flower/pull/3261))" msgstr "" -"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " -"[#844](https://github.com/adap/flower/pull/844), " -"[#845](https://github.com/adap/flower/pull/845), " -"[#847](https://github.com/adap/flower/pull/847), " -"[#993](https://github.com/adap/flower/pull/993), " -"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:919 -msgid "" -"SSL enables secure encrypted connections between clients and servers. " -"This release open-sources the Flower secure gRPC implementation to make " -"encrypted communication channels accessible to all Flower users." -msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" - -#: ../../source/ref-changelog.md:921 -msgid "" -"**Updated** `FedAdam` **and** `FedYogi` **strategies** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" -msgstr "" -"**更新**`FedAdam`**和**`FedYogi`**战略** " -"([#885](https://github.com/adap/flower/pull/885), " -"[#895](https://github.com/adap/flower/pull/895))" +#: ../../source/ref-changelog.md:311 +#, fuzzy +msgid "**Deprecate Python 3.8 support**" +msgstr "** 过时的 Python 3.8**" -#: ../../source/ref-changelog.md:923 +#: ../../source/ref-changelog.md:313 +#, fuzzy msgid "" -"`FedAdam` and `FedAdam` match the latest version of the Adaptive " -"Federated Optimization paper." -msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" +"Python 3.8 will stop receiving security fixes in [October " +"2024](https://devguide.python.org/versions/). 
Support for Python 3.8 is " +"now deprecated and will be removed in an upcoming release." +msgstr "由于 Python 3.8 已于 2024-10-01 弃用 (EOL),对 Python 3.8 的支持现已废弃,并将在即将发布的版本中移除。" -#: ../../source/ref-changelog.md:925 +#: ../../source/ref-changelog.md:315 +#, fuzzy msgid "" -"**Initialize** `start_simulation` **with a list of client IDs** " -"([#860](https://github.com/adap/flower/pull/860))" +"**Deprecate (experimental)** `flower-driver-api` **and** `flower-fleet-" +"api` ([#3416](https://github.com/adap/flower/pull/3416), " +"[#3420](https://github.com/adap/flower/pull/3420))" msgstr "" -"**初始化** `start_simulation` **使用客户端 ID 列表** " -"([#860](https://github.com/adap/flower/pull/860))" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:927 +#: ../../source/ref-changelog.md:317 msgid "" -"`start_simulation` can now be called with a list of client IDs " -"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " -"`client_fn` whenever a client needs to be initialized, which can make it " -"easier to load data partitions that are not accessible through `int` " -"identifiers." +"Flower 1.9 deprecates the two (experimental) commands `flower-driver-api`" +" and `flower-fleet-api`. Both commands will be removed in an upcoming " +"release. Use `flower-superlink` instead."
msgstr "" -"现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " -"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " -"`int` 标识符访问的数据分区。" -#: ../../source/ref-changelog.md:931 +#: ../../source/ref-changelog.md:319 +#, fuzzy msgid "" -"Update `num_examples` calculation in PyTorch code examples in " -"([#909](https://github.com/adap/flower/pull/909))" +"**Deprecate** `--server` **in favor of** `--superlink` " +"([#3518](https://github.com/adap/flower/pull/3518))" msgstr "" -"更新 PyTorch 代码示例中的 \"num_examples \"计算 " -"([#909](https://github.com/adap/flower/pull/909))" +"**启用向** `start_simulation` 传递** `Server` 实例 " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/ref-changelog.md:932 +#: ../../source/ref-changelog.md:321 msgid "" -"Expose Flower version through `flwr.__version__` " -"([#952](https://github.com/adap/flower/pull/952))" +"The commands `flower-server-app` and `flower-client-app` should use " +"`--superlink` instead of the now deprecated `--server`. Support for " +"`--server` will be removed in a future release." 
msgstr "" -"通过 `flwr.__version__` 公开 Flower 版本 " -"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/ref-changelog.md:933 +#: ../../source/ref-changelog.md:325 msgid "" -"`start_server` in `app.py` now returns a `History` object containing " -"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +"**Replace** `flower-superlink` **CLI option** `--certificates` **with** " +"`--ssl-ca-certfile` **,** `--ssl-certfile` **and** `--ssl-keyfile` " +"([#3512](https://github.com/adap/flower/pull/3512), " +"[#3408](https://github.com/adap/flower/pull/3408))" msgstr "" -"`app.py`中的 `start_server`现在会返回一个 `History` " -"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/ref-changelog.md:934 +#: ../../source/ref-changelog.md:327 msgid "" -"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " -"([#978](https://github.com/adap/flower/pull/978))" +"SSL-related `flower-superlink` CLI arguments were restructured in an " +"incompatible way. Instead of passing a single `--certificates` flag with " +"three values, you now need to pass three flags (`--ssl-ca-certfile`, " +"`--ssl-certfile` and `--ssl-keyfile`) with one value each. Check out the " +"[SSL connections](https://flower.ai/docs/framework/how-to-enable-ssl-" +"connections.html) documentation page for details." 
msgstr "" -"使 `max_workers`(由 " -"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" - -#: ../../source/ref-changelog.md:935 -msgid "" -"Increase sleep time after server start to three seconds in all code " -"examples ([#1086](https://github.com/adap/flower/pull/1086))" -msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" - -#: ../../source/ref-changelog.md:936 -msgid "" -"Added a new FAQ section to the documentation " -"([#948](https://github.com/adap/flower/pull/948))" -msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/ref-changelog.md:937 +#: ../../source/ref-changelog.md:329 +#, fuzzy msgid "" -"And many more under-the-hood changes, library updates, documentation " -"changes, and tooling improvements!" -msgstr "还有更多底层更改、库更新、文档更改和工具改进!" +"**Remove SuperLink** `--vce` **option** " +"([#3513](https://github.com/adap/flower/pull/3513))" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/ref-changelog.md:941 +#: ../../source/ref-changelog.md:331 msgid "" -"**Removed** `flwr_example` **and** `flwr_experimental` **from release " -"build** ([#869](https://github.com/adap/flower/pull/869))" +"Instead of separately starting a SuperLink and a `ServerApp` for " +"simulation, simulations must now be started using the single `flower-" +"simulation` command." msgstr "" -"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " -"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/ref-changelog.md:943 +#: ../../source/ref-changelog.md:333 +#, fuzzy msgid "" -"The packages `flwr_example` and `flwr_experimental` have been deprecated " -"since Flower 0.12.0 and they are not longer included in Flower release " -"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " -"an upcoming release." 
+"**Merge** `--grpc-rere` **and** `--rest` **SuperLink options** " +"([#3527](https://github.com/adap/flower/pull/3527))" msgstr "" -"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " -"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" -"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" - -#: ../../source/ref-changelog.md:945 -msgid "v0.17.0 (2021-09-24)" -msgstr "v0.17.0 (2021-09-24)" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-changelog.md:949 +#: ../../source/ref-changelog.md:335 msgid "" -"**Experimental virtual client engine** " -"([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" +"To simplify the usage of `flower-superlink`, previously separate sets of " +"CLI options for gRPC and REST were merged into one unified set of " +"options. Consult the [Flower CLI reference " +"documentation](https://flower.ai/docs/framework/ref-api-cli.html) for " +"details." msgstr "" -"**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " -"[#790](https://github.com/adap/flower/pull/790) " -"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/ref-changelog.md:951 -msgid "" -"One of Flower's goals is to enable research at scale. This release " -"enables a first (experimental) peek at a major new feature, codenamed the" -" virtual client engine. Virtual clients enable simulations that scale to " -"a (very) large number of clients on a single machine or compute cluster. " -"The easiest way to test the new functionality is to look at the two new " -"code examples called `quickstart_simulation` and `simulation_pytorch`." 
-msgstr "" -"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " -"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " -"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" +#: ../../source/ref-changelog.md:337 +#, fuzzy +msgid "v1.8.0 (2024-04-03)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:953 +#: ../../source/ref-changelog.md:343 +#, fuzzy msgid "" -"The feature is still experimental, so there's no stability guarantee for " -"the API. It's also not quite ready for prime time and comes with a few " -"known caveats. However, those who are curious are encouraged to try it " -"out and share their thoughts." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata " +"Nugraha`, `Danny`, `Gustavo Bertoli`, `Heng Pan`, `Ikko Eltociear " +"Ashimine`, `Jack Cook`, `Javier`, `Raj Parekh`, `Robert Steiner`, " +"`Sebastian van der Voort`, `Taner Topal`, `Yan Gao`, `mohammadnaseri`, " +"`tabdar-khan` " msgstr "" -"该功能仍处于试验阶段,因此无法保证 API " -"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:955 +#: ../../source/ref-changelog.md:347 +#, fuzzy msgid "" -"**New built-in strategies** " -"([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822))" -msgstr "" -"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " -"[#822](https://github.com/adap/flower/pull/822)" +"**Introduce Flower Next high-level API (stable)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" +msgstr 
"" +"**介绍 Flower Next 高级应用程序接口(稳定版)** " +"([#3002](https://github.com/adap/flower/pull/3002), " +"[#2934](https://github.com/adap/flower/pull/2934), " +"[#2958](https://github.com/adap/flower/pull/2958), " +"[#3173](https://github.com/adap/flower/pull/3173), " +"[#3174](https://github.com/adap/flower/pull/3174), " +"[#2923](https://github.com/adap/flower/pull/2923), " +"[#2691](https://github.com/adap/flower/pull/2691), " +"[#3079](https://github.com/adap/flower/pull/3079), " +"[#2961](https://github.com/adap/flower/pull/2961), " +"[#2924](https://github.com/adap/flower/pull/2924), " +"[#3166](https://github.com/adap/flower/pull/3166), " +"[#3031](https://github.com/adap/flower/pull/3031), " +"[#3057](https://github.com/adap/flower/pull/3057), " +"[#3000](https://github.com/adap/flower/pull/3000), " +"[#3113](https://github.com/adap/flower/pull/3113), " +"[#2957](https://github.com/adap/flower/pull/2957), " +"[#3183](https://github.com/adap/flower/pull/3183), " +"[#3180](https://github.com/adap/flower/pull/3180), " +"[#3035](https://github.com/adap/flower/pull/3035), " +"[#3189](https://github.com/adap/flower/pull/3189), " +"[#3185](https://github.com/adap/flower/pull/3185), " +"[#3190](https://github.com/adap/flower/pull/3190), " +"[#3191](https://github.com/adap/flower/pull/3191), " +"[#3195](https://github.com/adap/flower/pull/3195), " +"[#3197](https://github.com/adap/flower/pull/3197))" -#: ../../source/ref-changelog.md:957 +#: ../../source/ref-changelog.md:349 +#, fuzzy msgid "" -"FedYogi - Federated learning strategy using Yogi on server-side. " -"Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" +"The Flower Next high-level API is stable! Flower Next is the future of " +"Flower - all new features (like Flower Mods) will be built on top of it. 
" +"You can start to migrate your existing projects to Flower Next by using " +"`ServerApp` and `ClientApp` (check out `quickstart-pytorch` or " +"`quickstart-tensorflow`, a detailed migration guide will follow shortly)." +" Flower Next allows you to run multiple projects concurrently (we call " +"this multi-run) and execute the same project in either simulation " +"environments or deployment environments without having to change a single" +" line of code. The best part? It's fully compatible with existing Flower " +"projects that use `Strategy`, `NumPyClient` & co." +msgstr "" +"Flower Next 高级应用程序接口已经稳定!Flower Next 是 Flower 的未来 - 所有新功能(如 Flower " +"Mods)都将构建在它之上。您可以使用 `ServerApp` 和 `ClientApp` 开始将现有项目迁移到 Flower Next(请查看 " +"`quickstart-pytorch` 或 `quickstart-tensorflow` ,详细的迁移指南将在不久后发布)。Flower " +"Next 允许您同时运行多个项目(我们称之为多重运行),并在模拟环境或部署环境中执行同一项目,而无需更改任何代码。最棒的是什么?它与使用 " +"`Strategy`、`NumPyClient` 等的现有 Flower 项目完全兼容。" -#: ../../source/ref-changelog.md:958 +#: ../../source/ref-changelog.md:351 +#, fuzzy msgid "" -"FedAdam - Federated learning strategy using Adam on server-side. 
" -"Implementation based on https://arxiv.org/abs/2003.00295" -msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" +"**Introduce Flower Next low-level API (preview)** " +"([#3062](https://github.com/adap/flower/pull/3062), " +"[#3034](https://github.com/adap/flower/pull/3034), " +"[#3069](https://github.com/adap/flower/pull/3069))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-changelog.md:960 +#: ../../source/ref-changelog.md:353 +#, fuzzy msgid "" -"**New PyTorch Lightning code example** " -"([#617](https://github.com/adap/flower/pull/617))" +"In addition to the Flower Next *high-level* API that uses `Strategy`, " +"`NumPyClient` & co, Flower 1.8 also comes with a preview version of the " +"new Flower Next *low-level* API. The low-level API allows for granular " +"control of every aspect of the learning process by sending/receiving " +"individual messages to/from client nodes. The new `ServerApp` supports " +"registering a custom `main` function that allows writing custom training " +"loops for methods like async FL, cyclic training, or federated analytics." +" The new `ClientApp` supports registering `train`, `evaluate` and `query`" +" functions that can access the raw message received from the `ServerApp`." +" New abstractions like `RecordSet`, `Message` and `Context` further " +"enable sending multiple models, multiple sets of config values and " +"metrics, stateful computations on the client node and implementations of " +"custom SMPC protocols, to name just a few." 
msgstr "" -"**新的 PyTorch Lightning 代码示例** " -"([#617](https://github.com/adap/flower/pull/617))" +"除了使用 \"Strategy\"、\"NumPyClient \"等的 Flower Next 高级应用程序接口外,Flower 1.8 " +"还提供了新的 Flower Next " +"低级应用程序接口的预览版。低级应用程序接口允许通过向/从客户端节点发送/接收单个消息,对学习过程的各个方面进行细粒度控制。新的 " +"\"ServerApp \"支持注册一个自定义的 \"main \"函数,允许为异步FL、循环训练或联合分析等方法编写自定义训练循环。新的 " +"\"ClientApp \"支持注册 \"训练\"、\"评估 \"和 \"查询 \"函数,这些函数可以访问从 \"ServerApp " +"\"接收到的原始信息。新的抽象(如 \"RecordSet\"、\"Message \"和 " +"\"Context\")进一步支持发送多个模型、多套配置值和指标、客户端节点上的有状态计算以及自定义 SMPC 协议的实现等。" -#: ../../source/ref-changelog.md:962 +#: ../../source/ref-changelog.md:355 +#, fuzzy msgid "" -"**New Variational Auto-Encoder code example** " -"([#752](https://github.com/adap/flower/pull/752))" -msgstr "**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" +"**Introduce Flower Mods (preview)** " +"([#3054](https://github.com/adap/flower/pull/3054), " +"[#2911](https://github.com/adap/flower/pull/2911), " +"[#3083](https://github.com/adap/flower/pull/3083))" +msgstr "" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:964 +#: ../../source/ref-changelog.md:357 +#, fuzzy msgid "" -"**New scikit-learn code example** " -"([#748](https://github.com/adap/flower/pull/748))" -msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" +"Flower Modifiers (we call them Mods) can intercept messages and analyze, " +"edit or handle them directly. Mods can be used to develop pluggable " +"modules that work across different projects. Flower 1.8 already includes " +"mods to log the size of a message, the number of parameters sent over the" +" network, differential privacy with fixed clipping and adaptive clipping," +" local differential privacy and secure aggregation protocols SecAgg and " +"SecAgg+. 
The Flower Mods API is released as a preview, but researchers " +"can already use it to experiment with arbirtrary SMPC protocols." +msgstr "" +"Flower Modifiers(我们称之为 " +"Mods)可以拦截信息,并直接对其进行分析、编辑或处理。修改器可用于开发可在不同项目中使用的可插拔模块。Flower 1.8 " +"已经包含了记录信息大小、通过网络发送的参数数量、固定剪切和自适应剪切的差分隐私、本地差分隐私以及安全聚合协议 SecAgg 和 SecAgg+ 的" +" Mods。Flower Mods API 作为预览版发布,但研究人员已经可以用它来试验任意的 SMPC 协议。" -#: ../../source/ref-changelog.md:966 +#: ../../source/ref-changelog.md:359 +#, fuzzy msgid "" -"**New experimental TensorBoard strategy** " -"([#789](https://github.com/adap/flower/pull/789))" -msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" +"**Fine-tune LLMs with LLM FlowerTune** " +"([#3029](https://github.com/adap/flower/pull/3029), " +"[#3089](https://github.com/adap/flower/pull/3089), " +"[#3092](https://github.com/adap/flower/pull/3092), " +"[#3100](https://github.com/adap/flower/pull/3100), " +"[#3114](https://github.com/adap/flower/pull/3114), " +"[#3162](https://github.com/adap/flower/pull/3162), " +"[#3172](https://github.com/adap/flower/pull/3172))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/ref-changelog.md:970 +#: ../../source/ref-changelog.md:361 +#, fuzzy msgid "" -"Improved advanced TensorFlow code example " -"([#769](https://github.com/adap/flower/pull/769))" -msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" +"We are introducing LLM FlowerTune, an introductory example that " +"demonstrates federated LLM fine-tuning of pre-trained Llama2 models on " +"the Alpaca-GPT4 dataset. The example is built to be easily adapted to use" +" different models and/or datasets. 
Read our blog post [LLM FlowerTune: " +"Federated LLM Fine-tuning with Flower](https://flower.ai/blog/2024-03-14" +"-llm-flowertune-federated-llm-finetuning-with-flower/) for more details." +msgstr "" +"我们将介绍 LLM FlowerTune,这是一个介绍性示例,演示了在 Alpaca-GPT4 数据集上对预先训练好的 Llama2 模型进行联合" +" LLM 微调。该示例可轻松调整以使用不同的模型和/或数据集。请阅读我们的博文 [LLM FlowerTune: Federated LLM " +"Fine-tuning with Flower](https://flower.ai/blog/2024-03-14-llm-" +"flowertune-federated-llm-finetuning-with-flower/) 了解更多详情。" -#: ../../source/ref-changelog.md:971 +#: ../../source/ref-changelog.md:363 +#, fuzzy msgid "" -"Warning when `min_available_clients` is misconfigured " -"([#830](https://github.com/adap/flower/pull/830))" +"**Introduce built-in Differential Privacy (preview)** " +"([#2798](https://github.com/adap/flower/pull/2798), " +"[#2959](https://github.com/adap/flower/pull/2959), " +"[#3038](https://github.com/adap/flower/pull/3038), " +"[#3147](https://github.com/adap/flower/pull/3147), " +"[#2909](https://github.com/adap/flower/pull/2909), " +"[#2893](https://github.com/adap/flower/pull/2893), " +"[#2892](https://github.com/adap/flower/pull/2892), " +"[#3039](https://github.com/adap/flower/pull/3039), " +"[#3074](https://github.com/adap/flower/pull/3074))" msgstr "" -"当 `min_available_clients` 配置错误时发出警告 " -"([#830](https://github.com/adap/flower/pull/830))" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:972 +#: ../../source/ref-changelog.md:365 +#, fuzzy msgid "" -"Improved gRPC server docs " -"([#841](https://github.com/adap/flower/pull/841))" -msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" +"Built-in Differential Privacy is here! 
Flower supports both central and " +"local differential privacy (DP). Central DP can be configured with either" +" fixed or adaptive clipping. The clipping can happen either on the " +"server-side or the client-side. Local DP does both clipping and noising " +"on the client-side. A new documentation page [explains Differential " +"Privacy approaches](https://flower.ai/docs/framework/explanation-" +"differential-privacy.html) and a new how-to guide describes [how to use " +"the new Differential Privacy components](https://flower.ai/docs/framework" +"/how-to-use-differential-privacy.html) in Flower." +msgstr "" +"内置差分保密功能!Flower 支持中央和本地差分保密 (DP)。中央差分隐私可配置为固定或自适应剪切。剪切可以发生在服务器端或客户端。本地 DP" +" 在客户端进行剪切和噪声处理。新的文档页面[解释差分隐私方法](https://flower.ai/docs/framework" +"/explanation-differential-privacy.html) " +"和新的操作指南[如何使用新的差分隐私组件](https://flower.ai/docs/framework/how-to-use-" +"differential-privacy.html) 介绍了 Flower 的使用方法。" -#: ../../source/ref-changelog.md:973 +#: ../../source/ref-changelog.md:367 +#, fuzzy msgid "" -"Improved error message in `NumPyClient` " -"([#851](https://github.com/adap/flower/pull/851))" -msgstr "改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/pull/851))" +"**Introduce built-in Secure Aggregation (preview)** " +"([#3120](https://github.com/adap/flower/pull/3120), " +"[#3110](https://github.com/adap/flower/pull/3110), " +"[#3108](https://github.com/adap/flower/pull/3108))" +msgstr "" +"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/ref-changelog.md:974 +#: ../../source/ref-changelog.md:369 +#, fuzzy msgid "" -"Improved PyTorch quickstart code example " -"([#852](https://github.com/adap/flower/pull/852))" -msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" +"Built-in Secure Aggregation is here! 
Flower now supports different secure" +" aggregation protocols out-of-the-box. The best part? You can add secure " +"aggregation to your Flower projects with only a few lines of code. In " +"this initial release, we inlcude support for SecAgg and SecAgg+, but more" +" protocols will be implemented shortly. We'll also add detailed docs that" +" explain secure aggregation and how to use it in Flower. You can already " +"check out the new code example that shows how to use Flower to easily " +"combine Federated Learning, Differential Privacy and Secure Aggregation " +"in the same project." +msgstr "" +"内置安全聚合功能!Flower 现在支持不同的安全聚合协议。最棒的是什么?只需几行代码,您就可以将安全聚合添加到 Flower " +"项目中。在这个初始版本中,我们包含了对 SecAgg 和 SecAgg+ " +"的支持,但更多协议将很快实现。我们还将添加详细的文档,解释安全聚合以及如何在 Flower 中使用它。您可以查看新的代码示例,了解如何使用 " +"Flower 在同一项目中轻松结合联合学习、差分隐私和安全聚合。" -#: ../../source/ref-changelog.md:978 +#: ../../source/ref-changelog.md:371 +#, fuzzy msgid "" -"**Disabled final distributed evaluation** " -"([#800](https://github.com/adap/flower/pull/800))" -msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" - -#: ../../source/ref-changelog.md:980 -msgid "" -"Prior behaviour was to perform a final round of distributed evaluation on" -" all connected clients, which is often not required (e.g., when using " -"server-side evaluation). The prior behaviour can be enabled by passing " -"`force_final_distributed_eval=True` to `start_server`." 
+"**Introduce** `flwr` **CLI (preview)** " +"([#2942](https://github.com/adap/flower/pull/2942), " +"[#3055](https://github.com/adap/flower/pull/3055), " +"[#3111](https://github.com/adap/flower/pull/3111), " +"[#3130](https://github.com/adap/flower/pull/3130), " +"[#3136](https://github.com/adap/flower/pull/3136), " +"[#3094](https://github.com/adap/flower/pull/3094), " +"[#3059](https://github.com/adap/flower/pull/3059), " +"[#3049](https://github.com/adap/flower/pull/3049), " +"[#3142](https://github.com/adap/flower/pull/3142))" msgstr "" -"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" -" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" - -#: ../../source/ref-changelog.md:982 -msgid "" -"**Renamed q-FedAvg strategy** " -"([#802](https://github.com/adap/flower/pull/802))" -msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/ref-changelog.md:984 +#: ../../source/ref-changelog.md:373 +#, fuzzy msgid "" -"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " -"the notation given in the original paper (q-FFL is the optimization " -"objective, q-FedAvg is the proposed solver). Note the original (now " -"deprecated) `QffedAvg` class is still available for compatibility reasons" -" (it will be removed in a future release)." -msgstr "" -"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " -"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" +"A new `flwr` CLI command allows creating new Flower projects (`flwr new`)" +" and then running them using the Simulation Engine (`flwr run`)." 
+msgstr "新的 `flwr` CLI 命令允许创建新的 Flower 项目(`flwr new`),然后使用仿真引擎运行它们(`flwr run`)。" -#: ../../source/ref-changelog.md:986 +#: ../../source/ref-changelog.md:375 +#, fuzzy msgid "" -"**Deprecated and renamed code example** `simulation_pytorch` **to** " -"`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**Introduce Flower Next Simulation Engine** " +"([#3024](https://github.com/adap/flower/pull/3024), " +"[#3061](https://github.com/adap/flower/pull/3061), " +"[#2997](https://github.com/adap/flower/pull/2997), " +"[#2783](https://github.com/adap/flower/pull/2783), " +"[#3184](https://github.com/adap/flower/pull/3184), " +"[#3075](https://github.com/adap/flower/pull/3075), " +"[#3047](https://github.com/adap/flower/pull/3047), " +"[#2998](https://github.com/adap/flower/pull/2998), " +"[#3009](https://github.com/adap/flower/pull/3009), " +"[#3008](https://github.com/adap/flower/pull/3008))" msgstr "" -"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " -"([#791](https://github.com/adap/flower/pull/791))" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/ref-changelog.md:988 +#: ../../source/ref-changelog.md:377 +#, fuzzy msgid "" -"This example has been replaced by a new example. The new example is based" -" on the experimental virtual client engine, which will become the new " -"default way of doing most types of large-scale simulations in Flower. The" -" existing example was kept for reference purposes, but it might be " -"removed in the future." +"The Flower Simulation Engine can now run Flower Next projects. 
For " +"notebook environments, there's also a new `run_simulation` function that " +"can run `ServerApp` and `ClientApp`." msgstr "" -"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " -"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" - -#: ../../source/ref-changelog.md:990 -msgid "v0.16.0 (2021-05-11)" -msgstr "v0.16.0 (2021-05-11)" - -#: ../../source/ref-changelog.md:994 -msgid "" -"**New built-in strategies** " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" - -#: ../../source/ref-changelog.md:996 -msgid "(abstract) FedOpt" -msgstr "(摘要) FedOpt" +"Flower 模拟引擎现在可以运行 Flower Next 项目。对于笔记本环境,还有一个新的 `run_simulation` 函数,可以运行 " +"`ServerApp` 和 `ClientApp`。" -#: ../../source/ref-changelog.md:999 +#: ../../source/ref-changelog.md:379 +#, fuzzy msgid "" -"**Custom metrics for server and strategies** " -"([#717](https://github.com/adap/flower/pull/717))" -msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" +"**Handle SuperNode connection errors** " +"([#2969](https://github.com/adap/flower/pull/2969))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/ref-changelog.md:1001 +#: ../../source/ref-changelog.md:381 +#, fuzzy msgid "" -"The Flower server is now fully task-agnostic, all remaining instances of " -"task-specific metrics (such as `accuracy`) have been replaced by custom " -"metrics dictionaries. Flower 0.15 introduced the capability to pass a " -"dictionary containing custom metrics from client to server. As of this " -"release, custom metrics replace task-specific metrics on the server." +"A SuperNode will now try to reconnect indefinitely to the SuperLink in " +"case of connection errors. The arguments `--max-retries` and `--max-wait-" +"time` can now be passed to the `flower-client-app` command. 
`--max-"
+"retries` will define the number of tentatives the client should make "
+"before it gives up trying to reconnect to the SuperLink, and, `--max-"
+"wait-time` defines the time before the SuperNode gives up trying to "
+"reconnect to the SuperLink."
msgstr ""
+"如果出现连接错误,超级节点现在会尝试无限期地重新连接超级链接。现在可以向 `flower-client-app` 命令传递参数 `--max-"
+"retries` 和 `--max-wait-time`。`--max-retries` 将定义客户端在放弃重新连接超级链接之前的重试次数,而 "
+"`--max-wait-time` 则定义超级节点放弃重新连接超级链接之前的等待时间。"

-#: ../../source/ref-changelog.md:1003
+#: ../../source/ref-changelog.md:383
+#, fuzzy
 msgid ""
-"Custom metric dictionaries are now used in two user-facing APIs: they are"
-" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and "
-"they enable evaluation functions passed to built-in strategies (via "
-"`eval_fn`) to return more than two evaluation metrics. Strategies can "
-"even return *aggregated* metrics dictionaries for the server to keep "
-"track of."
+"**General updates to Flower Baselines** " +"([#2904](https://github.com/adap/flower/pull/2904), " +"[#2482](https://github.com/adap/flower/pull/2482), " +"[#2985](https://github.com/adap/flower/pull/2985), " +"[#2968](https://github.com/adap/flower/pull/2968))" msgstr "" -"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " -"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " -"指标字典,以便服务器跟踪。" +"**引入新的 Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679)" -#: ../../source/ref-changelog.md:1005 +#: ../../source/ref-changelog.md:385 +#, fuzzy msgid "" -"Strategy implementations should migrate their `aggregate_fit` and " -"`aggregate_evaluate` methods to the new return type (e.g., by simply " -"returning an empty `{}`), server-side evaluation functions should migrate" -" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +"There's a new [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"baseline. Several other baselined have been updated as well." msgstr "" -"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " -"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " -"`return loss, {\"accuracy\": accuracy}`。" +"有一条新的 [FedStar](https://flower.ai/docs/baselines/fedstar.html) " +"基准线。其他几条基准线也已更新。" -#: ../../source/ref-changelog.md:1007 +#: ../../source/ref-changelog.md:387 +#, fuzzy msgid "" -"Flower 0.15-style return types are deprecated (but still supported), " -"compatibility will be removed in a future release." 
-msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" +"**Improve documentation and translations** " +"([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" +msgstr "" +"**改进文件和翻译** ([#3050](https://github.com/adap/flower/pull/3050), " +"[#3044](https://github.com/adap/flower/pull/3044), " +"[#3043](https://github.com/adap/flower/pull/3043), " +"[#2986](https://github.com/adap/flower/pull/2986), " +"[#3041](https://github.com/adap/flower/pull/3041), " +"[#3046](https://github.com/adap/flower/pull/3046), " +"[#3042](https://github.com/adap/flower/pull/3042), " +"[#2978](https://github.com/adap/flower/pull/2978), " +"[#2952](https://github.com/adap/flower/pull/2952), " +"[#3167](https://github.com/adap/flower/pull/3167), " +"[#2953](https://github.com/adap/flower/pull/2953), " +"[#3045](https://github.com/adap/flower/pull/3045), " +"[#2654](https://github.com/adap/flower/pull/2654), " +"[#3082](https://github.com/adap/flower/pull/3082), " +"[#2990](https://github.com/adap/flower/pull/2990), " +"[#2989](https://github.com/adap/flower/pull/2989))" -#: ../../source/ref-changelog.md:1009 +#: ../../source/ref-changelog.md:389 +#, fuzzy msgid "" -"**Migration 
warnings for deprecated functionality** " -"([#690](https://github.com/adap/flower/pull/690))" -msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" +"As usual, we merged many smaller and larger improvements to the " +"documentation. A special thank you goes to [Sebastian van der " +"Voort](https://github.com/svdvoort) for landing a big documentation PR!" +msgstr "" +"像往常一样,我们合并了许多对文档的较大和较小的改进。特别要感谢 [Sebastian van der " +"Voort](https://github.com/svdvoort),他为我们带来了一份重要的文档 PR!" -#: ../../source/ref-changelog.md:1011 +#: ../../source/ref-changelog.md:391 +#, fuzzy msgid "" -"Earlier versions of Flower were often migrated to new APIs, while " -"maintaining compatibility with legacy APIs. This release introduces " -"detailed warning messages if usage of deprecated APIs is detected. The " -"new warning messages often provide details on how to migrate to more " -"recent APIs, thus easing the transition from one release to another." +"**General updates to Flower Examples** " +"([3134](https://github.com/adap/flower/pull/3134), " +"[2996](https://github.com/adap/flower/pull/2996), " +"[2930](https://github.com/adap/flower/pull/2930), " +"[2967](https://github.com/adap/flower/pull/2967), " +"[2467](https://github.com/adap/flower/pull/2467), " +"[2910](https://github.com/adap/flower/pull/2910), " +"[#2918](https://github.com/adap/flower/pull/2918), " +"[#2773](https://github.com/adap/flower/pull/2773), " +"[#3063](https://github.com/adap/flower/pull/3063), " +"[#3116](https://github.com/adap/flower/pull/3116), " +"[#3117](https://github.com/adap/flower/pull/3117))" msgstr "" -"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " -"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " 
+"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/ref-changelog.md:1013 +#: ../../source/ref-changelog.md:393 +#, fuzzy msgid "" -"Improved docs and docstrings " -"([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" +"Two new examples show federated training of a Vision Transformer (ViT) " +"and federated learning in a medical context using the popular MONAI " +"library. `quickstart-pytorch` and `quickstart-tensorflow` demonstrate the" +" new Flower Next `ServerApp` and `ClientApp`. Many other examples " +"received considerable updates as well." msgstr "" -"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " -"[#692](https://github.com/adap/flower/pull/692) " -"[#713](https://github.com/adap/flower/pull/713))" - -#: ../../source/ref-changelog.md:1015 -msgid "MXNet example and documentation" -msgstr "MXNet 示例和文档" +"两个新示例展示了视觉转换器(ViT)的联合训练,以及使用流行的 MONAI 库在医疗环境中进行的联合学习。quickstart-pytorch " +"\"和 \"quickstart-tensorflow \"展示了新的 Flower Next \"ServerApp \"和 " +"\"ClientApp\"。许多其他示例也得到了大量更新。" -#: ../../source/ref-changelog.md:1017 +#: ../../source/ref-changelog.md:395 +#, fuzzy msgid "" -"FedBN implementation in example PyTorch: From Centralized To Federated " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**General improvements** " +"([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " +"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " 
+"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " +"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " 
+"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" msgstr "" -"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " -"([#696](https://github.com/adap/flower/pull/696) " -"[#702](https://github.com/adap/flower/pull/702) " -"[#705](https://github.com/adap/flower/pull/705))" +"**一般改进**([#3171](https://github.com/adap/flower/pull/3171), " +"[3099](https://github.com/adap/flower/pull/3099), " 
+"[3003](https://github.com/adap/flower/pull/3003), " +"[3145](https://github.com/adap/flower/pull/3145), " +"[3017](https://github.com/adap/flower/pull/3017), " +"[3085](https://github.com/adap/flower/pull/3085), " +"[3012](https://github.com/adap/flower/pull/3012), " +"[3119](https://github.com/adap/flower/pull/3119), " +"[2991](https://github.com/adap/flower/pull/2991), " +"[2970](https://github.com/adap/flower/pull/2970), " +"[2980](https://github.com/adap/flower/pull/2980), " +"[3086](https://github.com/adap/flower/pull/3086), " +"[2932](https://github.com/adap/flower/pull/2932), " +"[2928](https://github.com/adap/flower/pull/2928), " +"[2941](https://github.com/adap/flower/pull/2941), " +"[2933](https://github.com/adap/flower/pull/2933), " +"[3181](https://github.com/adap/flower/pull/3181), " +"[2973](https://github.com/adap/flower/pull/2973), " +"[2992](https://github.com/adap/flower/pull/2992), " +"[2915](https://github.com/adap/flower/pull/2915), " +"[3040](https://github.com/adap/flower/pull/3040), " +"[3022](https://github.com/adap/flower/pull/3022), " +"[3032](https://github.com/adap/flower/pull/3032), " +"[2902](https://github.com/adap/flower/pull/2902), " +"[2931](https://github.com/adap/flower/pull/2931), " +"[3005](https://github.com/adap/flower/pull/3005), " +"[3132](https://github.com/adap/flower/pull/3132), " +"[3115](https://github.com/adap/flower/pull/3115), " +"[2944](https://github.com/adap/flower/pull/2944), " +"[3064](https://github.com/adap/flower/pull/3064), " +"[3106](https://github.com/adap/flower/pull/3106), " +"[2974](https://github.com/adap/flower/pull/2974), " +"[3178](https://github.com/adap/flower/pull/3178), " +"[2993](https://github.com/adap/flower/pull/2993), " +"[3186](https://github.com/adap/flower/pull/3186), " +"[3091](https://github.com/adap/flower/pull/3091), " +"[3125](https://github.com/adap/flower/pull/3125), " +"[3093](https://github.com/adap/flower/pull/3093), " +"[3013](https://github.com/adap/flower/pull/3013), " 
+"[3033](https://github.com/adap/flower/pull/3033), " +"[3133](https://github.com/adap/flower/pull/3133), " +"[3068](https://github.com/adap/flower/pull/3068), " +"[2916](https://github.com/adap/flower/pull/2916), " +"[2975](https://github.com/adap/flower/pull/2975), " +"[2984](https://github.com/adap/flower/pull/2984), " +"[2846](https://github.com/adap/flower/pull/2846), " +"[3077](https://github.com/adap/flower/pull/3077), " +"[3143](https://github.com/adap/flower/pull/3143), " +"[2921](https://github.com/adap/flower/pull/2921), " +"[3101](https://github.com/adap/flower/pull/3101), " +"[2927](https://github.com/adap/flower/pull/2927), " +"[2995](https://github.com/adap/flower/pull/2995), " +"[2972](https://github.com/adap/flower/pull/2972), " +"[2912](https://github.com/adap/flower/pull/2912), " +"[3065](https://github.com/adap/flower/pull/3065), " +"[3028](https://github.com/adap/flower/pull/3028), " +"[2922](https://github.com/adap/flower/pull/2922), " +"[2982](https://github.com/adap/flower/pull/2982), " +"[2914](https://github.com/adap/flower/pull/2914), " +"[3179](https://github.com/adap/flower/pull/3179), " +"[3080](https://github.com/adap/flower/pull/3080), " +"[2994](https://github.com/adap/flower/pull/2994), " +"[3187](https://github.com/adap/flower/pull/3187), " +"[2926](https://github.com/adap/flower/pull/2926), " +"[3018](https://github.com/adap/flower/pull/3018), " +"[3144](https://github.com/adap/flower/pull/3144), " +"[3011](https://github.com/adap/flower/pull/3011), " +"[#3152](https://github.com/adap/flower/pull/3152), " +"[#2836](https://github.com/adap/flower/pull/2836), " +"[#2929](https://github.com/adap/flower/pull/2929), " +"[#2943](https://github.com/adap/flower/pull/2943), " +"[#2955](https://github.com/adap/flower/pull/2955), " +"[#2954](https://github.com/adap/flower/pull/2954))" -#: ../../source/ref-changelog.md:1021 -msgid "" -"**Serialization-agnostic server** " -"([#721](https://github.com/adap/flower/pull/721))" -msgstr 
"**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" +#: ../../source/ref-changelog.md:401 +#, fuzzy +msgid "v1.7.0 (2024-02-05)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/ref-changelog.md:1023 +#: ../../source/ref-changelog.md:407 +#, fuzzy msgid "" -"The Flower server is now fully serialization-agnostic. Prior usage of " -"class `Weights` (which represents parameters as deserialized NumPy " -"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " -"`Parameters` objects are fully serialization-agnostic and represents " -"parameters as byte arrays, the `tensor_type` attributes indicates how " -"these byte arrays should be interpreted (e.g., for " -"serialization/deserialization)." +"`Aasheesh Singh`, `Adam Narozniak`, `Aml Hassan Esmil`, `Charles " +"Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo " +"Gabrielli`, `Gustavo Bertoli`, `HelinLin`, `Heng Pan`, `Javier`, `M S " +"Chaitanya Kumar`, `Mohammad Naseri`, `Nikos Vlachakis`, `Pritam Neog`, " +"`Robert Kuska`, `Robert Steiner`, `Taner Topal`, `Yahia Salaheldin " +"Shaaban`, `Yan Gao`, `Yasar Abbas` " msgstr "" -"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " -"`Parameters` 类取代(例如在 `Strategy`中)。参数 " -"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/ref-changelog.md:1025 +#: ../../source/ref-changelog.md:411 +#, fuzzy msgid "" -"Built-in strategies implement this approach by handling serialization and" -" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " -"implementations should update to the slightly changed Strategy method " -"definitions. 
Strategy authors can consult PR " -"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" -" easily migrate to the new format." +"**Introduce stateful clients (experimental)** " +"([#2770](https://github.com/adap/flower/pull/2770), " +"[#2686](https://github.com/adap/flower/pull/2686), " +"[#2696](https://github.com/adap/flower/pull/2696), " +"[#2643](https://github.com/adap/flower/pull/2643), " +"[#2769](https://github.com/adap/flower/pull/2769))" msgstr "" -"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" -" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" +"** baselines的普通更新** ([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435))" -#: ../../source/ref-changelog.md:1027 +#: ../../source/ref-changelog.md:413 +#, fuzzy msgid "" -"Deprecated `flwr.server.Server.evaluate`, use " -"`flwr.server.Server.evaluate_round` instead " -"([#717](https://github.com/adap/flower/pull/717))" +"Subclasses of `Client` and `NumPyClient` can now store local state that " +"remains on the client. Let's start with the highlight first: this new " +"feature is compatible with both simulated clients (via " +"`start_simulation`) and networked clients (via `start_client`). It's also" +" the first preview of new abstractions like `Context` and `RecordSet`. " +"Clients can access state of type `RecordSet` via `state: RecordSet = " +"self.context.state`. Changes to this `RecordSet` are preserved across " +"different rounds of execution to enable stateful computations in a " +"unified way across simulation and deployment." 
msgstr "" -"已弃用 `flwr.server.Server.evaluate`,改用 " -"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" - -#: ../../source/ref-changelog.md:1029 -msgid "v0.15.0 (2021-03-12)" -msgstr "v0.15.0 (2021-03-12)" +"客户端 \"和 \"NumPyClient \"的子类现在可以存储保留在客户端上的本地状态。让我们先从亮点开始:这一新功能与模拟客户端(通过 " +"`start_simulation`)和网络客户端(通过 `start_client`)兼容。这也是 `Context` 和 " +"`RecordSet` 等新抽象的首次预览。客户端可以通过 `state.RecordSet` 访问 `RecordSet` 类型的状态: " +"RecordSet = self.context.state`。对该 `RecordSet` " +"的更改会在不同轮执行中保留,以便在模拟和部署中以统一的方式进行有状态计算。" -#: ../../source/ref-changelog.md:1033 +#: ../../source/ref-changelog.md:415 +#, fuzzy msgid "" -"**Server-side parameter initialization** " -"([#658](https://github.com/adap/flower/pull/658))" -msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" +"**Improve performance** " +"([#2293](https://github.com/adap/flower/pull/2293))" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" -#: ../../source/ref-changelog.md:1035 +#: ../../source/ref-changelog.md:417 +#, fuzzy msgid "" -"Model parameters can now be initialized on the server-side. Server-side " -"parameter initialization works via a new `Strategy` method called " -"`initialize_parameters`." +"Flower is faster than ever. All `FedAvg`-derived strategies now use in-" +"place aggregation to reduce memory consumption. The Flower client " +"serialization/deserialization has been rewritten from the ground up, " +"which results in significant speedups, especially when the client-side " +"training time is short." msgstr "" -"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " -"\"方法进行。" +"Flower 的速度比以往更快。所有源于 `FedAvg` 的策略现在都使用就地聚合,以减少内存消耗。Flower " +"客户端序列化/解序列化已从头开始重写,从而显著提高了速度,尤其是在客户端训练时间较短的情况下。" -#: ../../source/ref-changelog.md:1037 +#: ../../source/ref-changelog.md:419 +#, fuzzy msgid "" -"Built-in strategies support a new constructor argument called " -"`initial_parameters` to set the initial parameters. 
Built-in strategies " -"will provide these initial parameters to the server on startup and then " -"delete them to free the memory afterwards." +"**Support Federated Learning with Apple MLX and Flower** " +"([#2693](https://github.com/adap/flower/pull/2693))" msgstr "" -"内置策略支持名为 \"initial_parameters " -"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/ref-changelog.md:1056 +#: ../../source/ref-changelog.md:421 +#, fuzzy msgid "" -"If no initial parameters are provided to the strategy, the server will " -"continue to use the current behaviour (namely, it will ask one of the " -"connected clients for its parameters and use these as the initial global " -"parameters)." -msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" +"Flower has official support for federated learning using [Apple " +"MLX](https://ml-explore.github.io/mlx) via the new `quickstart-mlx` code " +"example." 
+msgstr "" +"通过新的 `quickstart-mlx` 代码示例,Flower 正式支持使用 [Apple MLX](https://ml-" +"explore.github.io/mlx)的联合学习。" -#: ../../source/ref-changelog.md:1060 +#: ../../source/ref-changelog.md:423 +#, fuzzy msgid "" -"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " -"`flwr.server.strategy.FedAvg`, which is equivalent)" +"**Introduce new XGBoost cyclic strategy** " +"([#2666](https://github.com/adap/flower/pull/2666), " +"[#2668](https://github.com/adap/flower/pull/2668))" msgstr "" -"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " -"`flwr.server.strategy.FedAvg`)" - -#: ../../source/ref-changelog.md:1062 -msgid "v0.14.0 (2021-02-18)" -msgstr "v0.14.0 (2021-02-18)" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/ref-changelog.md:1066 +#: ../../source/ref-changelog.md:425 +#, fuzzy msgid "" -"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"A new strategy called `FedXgbCyclic` supports a client-by-client style of" +" training (often called cyclic). The `xgboost-comprehensive` code example" +" shows how to use it in a full project. In addition to that, `xgboost-" +"comprehensive` now also supports simulation mode. With this, Flower " +"offers best-in-class XGBoost support." 
msgstr "" -"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " -"([#610](https://github.com/adap/flower/pull/610) " -"[#572](https://github.com/adap/flower/pull/572) " -"[#633](https://github.com/adap/flower/pull/633))" +"名为 `FedXgbCyclic` 的新策略支持逐个客户端的训练风格(通常称为循环)。xgboost-comprehensive " +"\"代码示例展示了如何在一个完整的项目中使用它。除此之外,`xgboost-comprehensive` 现在还支持模拟模式。由此,Flower " +"提供了同类最佳的 XGBoost 支持。" -#: ../../source/ref-changelog.md:1068 +#: ../../source/ref-changelog.md:427 +#, fuzzy msgid "" -"Clients can now return an additional dictionary mapping `str` keys to " -"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " -"This means one can return almost arbitrary values from `fit`/`evaluate` " -"and make use of them on the server side!" -msgstr "" -"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " -"返回几乎任意的值,并在服务器端使用它们!" +"**Support Python 3.11** " +"([#2394](https://github.com/adap/flower/pull/2394))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/ref-changelog.md:1070 +#: ../../source/ref-changelog.md:429 +#, fuzzy msgid "" -"This improvement also allowed for more consistent return types between " -"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " -"dict)` representing the loss, number of examples, and a dictionary " -"holding arbitrary problem-specific values like accuracy." -msgstr "" -"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " -"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" +"Framework tests now run on Python 3.8, 3.9, 3.10, and 3.11. This will " +"ensure better support for users using more recent Python versions." 
+msgstr "框架测试现在可在 Python 3.8、3.9、3.10 和 3.11 上运行。这将确保为使用最新 Python 版本的用户提供更好的支持。" -#: ../../source/ref-changelog.md:1072 +#: ../../source/ref-changelog.md:431 +#, fuzzy msgid "" -"In case you wondered: this feature is compatible with existing projects, " -"the additional dictionary return value is optional. New code should " -"however migrate to the new return types to be compatible with upcoming " -"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " -"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " -"details." +"**Update gRPC and ProtoBuf dependencies** " +"([#2814](https://github.com/adap/flower/pull/2814))" msgstr "" -"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " -"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " -"`float, int, Dict[str, Scalar]`)。详见下面的示例。" +"**更新 REST API 以支持创建和删除节点** " +"([#2283](https://github.com/adap/flower/pull/2283))" -#: ../../source/ref-changelog.md:1074 +#: ../../source/ref-changelog.md:433 +#, fuzzy msgid "" -"*Code example:* note the additional dictionary return values in both " -"`FlwrClient.fit` and `FlwrClient.evaluate`:" -msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" +"The `grpcio` and `protobuf` dependencies were updated to their latest " +"versions for improved security and performance." 
+msgstr "为提高安全性和性能,\"grpcio \"和 \"protobuf \"依赖项已更新至最新版本。" -#: ../../source/ref-changelog.md:1089 +#: ../../source/ref-changelog.md:435 +#, fuzzy msgid "" -"**Generalized** `config` **argument in** `Client.fit` **and** " -"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +"**Introduce Docker image for Flower server** " +"([#2700](https://github.com/adap/flower/pull/2700), " +"[#2688](https://github.com/adap/flower/pull/2688), " +"[#2705](https://github.com/adap/flower/pull/2705), " +"[#2695](https://github.com/adap/flower/pull/2695), " +"[#2747](https://github.com/adap/flower/pull/2747), " +"[#2746](https://github.com/adap/flower/pull/2746), " +"[#2680](https://github.com/adap/flower/pull/2680), " +"[#2682](https://github.com/adap/flower/pull/2682), " +"[#2701](https://github.com/adap/flower/pull/2701))" msgstr "" -"**在**`Client.fit` " -"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/ref-changelog.md:1091 +#: ../../source/ref-changelog.md:437 +#, fuzzy msgid "" -"The `config` argument used to be of type `Dict[str, str]`, which means " -"that dictionary values were expected to be strings. The new release " -"generalizes this to enable values of the following types: `bool`, " -"`bytes`, `float`, `int`, `str`." +"The Flower server can now be run using an official Docker image. A new " +"how-to guide explains [how to run Flower using " +"Docker](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html). An official Flower client Docker image will follow." 
msgstr "" -"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " -"bool`、`bytes`、`float`、`int`、`str`。" +"现在可以使用官方 Docker 映像运行 Flower 服务器了。新的操作指南介绍了 [如何使用 Docker 运行 " +"Flower](https://flower.ai/docs/framework/how-to-run-flower-using-" +"docker.html)。Flower 客户端 Docker 官方镜像将随后发布。" -#: ../../source/ref-changelog.md:1093 +#: ../../source/ref-changelog.md:439 +#, fuzzy msgid "" -"This means one can now pass almost arbitrary values to `fit`/`evaluate` " -"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" -"side and `int(config[\"epochs\"])` on the client side!" +"**Introduce** `flower-via-docker-compose` **example** " +"([#2626](https://github.com/adap/flower/pull/2626))" msgstr "" -"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " -"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-changelog.md:1095 +#: ../../source/ref-changelog.md:441 +#, fuzzy msgid "" -"*Code example:* note that the `config` dictionary now contains non-`str` " -"values in both `Client.fit` and `Client.evaluate`:" -msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" - -#: ../../source/ref-changelog.md:1112 -msgid "v0.13.0 (2021-01-08)" -msgstr "v0.13.0 (2021-01-08)" +"**Introduce** `quickstart-sklearn-tabular` **example** " +"([#2719](https://github.com/adap/flower/pull/2719))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:1116 +#: ../../source/ref-changelog.md:443 +#, fuzzy msgid "" -"New example: PyTorch From Centralized To Federated " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" - -#: ../../source/ref-changelog.md:1117 -msgid "Improved documentation" -msgstr "改进文档" - -#: ../../source/ref-changelog.md:1118 -msgid "New documentation theme 
([#551](https://github.com/adap/flower/pull/551))" -msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" - -#: ../../source/ref-changelog.md:1119 -msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" -msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" +"**Introduce** `custom-metrics` **example** " +"([#1958](https://github.com/adap/flower/pull/1958))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/ref-changelog.md:1120 +#: ../../source/ref-changelog.md:445 +#, fuzzy msgid "" -"Updated examples documentation " -"([#549](https://github.com/adap/flower/pull/549))" -msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" +"**Update code examples to use Flower Datasets** " +"([#2450](https://github.com/adap/flower/pull/2450), " +"[#2456](https://github.com/adap/flower/pull/2456), " +"[#2318](https://github.com/adap/flower/pull/2318), " +"[#2712](https://github.com/adap/flower/pull/2712))" +msgstr "" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/ref-changelog.md:1121 +#: ../../source/ref-changelog.md:447 +#, fuzzy msgid "" -"Removed obsolete documentation " -"([#548](https://github.com/adap/flower/pull/548))" -msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" - -#: ../../source/ref-changelog.md:1123 -msgid "Bugfix:" -msgstr "错误修正:" +"Several code examples were updated to use [Flower " +"Datasets](https://flower.ai/docs/datasets/)." 
+msgstr "更新了多个代码示例,以使用 [Flower Datasets](https://flower.ai/docs/datasets/) 。" -#: ../../source/ref-changelog.md:1125 +#: ../../source/ref-changelog.md:449 +#, fuzzy msgid "" -"`Server.fit` does not disconnect clients when finished, disconnecting the" -" clients is now handled in `flwr.server.start_server` " -"([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))." +"**General updates to Flower Examples** " +"([#2381](https://github.com/adap/flower/pull/2381), " +"[#2805](https://github.com/adap/flower/pull/2805), " +"[#2782](https://github.com/adap/flower/pull/2782), " +"[#2806](https://github.com/adap/flower/pull/2806), " +"[#2829](https://github.com/adap/flower/pull/2829), " +"[#2825](https://github.com/adap/flower/pull/2825), " +"[#2816](https://github.com/adap/flower/pull/2816), " +"[#2726](https://github.com/adap/flower/pull/2726), " +"[#2659](https://github.com/adap/flower/pull/2659), " +"[#2655](https://github.com/adap/flower/pull/2655))" msgstr "" -"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " -"\"中处理的([#553](https://github.com/adap/flower/pull/553) " -"[#540](https://github.com/adap/flower/issues/540))。" - -#: ../../source/ref-changelog.md:1127 -msgid "v0.12.0 (2020-12-07)" -msgstr "v0.12.0 (2020-12-07)" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/ref-changelog.md:1129 ../../source/ref-changelog.md:1145 -msgid "Important changes:" -msgstr "重要变更:" +#: ../../source/ref-changelog.md:451 +#, fuzzy +msgid 
"Many Flower code examples received substantial updates." +msgstr "许多 \"Flower \"代码示例得到了大幅更新。" -#: ../../source/ref-changelog.md:1131 -msgid "" -"Added an example for embedded devices " -"([#507](https://github.com/adap/flower/pull/507))" -msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" +#: ../../source/ref-changelog.md:453 ../../source/ref-changelog.md:546 +msgid "**Update Flower Baselines**" +msgstr "**更新 Flower Baselines**" -#: ../../source/ref-changelog.md:1132 +#: ../../source/ref-changelog.md:455 +#, fuzzy msgid "" -"Added a new NumPyClient (in addition to the existing KerasClient) " -"([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508))" +"HFedXGBoost ([#2226](https://github.com/adap/flower/pull/2226), " +"[#2771](https://github.com/adap/flower/pull/2771))" msgstr "" -"添加了一个新的 NumPyClient(除现有的 KerasClient " -"之外)([#504](https://github.com/adap/flower/pull/504) " -"[#508](https://github.com/adap/flower/pull/508)" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-changelog.md:1133 -msgid "" -"Deprecated `flwr_example` package and started to migrate examples into " -"the top-level `examples` directory " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" -msgstr "" -"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " -"([#494](https://github.com/adap/flower/pull/494) " -"[#512](https://github.com/adap/flower/pull/512))" - -#: ../../source/ref-changelog.md:1135 -msgid "v0.11.0 (2020-11-30)" -msgstr "v0.11.0 (2020-11-30)" - -#: ../../source/ref-changelog.md:1137 -msgid "Incompatible changes:" -msgstr "不兼容的更改:" - -#: ../../source/ref-changelog.md:1139 -msgid "" -"Renamed strategy methods " -"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " -"Flower's public APIs. 
Other public methods/functions (e.g., every method " -"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " -"which is why we're removing it from the four methods in Strategy. To " -"migrate rename the following `Strategy` methods accordingly:" -msgstr "" -"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " -"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " -"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" - -#: ../../source/ref-changelog.md:1140 -msgid "`on_configure_evaluate` => `configure_evaluate`" -msgstr "`on_configure_evaluate` => `configure_evaluate`" - -#: ../../source/ref-changelog.md:1141 -msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" -msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" - -#: ../../source/ref-changelog.md:1142 -msgid "`on_configure_fit` => `configure_fit`" -msgstr "`on_configure_fit` => `configure_fit`" - -#: ../../source/ref-changelog.md:1143 -msgid "`on_aggregate_fit` => `aggregate_fit`" -msgstr "`on_aggregate_fit` => `aggregate_fit`" +#: ../../source/ref-changelog.md:456 +#, fuzzy +msgid "FedVSSL ([#2412](https://github.com/adap/flower/pull/2412))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/ref-changelog.md:1147 -msgid "" -"Deprecated `DefaultStrategy` " -"([#479](https://github.com/adap/flower/pull/479)). To migrate use " -"`FedAvg` instead." -msgstr "" -"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " -"。迁移时请使用 `FedAvg`。" +#: ../../source/ref-changelog.md:457 +#, fuzzy +msgid "FedNova ([#2179](https://github.com/adap/flower/pull/2179))" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/ref-changelog.md:1148 -msgid "" -"Simplified examples and baselines " -"([#484](https://github.com/adap/flower/pull/484))." 
-msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" +#: ../../source/ref-changelog.md:458 +#, fuzzy +msgid "HeteroFL ([#2439](https://github.com/adap/flower/pull/2439))" +msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -#: ../../source/ref-changelog.md:1149 -msgid "" -"Removed presently unused `on_conclude_round` from strategy interface " -"([#483](https://github.com/adap/flower/pull/483))." -msgstr "" -"删除了策略界面中目前未使用的 " -"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" +#: ../../source/ref-changelog.md:459 +#, fuzzy +msgid "FedAvgM ([#2246](https://github.com/adap/flower/pull/2246))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/ref-changelog.md:1150 -msgid "" -"Set minimal Python version to 3.6.1 instead of 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." -msgstr "" -"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " -"([#471](https://github.com/adap/flower/pull/471))." +#: ../../source/ref-changelog.md:460 +#, fuzzy +msgid "FedPara ([#2722](https://github.com/adap/flower/pull/2722))" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/ref-changelog.md:1151 +#: ../../source/ref-changelog.md:462 +#, fuzzy msgid "" -"Improved `Strategy` docstrings " -"([#470](https://github.com/adap/flower/pull/470))." 
+"**Improve documentation** " +"([#2674](https://github.com/adap/flower/pull/2674), " +"[#2480](https://github.com/adap/flower/pull/2480), " +"[#2826](https://github.com/adap/flower/pull/2826), " +"[#2727](https://github.com/adap/flower/pull/2727), " +"[#2761](https://github.com/adap/flower/pull/2761), " +"[#2900](https://github.com/adap/flower/pull/2900))" msgstr "" -"改进了 `Strategy` " -"docstrings([#470](https://github.com/adap/flower/pull/470))。" - -#: ../../source/ref-example-projects.rst:2 -msgid "Example projects" -msgstr "项目实例" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/ref-example-projects.rst:4 +#: ../../source/ref-changelog.md:464 +#, fuzzy msgid "" -"Flower comes with a number of usage examples. The examples demonstrate " -"how Flower can be used to federate different kinds of existing machine " -"learning pipelines, usually leveraging popular machine learning " -"frameworks such as `PyTorch `_ or `TensorFlow " -"`_." 
+"**Improved testing and development infrastructure** " +"([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " +"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" msgstr "" -"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " -"`PyTorch `_ 或 `TensorFlow " -"`_。" +"**改进测试和开发基础设施** ([#2797](https://github.com/adap/flower/pull/2797), " +"[#2676](https://github.com/adap/flower/pull/2676), " +"[#2644](https://github.com/adap/flower/pull/2644), " 
+"[#2656](https://github.com/adap/flower/pull/2656), " +"[#2848](https://github.com/adap/flower/pull/2848), " +"[#2675](https://github.com/adap/flower/pull/2675), " +"[#2735](https://github.com/adap/flower/pull/2735), " +"[#2767](https://github.com/adap/flower/pull/2767), " +"[#2732](https://github.com/adap/flower/pull/2732), " +"[#2744](https://github.com/adap/flower/pull/2744), " +"[#2681](https://github.com/adap/flower/pull/2681), " +"[#2699](https://github.com/adap/flower/pull/2699), " +"[#2745](https://github.com/adap/flower/pull/2745), " +"[#2734](https://github.com/adap/flower/pull/2734), " +"[#2731](https://github.com/adap/flower/pull/2731), " +"[#2652](https://github.com/adap/flower/pull/2652), " +"[#2720](https://github.com/adap/flower/pull/2720), " +"[#2721](https://github.com/adap/flower/pull/2721), " +"[#2717](https://github.com/adap/flower/pull/2717), " +"[#2864](https://github.com/adap/flower/pull/2864), " +"[#2694](https://github.com/adap/flower/pull/2694), " +"[#2709](https://github.com/adap/flower/pull/2709), " +"[#2658](https://github.com/adap/flower/pull/2658), " +"[#2796](https://github.com/adap/flower/pull/2796), " +"[#2692](https://github.com/adap/flower/pull/2692), " +"[#2657](https://github.com/adap/flower/pull/2657), " +"[#2813](https://github.com/adap/flower/pull/2813), " +"[#2661](https://github.com/adap/flower/pull/2661), " +"[#2398](https://github.com/adap/flower/pull/2398))" -#: ../../source/ref-example-projects.rst:10 +#: ../../source/ref-changelog.md:466 #, fuzzy msgid "" -"The following examples are available as standalone projects. 
Quickstart " -"TensorFlow/Keras ---------------------------" -msgstr "以下示例可作为独立项目使用。" - -#: ../../source/ref-example-projects.rst:14 -msgid "" -"The TensorFlow/Keras quickstart example shows CIFAR-10 image " -"classification with MobileNetV2:" -msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" - -#: ../../source/ref-example-projects.rst:17 -msgid "" -"`Quickstart TensorFlow (Code) " -"`_" -msgstr "" -"`TensorFlow快速入门 (代码) `_" +"The Flower testing and development infrastructure has received " +"substantial updates. This makes Flower 1.7 the most tested release ever." +msgstr "Flower 测试和开发基础架构已得到大幅更新。这使得 Flower 1.7 成为有史以来经过最多测试的版本。" -#: ../../source/ref-example-projects.rst:18 +#: ../../source/ref-changelog.md:468 #, fuzzy -msgid ":doc:`Quickstart TensorFlow (Tutorial) `" -msgstr "" -"`TensorFlow快速入门 (教程) `_" - -#: ../../source/ref-example-projects.rst:19 msgid "" -"`Quickstart TensorFlow (Blog Post) `_" +"**Update dependencies** " +"([#2753](https://github.com/adap/flower/pull/2753), " +"[#2651](https://github.com/adap/flower/pull/2651), " +"[#2739](https://github.com/adap/flower/pull/2739), " +"[#2837](https://github.com/adap/flower/pull/2837), " +"[#2788](https://github.com/adap/flower/pull/2788), " +"[#2811](https://github.com/adap/flower/pull/2811), " +"[#2774](https://github.com/adap/flower/pull/2774), " +"[#2790](https://github.com/adap/flower/pull/2790), " +"[#2751](https://github.com/adap/flower/pull/2751), " +"[#2850](https://github.com/adap/flower/pull/2850), " +"[#2812](https://github.com/adap/flower/pull/2812), " +"[#2872](https://github.com/adap/flower/pull/2872), " +"[#2736](https://github.com/adap/flower/pull/2736), " +"[#2756](https://github.com/adap/flower/pull/2756), " +"[#2857](https://github.com/adap/flower/pull/2857), " +"[#2757](https://github.com/adap/flower/pull/2757), " +"[#2810](https://github.com/adap/flower/pull/2810), " +"[#2740](https://github.com/adap/flower/pull/2740), " 
+"[#2789](https://github.com/adap/flower/pull/2789))" msgstr "" -"`TensorFlow快速入门 (博客) `_" - -#: ../../source/ref-example-projects.rst:23 -#: ../../source/tutorial-quickstart-pytorch.rst:5 -msgid "Quickstart PyTorch" -msgstr "PyTorch快速入门" - -#: ../../source/ref-example-projects.rst:25 -msgid "" -"The PyTorch quickstart example shows CIFAR-10 image classification with a" -" simple Convolutional Neural Network:" -msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/ref-example-projects.rst:28 +#: ../../source/ref-changelog.md:470 +#, fuzzy msgid "" -"`Quickstart PyTorch (Code) " -"`_" -msgstr "" -"`PyTorch快速入门 (代码) `_" - -#: ../../source/ref-example-projects.rst:29 -#, fuzzy -msgid ":doc:`Quickstart PyTorch (Tutorial) `" +"**General improvements** " +"([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " +"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " 
+"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" msgstr "" -"`PyTorch快速入门 (教程) `_" - -#: ../../source/ref-example-projects.rst:33 -msgid "PyTorch: From Centralized To Federated" -msgstr "PyTorch: 从集中式到联邦式" - -#: ../../source/ref-example-projects.rst:35 -msgid "" -"This example shows how a regular PyTorch project can be federated using " -"Flower:" -msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" +"**一般改进** ([#2803](https://github.com/adap/flower/pull/2803), " +"[#2847](https://github.com/adap/flower/pull/2847), " +"[#2877](https://github.com/adap/flower/pull/2877), " 
+"[#2690](https://github.com/adap/flower/pull/2690), " +"[#2889](https://github.com/adap/flower/pull/2889), " +"[#2874](https://github.com/adap/flower/pull/2874), " +"[#2819](https://github.com/adap/flower/pull/2819), " +"[#2689](https://github.com/adap/flower/pull/2689), " +"[#2457](https://github.com/adap/flower/pull/2457), " +"[#2870](https://github.com/adap/flower/pull/2870), " +"[#2669](https://github.com/adap/flower/pull/2669), " +"[#2876](https://github.com/adap/flower/pull/2876), " +"[#2885](https://github.com/adap/flower/pull/2885), " +"[#2858](https://github.com/adap/flower/pull/2858), " +"[#2867](https://github.com/adap/flower/pull/2867), " +"[#2351](https://github.com/adap/flower/pull/2351), " +"[#2886](https://github.com/adap/flower/pull/2886), " +"[#2860](https://github.com/adap/flower/pull/2860), " +"[#2828](https://github.com/adap/flower/pull/2828), " +"[#2869](https://github.com/adap/flower/pull/2869), " +"[#2875](https://github.com/adap/flower/pull/2875), " +"[#2733](https://github.com/adap/flower/pull/2733), " +"[#2488](https://github.com/adap/flower/pull/2488), " +"[#2646](https://github.com/adap/flower/pull/2646), " +"[#2879](https://github.com/adap/flower/pull/2879), " +"[#2821](https://github.com/adap/flower/pull/2821), " +"[#2855](https://github.com/adap/flower/pull/2855), " +"[#2800](https://github.com/adap/flower/pull/2800), " +"[#2807](https://github.com/adap/flower/pull/2807), " +"[#2801](https://github.com/adap/flower/pull/2801), " +"[#2804](https://github.com/adap/flower/pull/2804), " +"[#2851](https://github.com/adap/flower/pull/2851), " +"[#2787](https://github.com/adap/flower/pull/2787), " +"[#2852](https://github.com/adap/flower/pull/2852), " +"[#2672](https://github.com/adap/flower/pull/2672), " +"[#2759](https://github.com/adap/flower/pull/2759))" -#: ../../source/ref-example-projects.rst:37 +#: ../../source/ref-changelog.md:474 +#, fuzzy msgid "" -"`PyTorch: From Centralized To Federated (Code) " -"`_" +"**Deprecate** 
`start_numpy_client` " +"([#2563](https://github.com/adap/flower/pull/2563), " +"[#2718](https://github.com/adap/flower/pull/2718))" msgstr "" -"PyTorch: 从集中式到联邦式(代码) `_" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" -#: ../../source/ref-example-projects.rst:38 +#: ../../source/ref-changelog.md:476 #, fuzzy msgid "" -":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +"Until now, clients of type `NumPyClient` needed to be started via " +"`start_numpy_client`. In our efforts to consolidate framework APIs, we " +"have introduced changes, and now all client types should start via " +"`start_client`. To continue using `NumPyClient` clients, you simply need " +"to first call the `.to_client()` method and then pass returned `Client` " +"object to `start_client`. The examples and the documentation have been " +"updated accordingly." msgstr "" -"PyTorch: 从集中式到联邦式(教程) `_" +"到目前为止,\"NumPyClient \"类型的客户端需要通过 \"start_numpy_client \"启动。为了整合框架 " +"API,我们引入了一些变化,现在所有客户端类型都应通过 `start_client` 启动。要继续使用 `NumPyClient` " +"客户端,只需首先调用 `.to_client()` 方法,然后将返回的 `Client` 对象传递给 " +"`start_client`。示例和文档已相应更新。" -#: ../../source/ref-example-projects.rst:42 -msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" -msgstr "树莓派和 Nvidia Jetson 上的联邦学习" +#: ../../source/ref-changelog.md:478 +#, fuzzy +msgid "" +"**Deprecate legacy DP wrappers** " +"([#2749](https://github.com/adap/flower/pull/2749))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/ref-example-projects.rst:44 +#: ../../source/ref-changelog.md:480 +#, fuzzy msgid "" -"This example shows how Flower can be used to build a federated learning " -"system that run across Raspberry Pi and Nvidia Jetson:" -msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" +"Legacy DP wrapper classes are deprecated, but still functional. 
This is " +"in preparation for an all-new pluggable version of differential privacy " +"support in Flower." +msgstr "传统的 DP 封装类已废弃,但仍可正常使用。这是为 Flower 中的全新可插拔差分隐私支持版本做准备。" -#: ../../source/ref-example-projects.rst:46 +#: ../../source/ref-changelog.md:482 +#, fuzzy msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " -"`_" +"**Make optional arg** `--callable` **in** `flower-client` **a required " +"positional arg** ([#2673](https://github.com/adap/flower/pull/2673))" msgstr "" -"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " -"`_" +"**从** `start_client` 中移除** `rest` **实验参数 " +"([#2324](https://github.com/adap/flower/pull/2324))" -#: ../../source/ref-example-projects.rst:47 +#: ../../source/ref-changelog.md:484 +#, fuzzy msgid "" -"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " -"`_" +"**Rename** `certificates` **to** `root_certificates` **in** `Driver` " +"([#2890](https://github.com/adap/flower/pull/2890))" msgstr "" -"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " -"`_" +"**重新命名** `rnd` ** to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" -#: ../../source/ref-faq.rst:4 +#: ../../source/ref-changelog.md:486 +#, fuzzy msgid "" -"This page collects answers to commonly asked questions about Federated " -"Learning with Flower." -msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" +"**Drop experimental** `Task` **fields** " +"([#2866](https://github.com/adap/flower/pull/2866), " +"[#2865](https://github.com/adap/flower/pull/2865))" +msgstr "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/ref-faq.rst +#: ../../source/ref-changelog.md:488 #, fuzzy -msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" -msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" - -#: ../../source/ref-faq.rst:8 msgid "" -"Yes, it can! 
Flower even comes with a few under-the-hood optimizations to" -" make it work even better on Colab. Here's a quickstart example:" -msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" +"Experimental fields `sa`, `legacy_server_message` and " +"`legacy_client_message` were removed from `Task` message. The removed " +"fields are superseded by the new `RecordSet` abstraction." +msgstr "" +"从 `Task` 消息中删除了试验性字段 `sa`、 `legacy_server_message` 和 " +"`legacy_client_message`。删除的字段已被新的 `RecordSet` 抽象所取代。" -#: ../../source/ref-faq.rst:10 +#: ../../source/ref-changelog.md:490 +#, fuzzy msgid "" -"`Flower simulation PyTorch " -"`_" -msgstr "" -"`Flower 模拟 PyTorch " -"`_" +"**Retire MXNet examples** " +"([#2724](https://github.com/adap/flower/pull/2724))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/ref-faq.rst:11 +#: ../../source/ref-changelog.md:492 +#, fuzzy msgid "" -"`Flower simulation TensorFlow/Keras " -"`_" +"The development of the MXNet fremework has ended and the project is now " +"[archived on GitHub](https://github.com/apache/mxnet). Existing MXNet " +"examples won't receive updates." msgstr "" -"`Flower模拟TensorFlow/Keras " -"`_" +"MXNet fremework 的开发工作已经结束,该项目现已[归档于 " +"GitHub](https://github.com/apache/mxnet)。现有的 MXNet 示例不会收到更新。" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" -msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" +#: ../../source/ref-changelog.md:494 +#, fuzzy +msgid "v1.6.0 (2023-11-28)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/ref-faq.rst:15 +#: ../../source/ref-changelog.md:500 +#, fuzzy msgid "" -"Find the `blog post about federated learning on embedded device here " -"`_" -" and the corresponding `GitHub code example " -"`_." +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. 
Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " msgstr "" -"请点击此处查看有关嵌入式设备联邦学习的 " -"\"博文\"`_和相应的" -" \"GitHub 代码示例\"`_。" - -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" -msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" +"`Aashish Kolluri`, `Adam Narozniak`, `Alessio Mora`, `Barathwaja S`, " +"`Charles Beauville`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Gabriel " +"Mota`, `Heng Pan`, `Ivan Agarský`, `JS.KIM`, `Javier`, `Marius Schlegel`," +" `Navin Chandra`, `Nic Lane`, `Peterpan828`, `Qinbin Li`, `Shaz-hash`, " +"`Steve Laskaridis`, `Taner Topal`, `William Lindskog`, `Yan Gao`, " +"`cnxdeveloper`, `k3nfalt` " -#: ../../source/ref-faq.rst:19 +#: ../../source/ref-changelog.md:504 msgid "" -"Yes, it does. 
Please take a look at our `blog post " -"`_ or check out the code examples:" +"**Add experimental support for Python 3.12** " +"([#2565](https://github.com/adap/flower/pull/2565))" msgstr "" -"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" +"** 增加对 Python 3.12 的实验支持** " +"([#2565](https://github.com/adap/flower/pull/2565))" -#: ../../source/ref-faq.rst:21 +#: ../../source/ref-changelog.md:506 +#, fuzzy msgid "" -"`Android Kotlin example `_" -msgstr "`Android Kotlin 示例 `_" - -#: ../../source/ref-faq.rst:22 -msgid "`Android Java example `_" -msgstr "Android Java 示例 `_" +"**Add new XGBoost examples** " +"([#2612](https://github.com/adap/flower/pull/2612), " +"[#2554](https://github.com/adap/flower/pull/2554), " +"[#2617](https://github.com/adap/flower/pull/2617), " +"[#2618](https://github.com/adap/flower/pull/2618), " +"[#2619](https://github.com/adap/flower/pull/2619), " +"[#2567](https://github.com/adap/flower/pull/2567))" +msgstr "" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/ref-faq.rst -msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" -msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" +#: ../../source/ref-changelog.md:508 +#, fuzzy +msgid "" +"We have added a new `xgboost-quickstart` example alongside a new " +"`xgboost-comprehensive` example that goes more in-depth." +msgstr "" +"我们添加了一个新的 \"xgboost-quickstart \"示例和一个新的 \"xgboost-comprehensive " +"\"示例,后者更加深入。" -#: ../../source/ref-faq.rst:26 +#: ../../source/ref-changelog.md:510 +#, fuzzy msgid "" -"Yes, of course. 
A list of available examples using Flower within a " -"blockchain environment is available here:" -msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" +"**Add Vertical FL example** " +"([#2598](https://github.com/adap/flower/pull/2598))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-changelog.md:512 +#, fuzzy msgid "" -"`Flower meets Nevermined GitHub Repository `_." +"We had many questions about Vertical Federated Learning using Flower, so " +"we decided to add an simple example for it on the [Titanic " +"dataset](https://www.kaggle.com/competitions/titanic/data) alongside a " +"tutorial (in the README)." msgstr "" -"`Flower meets Nevermined GitHub Repository `_." +"我们收到了许多关于使用 Flower 进行垂直联合学习的问题,因此我们决定在 [Titanic " +"数据集](https://www.kaggle.com/competitions/titanic/data) 上添加一个简单的示例,并附上教程(在" +" README 中)。" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-changelog.md:514 msgid "" -"`Flower meets Nevermined YouTube video " -"`_." -msgstr "" -"`Flower meets Nevermined YouTube 视频 " -"`_." +"**Support custom** `ClientManager` **in** `start_driver()` " +"([#2292](https://github.com/adap/flower/pull/2292))" +msgstr "**在***`start_driver()`中支持自定义***`ClientManager([#2292](https://github.com/adap/flower/pull/2292))" -#: ../../source/ref-faq.rst:30 -#, fuzzy +#: ../../source/ref-changelog.md:516 msgid "" -"`Flower meets KOSMoS `_." +"**Update REST API to support create and delete nodes** " +"([#2283](https://github.com/adap/flower/pull/2283))" msgstr "" -"`Flower meets KOSMoS `_." +"**更新 REST API 以支持创建和删除节点** " +"([#2283](https://github.com/adap/flower/pull/2283))" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-changelog.md:518 +#, fuzzy msgid "" -"`Flower meets Talan blog post `_ ." 
+"**Update the Android SDK** " +"([#2187](https://github.com/adap/flower/pull/2187))" msgstr "" -"`Flower meets Talan博文 `_ 。" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-changelog.md:520 +#, fuzzy +msgid "Add gRPC request-response capability to the Android SDK." +msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" + +#: ../../source/ref-changelog.md:522 +#, fuzzy msgid "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"**Update the C++ SDK** " +"([#2537](https://github.com/adap/flower/pull/2537), " +"[#2528](https://github.com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" msgstr "" -"`Flower meets Talan GitHub Repository " -"`_ ." +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/ref-telemetry.md:1 -msgid "Telemetry" -msgstr "遥测功能" +#: ../../source/ref-changelog.md:524 +msgid "Add gRPC request-response capability to the C++ SDK." +msgstr "为 C++ SDK 添加 gRPC 请求-响应功能。" -#: ../../source/ref-telemetry.md:3 +#: ../../source/ref-changelog.md:526 +#, fuzzy msgid "" -"The Flower open-source project collects **anonymous** usage metrics to " -"make well-informed decisions to improve Flower. Doing this enables the " -"Flower team to understand how Flower is used and what challenges users " -"might face." 
+"**Make HTTPS the new default** " +"([#2591](https://github.com/adap/flower/pull/2591), " +"[#2636](https://github.com/adap/flower/pull/2636))" msgstr "" -"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " -"Flower 的使用情况以及用户可能面临的挑战。" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/ref-telemetry.md:5 +#: ../../source/ref-changelog.md:528 +#, fuzzy msgid "" -"**Flower is a friendly framework for collaborative AI and data science.**" -" Staying true to this statement, Flower makes it easy to disable " -"telemetry for users that do not want to share anonymous usage metrics." -msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" - -#: ../../source/ref-telemetry.md:7 -msgid "Principles" -msgstr "原则" +"Flower is moving to HTTPS by default. The new `flower-server` requires " +"passing `--certificates`, but users can enable `--insecure` to use HTTP " +"for prototyping. The same applies to `flower-client`, which can either " +"use user-provided credentials or gRPC-bundled certificates to connect to " +"an HTTPS-enabled server or requires opt-out via passing `--insecure` to " +"enable insecure HTTP connections." +msgstr "" +"Flower 默认使用 HTTPS。新的 \"flower-server \"需要通过\"--证书\",但用户可以启用\"--不安全 \"来使用 " +"HTTP 进行原型开发。这同样适用于 `flower-client`,它可以使用用户提供的凭证或 gRPC 绑定证书连接到支持 HTTPS " +"的服务器,也可以通过传递 `--insecure`来启用不安全的 HTTP 连接。" -#: ../../source/ref-telemetry.md:9 -msgid "We follow strong principles guarding anonymous usage metrics collection:" -msgstr "我们遵循严格的匿名使用指标收集原则:" +#: ../../source/ref-changelog.md:530 +#, fuzzy +msgid "" +"For backward compatibility, `start_client()` and `start_numpy_client()` " +"will still start in insecure mode by default. In a future release, " +"insecure connections will require user opt-in by passing `insecure=True`." 
+msgstr "" +"为了向后兼容,`start_client()` 和 `start_numpy_client()` " +"默认仍以不安全模式启动。在未来的版本中,不安全连接将需要用户通过传递 `insecure=True` 进行选择。" -#: ../../source/ref-telemetry.md:11 +#: ../../source/ref-changelog.md:532 msgid "" -"**Optional:** You will always be able to disable telemetry; read on to " -"learn “[How to opt-out](#how-to-opt-out)”." -msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" +"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" +msgstr "" +"** 统一客户端应用程序接口** ([#2303](https://github.com/adap/flower/pull/2303), " +"[#2390](https://github.com/adap/flower/pull/2390), " +"[#2493](https://github.com/adap/flower/pull/2493))" -#: ../../source/ref-telemetry.md:12 +#: ../../source/ref-changelog.md:534 +#, fuzzy msgid "" -"**Anonymous:** The reported usage metrics are anonymous and do not " -"contain any personally identifiable information (PII). See “[Collected " -"metrics](#collected-metrics)” to understand what metrics are being " -"reported." +"Using the `client_fn`, Flower clients can interchangeably run as " +"standalone processes (i.e. via `start_client`) or in simulation (i.e. via" +" `start_simulation`) without requiring changes to how the client class is" +" defined and instantiated. The `to_client()` function is introduced to " +"convert a `NumPyClient` to a `Client`." 
msgstr "" -"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " -"\"了解报告的指标。" +"使用 `client_fn`,Flower 客户端可以作为独立进程(即通过 `start_client`)或在模拟中(即通过 " +"`start_simulation`)交替运行,而无需更改客户端类的定义和实例化方式。调用 `start_numpy_client` 现已过时。" -#: ../../source/ref-telemetry.md:13 +#: ../../source/ref-changelog.md:536 msgid "" -"**Transparent:** You can easily inspect what anonymous metrics are being " -"reported; see the section “[How to inspect what is being reported](#how-" -"to-inspect-what-is-being-reported)”" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" msgstr "" -"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" -"being-reported)\"部分" +"**添加新**\"Bulyan " +"\"**策略**([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891)" -#: ../../source/ref-telemetry.md:14 +#: ../../source/ref-changelog.md:538 msgid "" -"**Open for feedback:** You can always reach out to us if you have " -"feedback; see the section “[How to contact us](#how-to-contact-us)” for " -"details." -msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" +msgstr "新的 \"Bulyan\"策略通过[El Mhamdi 等人,2018](https://arxiv.org/abs/1802.07927)实现" -#: ../../source/ref-telemetry.md:16 -msgid "How to opt-out" -msgstr "如何退出" +#: ../../source/ref-changelog.md:540 +#, fuzzy +msgid "" +"**Add new** `XGB Bagging` **strategy** " +"([#2611](https://github.com/adap/flower/pull/2611))" +msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/ref-telemetry.md:18 +#: ../../source/ref-changelog.md:542 ../../source/ref-changelog.md:544 +#, fuzzy msgid "" -"When Flower starts, it will check for an environment variable called " -"`FLWR_TELEMETRY_ENABLED`. 
Telemetry can easily be disabled by setting " -"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " -"client, simply do so by prepending your command as in:" +"**Introduce `WorkloadState`** " +"([#2564](https://github.com/adap/flower/pull/2564), " +"[#2632](https://github.com/adap/flower/pull/2632))" msgstr "" -"Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " -"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " -"服务器或客户端,只需在命令前添加以下内容即可:" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" -#: ../../source/ref-telemetry.md:24 +#: ../../source/ref-changelog.md:548 msgid "" -"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," -" `.bashrc` (or whatever configuration file applies to your environment) " -"to disable Flower telemetry permanently." +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" msgstr "" -"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " -"Flower telemetry。" - -#: ../../source/ref-telemetry.md:26 -msgid "Collected metrics" -msgstr "收集的指标" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" -#: ../../source/ref-telemetry.md:28 -msgid "Flower telemetry collects the following metrics:" -msgstr "Flower 遥测技术收集以下指标:" +#: ../../source/ref-changelog.md:550 +msgid "" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" +msgstr "" +"Baselines文档([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400)" -#: ../../source/ref-telemetry.md:30 +#: ../../source/ref-changelog.md:552 msgid "" -"**Flower version.** Understand which versions of Flower are currently " 
-"being used. This helps us to decide whether we should invest effort into " -"releasing a patch version for an older version of Flower or instead use " -"the bandwidth to build new features." -msgstr "**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" - -#: ../../source/ref-telemetry.md:32 -msgid "" -"**Operating system.** Enables us to answer questions such as: *Should we " -"create more guides for Linux, macOS, or Windows?*" -msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" - -#: ../../source/ref-telemetry.md:34 -msgid "" -"**Python version.** Knowing the Python version helps us, for example, to " -"decide whether we should invest effort into supporting old versions of " -"Python or stop supporting them and start taking advantage of new Python " -"features." -msgstr "**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" - -#: ../../source/ref-telemetry.md:36 -msgid "" -"**Hardware properties.** Understanding the hardware environment that " -"Flower is being used in helps to decide whether we should, for example, " -"put more effort into supporting low-resource environments." -msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" - -#: ../../source/ref-telemetry.md:38 -msgid "" -"**Execution mode.** Knowing what execution mode Flower starts in enables " -"us to understand how heavily certain features are being used and better " -"prioritize based on that." -msgstr "** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" - -#: ../../source/ref-telemetry.md:40 -msgid "" -"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " -"time a Flower workload starts. This allows us to understand which device " -"types not only start Flower workloads but also successfully complete " -"them." 
-msgstr "" -"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " -"工作负载,而且还成功完成了它们。" - -#: ../../source/ref-telemetry.md:42 -msgid "" -"**Source.** Flower telemetry tries to store a random source ID in " -"`~/.flwr/source` the first time a telemetry event is generated. The " -"source ID is important to identify whether an issue is recurring or " -"whether an issue is triggered by multiple clusters running concurrently " -"(which often happens in simulation). For example, if a device runs " -"multiple workloads at the same time, and this results in an issue, then, " -"in order to reproduce the issue, multiple workloads must be started at " -"the same time." -msgstr "" -"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " -"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" - -#: ../../source/ref-telemetry.md:44 -msgid "" -"You may delete the source ID at any time. If you wish for all events " -"logged under a specific source ID to be deleted, you can send a deletion " -"request mentioning the source ID to `telemetry@flower.ai`. All events " -"related to that source ID will then be permanently deleted." -msgstr "" -"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` 发送删除请求,并提及该源" -" ID。届时,与该源 ID 相关的所有事件都将被永久删除。" - -#: ../../source/ref-telemetry.md:46 -msgid "" -"We will not collect any personally identifiable information. If you think" -" any of the metrics collected could be misused in any way, please [get in" -" touch with us](#how-to-contact-us). We will update this page to reflect " -"any changes to the metrics collected and publish changes in the " -"changelog." -msgstr "" -"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" -"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" - -#: ../../source/ref-telemetry.md:48 -msgid "" -"If you think other metrics would be helpful for us to better guide our " -"decisions, please let us know! 
We will carefully review them; if we are " -"confident that they do not compromise user privacy, we may add them." -msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" - -#: ../../source/ref-telemetry.md:50 -msgid "How to inspect what is being reported" -msgstr "如何检查报告中的内容" - -#: ../../source/ref-telemetry.md:52 -msgid "" -"We wanted to make it very easy for you to inspect what anonymous usage " -"metrics are reported. You can view all the reported telemetry information" -" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " -"is disabled by default. You may use logging independently from " -"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " -"without sending any metrics." +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" msgstr "" -"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " -"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " -"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" - -#: ../../source/ref-telemetry.md:58 -msgid "" -"The inspect Flower telemetry without sending any anonymous usage metrics," -" use both environment variables:" -msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" - -#: ../../source/ref-telemetry.md:64 -msgid "How to contact us" -msgstr "如何联系我们" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" -#: ../../source/ref-telemetry.md:66 +#: ../../source/ref-changelog.md:554 msgid "" -"We want to hear from you. If you have any feedback or ideas on how to " -"improve the way we handle anonymous usage metrics, reach out to us via " -"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " -"(`telemetry@flower.ai`)." 
+"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" msgstr "" -"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" -"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" -#: ../../source/tutorial-quickstart-android.rst:-1 -msgid "" -"Read this Federated Learning quickstart tutorial for creating an Android " -"app using Flower." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" +#: ../../source/ref-changelog.md:556 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" -#: ../../source/tutorial-quickstart-android.rst:5 -msgid "Quickstart Android" -msgstr "快速入门 Android" +#: ../../source/ref-changelog.md:558 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgstr "FjORD [#2431](https://github.com/adap/flower/pull/2431)" -#: ../../source/tutorial-quickstart-android.rst:10 -msgid "" -"Let's build a federated learning system using TFLite and Flower on " -"Android!" -msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" +#: ../../source/ref-changelog.md:560 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgstr "MOON [#2421](https://github.com/adap/flower/pull/2421)" -#: ../../source/tutorial-quickstart-android.rst:12 -msgid "" -"Please refer to the `full code example " -"`_ to learn " -"more." -msgstr "" -"请参阅`完整代码示例 " -"`_了解更多信息。" +#: ../../source/ref-changelog.md:562 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgstr "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" -#: ../../source/tutorial-quickstart-fastai.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." 
-msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" +#: ../../source/ref-changelog.md:564 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "FedPer [#2266](https://github.com/adap/flower/pull/2266)" -#: ../../source/tutorial-quickstart-fastai.rst:5 -msgid "Quickstart fastai" -msgstr "快速入门 fastai" +#: ../../source/ref-changelog.md:566 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" -msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" +#: ../../source/ref-changelog.md:568 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" -#: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/ref-changelog.md:570 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" msgstr "" -"请参阅 `完整代码示例 `_了解更多信息。" - -#: ../../source/tutorial-quickstart-huggingface.rst:-1 -msgid "" -"Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." -msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" - -#: ../../source/tutorial-quickstart-huggingface.rst:5 -msgid "Quickstart 🤗 Transformers" -msgstr "🤗 Transformers快速入门" - -#: ../../source/tutorial-quickstart-huggingface.rst:10 -msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" -msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" 
+"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/ref-changelog.md:572 +#, fuzzy msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"**General updates to Flower Examples** " +"([#2384](https://github.com/adap/flower/pull/2384), " +"[#2425](https://github.com/adap/flower/pull/2425), " +"[#2526](https://github.com/adap/flower/pull/2526), " +"[#2302](https://github.com/adap/flower/pull/2302), " +"[#2545](https://github.com/adap/flower/pull/2545))" msgstr "" -"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " -"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" - -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" -msgstr "依赖关系" +"** 更新 C++ SDK** ([#2537](https://github/com/adap/flower/pull/2537), " +"[#2528](https://github/com/adap/flower/pull/2528), " +"[#2523](https://github.com/adap/flower/pull/2523), " +"[#2522](https://github.com/adap/flower/pull/2522))" -#: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/ref-changelog.md:574 +#, fuzzy msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. 
This can be done using " -":code:`pip`:" +"**General updates to Flower Baselines** " +"([#2301](https://github.com/adap/flower/pull/2301), " +"[#2305](https://github.com/adap/flower/pull/2305), " +"[#2307](https://github.com/adap/flower/pull/2307), " +"[#2327](https://github.com/adap/flower/pull/2327), " +"[#2435](https://github.com/adap/flower/pull/2435), " +"[#2462](https://github.com/adap/flower/pull/2462), " +"[#2463](https://github.com/adap/flower/pull/2463), " +"[#2461](https://github.com/adap/flower/pull/2461), " +"[#2469](https://github.com/adap/flower/pull/2469), " +"[#2466](https://github.com/adap/flower/pull/2466), " +"[#2471](https://github.com/adap/flower/pull/2471), " +"[#2472](https://github.com/adap/flower/pull/2472), " +"[#2470](https://github.com/adap/flower/pull/2470))" msgstr "" -"要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 :code:`flwr`、 " -":code:`torch`和 :code:`transformers`。这可以通过 :code:`pip` 来完成:" - -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "标准Hugging Face工作流程" - -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" -msgstr "处理数据" +"**普通改进**([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349)," +" [#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446) " +"[#2561](https://github.com/adap/flower/pull/2561))" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/ref-changelog.md:576 +#, fuzzy msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. 
We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"**General updates to the simulation engine** " +"([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448), " +"[#2294](https://github.com/adap/flower/pull/2294))" msgstr "" -"为了获取 IMDB 数据集,我们将使用 Hugging Face 的 :code:`datasets` 库。然后,我们需要对数据进行标记化,并创建" -" :code:`PyTorch` 数据加载器,这些都将在 :code:`load_data` 函数中完成:" - -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" -msgstr "训练和测试模型" +"**模拟引擎的普通更新** ([#2331](https://github.com/adap/flower/pull/2331), " +"[#2447](https://github.com/adap/flower/pull/2447), " +"[#2448](https://github.com/adap/flower/pull/2448))" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/ref-changelog.md:578 +#, fuzzy msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. 
This is very similar to any " -":code:`PyTorch` training or testing loop:" +"**General updates to Flower SDKs** " +"([#2288](https://github.com/adap/flower/pull/2288), " +"[#2429](https://github.com/adap/flower/pull/2429), " +"[#2555](https://github.com/adap/flower/pull/2555), " +"[#2543](https://github.com/adap/flower/pull/2543), " +"[#2544](https://github.com/adap/flower/pull/2544), " +"[#2597](https://github.com/adap/flower/pull/2597), " +"[#2623](https://github.com/adap/flower/pull/2623))" msgstr "" -"有了创建 trainloader 和 testloader 的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` " -"训练或测试循环都非常相似:" - -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" -msgstr "创建模型本身" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475)))" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/ref-changelog.md:580 +#, fuzzy msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" -msgstr "" -"要创建模型本身,我们只需使用 Hugging Face 的 :code:`AutoModelForSequenceClassification` " -"加载预训练的 distillBERT 模型:" - -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "将示例联邦化" +"**General improvements** " +"([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " 
+"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " +"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" +msgstr "" +"**一般改进** ([#2309](https://github.com/adap/flower/pull/2309), " +"[#2310](https://github.com/adap/flower/pull/2310), " +"[#2313](https://github.com/adap/flower/pull/2313), " +"[#2316](https://github.com/adap/flower/pull/2316), " +"[#2317](https://github.com/adap/flower/pull/2317), " +"[#2349](https://github.com/adap/flower/pull/2349), " +"[#2360](https://github.com/adap/flower/pull/2360), " +"[#2402](https://github.com/adap/flower/pull/2402), " +"[#2446](https://github.com/adap/flower/pull/2446), " +"[#2561](https://github.com/adap/flower/pull/2561), " +"[#2273](https://github.com/adap/flower/pull/2273), " +"[#2267](https://github.com/adap/flower/pull/2267), " +"[#2274](https://github.com/adap/flower/pull/2274), " +"[#2275](https://github.com/adap/flower/pull/2275), " +"[#2432](https://github.com/adap/flower/pull/2432), " +"[#2251](https://github.com/adap/flower/pull/2251), " 
+"[#2321](https://github.com/adap/flower/pull/2321), " +"[#1936](https://github.com/adap/flower/pull/1936), " +"[#2408](https://github.com/adap/flower/pull/2408), " +"[#2413](https://github.com/adap/flower/pull/2413), " +"[#2401](https://github.com/adap/flower/pull/2401), " +"[#2531](https://github.com/adap/flower/pull/2531), " +"[#2534](https://github.com/adap/flower/pull/2534), " +"[#2535](https://github.com/adap/flower/pull/2535), " +"[#2521](https://github.com/adap/flower/pull/2521), " +"[#2553](https://github.com/adap/flower/pull/2553), " +"[#2596](https://github.com/adap/flower/pull/2596))" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" -msgstr "创建 IMDBClient" +#: ../../source/ref-changelog.md:582 ../../source/ref-changelog.md:672 +#: ../../source/ref-changelog.md:736 ../../source/ref-changelog.md:790 +#: ../../source/ref-changelog.md:857 +msgid "Flower received many improvements under the hood, too many to list here." +msgstr "Flower 进行了许多改进,这里就不一一列举了。" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/ref-changelog.md:586 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). 
" -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" msgstr "" -"要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " -":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 :code:`PyTorch` 模型:" +"**移除对 Python 3.7 的支持** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/ref-changelog.md:588 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." -msgstr "" -":code:`get_parameters` " -"函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" - -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" -msgstr "启动服务器" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." 
+msgstr "在 Flower 1.5 中,Python 3.7 支持已被弃用,本版本将删除该支持。Flower 现在需要 Python 3.8。" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/ref-changelog.md:590 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" msgstr "" -"现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " -":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " -":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" +"**从** `start_client` 中移除** `rest` **实验参数 " +"([#2324](https://github.com/adap/flower/pull/2324))" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/ref-changelog.md:592 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." 
msgstr "" -"使用 :code:`weighted_average` " -"函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" - -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" -msgstr "把所有东西放在一起" +"删除了 `start_client` 和 `start_numpy_client` 中的参数 `rest`(仍属试验性质)。请使用 " +"`transport=\"rest\"` 来选择使用试验性 REST API。" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" -msgstr "现在我们可以使用:" +#: ../../source/ref-changelog.md:594 +msgid "v1.5.0 (2023-08-31)" +msgstr "v1.5.0 (2023-08-31)" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/ref-changelog.md:600 msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "他们就能连接到服务器,开始联邦训练。" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +msgstr "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " -#: ../../source/tutorial-quickstart-huggingface.rst:223 -#, fuzzy +#: ../../source/ref-changelog.md:604 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" msgstr "" -"如果您想查看所有内容,请查看完整的代码示例: [https://github.com/adap/flower/tree/main/examples" -"/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." 
+"**引入新的模拟引擎** ([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/ref-changelog.md:606 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." -msgstr "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower 联合Hugging Face的工作流程。" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." +msgstr "" +"新的模拟引擎从头开始重新编写,但仍完全向后兼容。它的稳定性和内存处理能力大大提高,尤其是在使用 GPU 时。仿真可透明地适应不同的设置,以在仅 " +"CPU、CPU+GPU、多 GPU 或多节点多 GPU 环境中扩展模拟。" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/ref-changelog.md:608 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." -msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.ai/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." 
+msgstr "" +"综合文档包括新的[how-to run simulations](https://flower.ai/docs/framework/how-to-" +"run-simulations.html) guide, new [simulation-" +"pytorch](https://flower.ai/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.ai/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)。" -#: ../../source/tutorial-quickstart-ios.rst:-1 +#: ../../source/ref-changelog.md:610 msgid "" -"Read this Federated Learning quickstart tutorial for creating an iOS app " -"using Flower to train a neural network on MNIST." -msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " 
+"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" +msgstr "" +"**重构 Flower 文档** ([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " +"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" -#: ../../source/tutorial-quickstart-ios.rst:5 -msgid "Quickstart iOS" -msgstr "快速入门 iOS" +#: ../../source/ref-changelog.md:612 +#, fuzzy +msgid "" +"Much effort went into a completely restructured Flower docs 
experience. " +"The documentation on [flower.ai/docs](https://flower.ai/docs) is now " +"divided into Flower Framework, Flower Baselines, Flower Android SDK, " +"Flower iOS SDK, and code example projects." +msgstr "" +"Flower 文档体验的全面重构耗费了大量精力。现在,[flower.ai/docs](flower.ai/docs)上的文档分为 Flower " +"Framework、Flower Baselines、Flower Android SDK、Flower iOS SDK 和代码示例项目。" -#: ../../source/tutorial-quickstart-ios.rst:10 +#: ../../source/ref-changelog.md:614 msgid "" -"In this tutorial we will learn how to train a Neural Network on MNIST " -"using Flower and CoreML on iOS devices." -msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" +msgstr "" +"**介绍 Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" -#: ../../source/tutorial-quickstart-ios.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:616 msgid "" -"First of all, for running the Flower Python server, it is recommended to " -"create a virtual environment and run everything within a :doc:`virtualenv" -" `. For the Flower client " -"implementation in iOS, it is recommended to use Xcode as our IDE." +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." msgstr "" -"首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " -"`_ 中运行一切。对于在 iOS 中实现 " -"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" +"这是 Flower Swift SDK 的首个预览版。Flower 对 iOS 的支持正在不断改进,除了 Swift SDK " +"和代码示例外,现在还有 iOS 快速入门教程。" -#: ../../source/tutorial-quickstart-ios.rst:15 +#: ../../source/ref-changelog.md:618 msgid "" -"Our example consists of one Python *server* and two iPhone *clients* that" -" all have the same model." 
-msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" +msgstr "" +"**介绍Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" -#: ../../source/tutorial-quickstart-ios.rst:17 +#: ../../source/ref-changelog.md:620 msgid "" -"*Clients* are responsible for generating individual weight updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." -msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." +msgstr "" +"这是 Flower Kotlin SDK 的首个预览版。Flower 对 Android 的支持正在不断改进,除了 Kotlin SDK " +"和代码示例,现在还有 Android 快速入门教程。" -#: ../../source/tutorial-quickstart-ios.rst:21 +#: ../../source/ref-changelog.md:622 msgid "" -"Now that we have a rough idea of what is going on, let's get started to " -"setup our Flower server environment. We first need to install Flower. 
You" -" can do this by using pip:" -msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" - -#: ../../source/tutorial-quickstart-ios.rst:27 -msgid "Or Poetry:" -msgstr "或者Poetry:" - -#: ../../source/tutorial-quickstart-ios.rst:34 -#: ../../source/tutorial-quickstart-pytorch.rst:37 -#: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 -#: ../../source/tutorial-quickstart-xgboost.rst:55 -msgid "Flower Client" -msgstr "Flower 客户端" - -#: ../../source/tutorial-quickstart-ios.rst:36 -msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training using CoreML as our local training pipeline and " -"MNIST as our dataset. For simplicity reasons we will use the complete " -"Flower client with CoreML, that has been implemented and stored inside " -"the Swift SDK. The client implementation can be seen below:" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" msgstr "" -"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " 
-"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " -"中实现并存储。客户端实现如下:" +"*介绍新的端到端测试** ([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" -#: ../../source/tutorial-quickstart-ios.rst:72 +#: ../../source/ref-changelog.md:624 msgid "" -"Let's create a new application project in Xcode and add :code:`flwr` as a" -" dependency in your project. For our application, we will store the logic" -" of our app in :code:`FLiOSModel.swift` and the UI elements in " -":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" -" in this quickstart. Please refer to the `full code example " -"`_ to learn more " -"about the app." -msgstr "" -"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " -":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " -"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " -"`_ 以了解更多有关应用程序的信息。" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." 
+msgstr "新的测试设施可确保新的变更与现有的框架集成或策略保持兼容。" -#: ../../source/tutorial-quickstart-ios.rst:75 -msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" -msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" +#: ../../source/ref-changelog.md:626 +msgid "**Deprecate Python 3.7**" +msgstr "** 过时的 Python 3.7**" -#: ../../source/tutorial-quickstart-ios.rst:83 +#: ../../source/ref-changelog.md:628 msgid "" -"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " -"will be bundled inside the application during deployment to your iOS " -"device. We need to pass the url to access mlmodel and run CoreML machine " -"learning processes, it can be retrieved by calling the function " -":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " -"into :code:`MLBatchProvider` object. The preprocessing is done inside " -":code:`DataLoader.swift`." -msgstr "" -"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " -"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " -"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " -"中完成。" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." +msgstr "由于 Python 3.7 已于 2023-06-27 弃用 (EOL),对 Python 3.7 的支持现已废弃,并将在即将发布的版本中移除。" -#: ../../source/tutorial-quickstart-ios.rst:99 -#, fuzzy +#: ../../source/ref-changelog.md:630 msgid "" -"Since CoreML does not allow the model parameters to be seen before " -"training, and accessing the model parameters during or after the training" -" can only be done by specifying the layer name, we need to know this " -"information beforehand, through looking at the model specification, which" -" are written as proto files. The implementation can be seen in " -":code:`MLModelInspect`." 
+"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" msgstr "" -"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " -"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" +"**添加新的**`FedTrimmedAvg`**策略**([#1769](https://github.com/adap/flower/pull/1769)," +" [#1853](https://github.com/adap/flower/pull/1853)" -#: ../../source/tutorial-quickstart-ios.rst:102 -#, fuzzy +#: ../../source/ref-changelog.md:632 msgid "" -"After we have all of the necessary information, let's create our Flower " -"client." -msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." +msgstr "" +"新的 \"FedTrimmedAvg \"策略实现了[Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)的 \"Trimmed Mean\"。" -#: ../../source/tutorial-quickstart-ios.rst:117 +#: ../../source/ref-changelog.md:634 msgid "" -"Then start the Flower gRPC client and start communicating to the server " -"by passing our Flower client to the function :code:`startFlwrGRPC`." -msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" +msgstr "**引入 start_driver**([#1697](https://github.com/adap/flower/pull/1697))" -#: ../../source/tutorial-quickstart-ios.rst:124 +#: ../../source/ref-changelog.md:636 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " -"The attribute :code:`hostname` and :code:`port` tells the client which " -"server to connect to. This can be done by entering the hostname and port " -"in the application before clicking the start button to start the " -"federated learning process." 
+"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " -":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " -"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" - -#: ../../source/tutorial-quickstart-ios.rst:129 -#: ../../source/tutorial-quickstart-pytorch.rst:203 -#: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 -#: ../../source/tutorial-quickstart-xgboost.rst:309 -msgid "Flower Server" -msgstr "Flower 服务器" +"除了 `start_server` 和使用原始驱动 API 之外,还有一个新的 `start_driver` 函数,只需修改一行代码,就能将 " +"`start_server` 脚本作为 Flower 驱动程序运行。请查看 `mt-pytorch` 代码示例,了解使用 " +"`start_driver` 的工作示例。" -#: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-pytorch.rst:205 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 +#: ../../source/ref-changelog.md:638 msgid "" -"For simple workloads we can start a Flower server and leave all the " -"configuration possibilities at their default values. In a file named " -":code:`server.py`, import Flower and start the server:" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" msgstr "" -"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " -"Flower 并启动服务器:" - -#: ../../source/tutorial-quickstart-ios.rst:142 -#: ../../source/tutorial-quickstart-pytorch.rst:216 -#: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 -msgid "Train the model, federated!" -msgstr "联邦训练模型!" 
- -#: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-pytorch.rst:218 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 -#: ../../source/tutorial-quickstart-xgboost.rst:525 -msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. FL systems usually have a server and " -"multiple clients. We therefore have to start the server first:" -msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +"为 `mt-pytorch` **代码示例**添加参数聚合 " +"([#1785](https://github.com/adap/flower/pull/1785))" -#: ../../source/tutorial-quickstart-ios.rst:152 +#: ../../source/ref-changelog.md:640 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Build and run the client through your Xcode, one through Xcode" -" Simulator and the other by deploying it to your iPhone. To see more " -"about how to deploy your app to iPhone or Simulator visit `here " -"`_." +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." msgstr "" -"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " -"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " -"`_。" +"`mt-pytorch`示例展示了如何在编写驱动程序脚本时聚合参数。附带的 `driver.py` 和 `server.py` " +"已经进行了调整,以演示构建服务器端逻辑的低级方法和高级方法。" -#: ../../source/tutorial-quickstart-ios.rst:156 +#: ../../source/ref-changelog.md:642 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system in your ios device. The full `source code " -"`_ for this " -"example can be found in :code:`examples/ios`." +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"恭喜您! 
您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可在 " -":code:`examples/ios` 中找到。" +"**将实验性 REST API 移植到 Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/tutorial-quickstart-jax.rst:-1 +#: ../../source/ref-changelog.md:644 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Jax to train a linear regression model on a scikit-learn dataset." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" - -#: ../../source/tutorial-quickstart-jax.rst:5 -msgid "Quickstart JAX" -msgstr "快速入门 JAX" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." +msgstr "" +"REST API(试验性)曾在 [FastAPI](https://fastapi.tiangolo.com/) 中实现,但现在已迁移到直接使用 " +"[Starlette](https://www.starlette.io/) 。" -#: ../../source/tutorial-quickstart-pandas.rst:-1 +#: ../../source/ref-changelog.md:646 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with Pandas to perform Federated Analytics." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" - -#: ../../source/tutorial-quickstart-pandas.rst:5 -msgid "Quickstart Pandas" -msgstr "快速入门Pandas" - -#: ../../source/tutorial-quickstart-pandas.rst:10 -msgid "Let's build a federated analytics system using Pandas and Flower!" -msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." +msgstr "请注意:REST 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/tutorial-quickstart-pandas.rst:12 +#: ../../source/ref-changelog.md:648 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." 
+"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" msgstr "" -"请参阅 `完整代码示例 `_\" 了解更多信息。" +"**引入实验性 gRPC 请求-响应 API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901)" -#: ../../source/tutorial-quickstart-pytorch.rst:-1 +#: ../../source/ref-changelog.md:650 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch to train a CNN model on MNIST." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." +msgstr "" +"除了现有的 gRPC 应用程序接口(基于双向流)和试验性 REST 应用程序接口外,现在还有一个新的 gRPC " +"应用程序接口,它使用请求-响应模型与客户端节点通信。" -#: ../../source/tutorial-quickstart-pytorch.rst:13 +#: ../../source/ref-changelog.md:652 msgid "" -"In this tutorial we will learn how to train a Convolutional Neural " -"Network on CIFAR10 using Flower and PyTorch." -msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." +msgstr "请注意:gRPC 请求-响应 API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/tutorial-quickstart-pytorch.rst:15 -#: ../../source/tutorial-quickstart-xgboost.rst:39 -#, fuzzy +#: ../../source/ref-changelog.md:654 msgid "" -"First of all, it is recommended to create a virtual environment and run " -"everything within a :doc:`virtualenv `." 
+"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" msgstr "" -"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" +"**用新的** `start_client(transport=\"rest\")` 替换实验性** " +"`start_client(rest=True)` " +"([#1880](https://github.com/adap/flower/pull/1880))" -#: ../../source/tutorial-quickstart-pytorch.rst:17 -#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +#: ../../source/ref-changelog.md:656 msgid "" -"Our example consists of one *server* and two *clients* all having the " -"same model." -msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." +msgstr "" +"已废弃(试验性的)`start_client`参数`rest`,改用新参数`transport`。`start_client(transport=\"rest\")`将产生与以前的`start_client(rest=True)`相同的行为。所有代码都应迁移到新参数" +" `transport`。过时的参数 `rest` 将在今后的版本中删除。" -#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/ref-changelog.md:658 msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. These updates are then sent to " -"the *server* which will aggregate them to produce a better model. " -"Finally, the *server* sends this improved version of the model back to " -"each *client*. A complete cycle of weight updates is called a *round*." 
-msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" +msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/flower/pull/2197))" -#: ../../source/tutorial-quickstart-pytorch.rst:23 +#: ../../source/ref-changelog.md:660 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running :" -msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." +msgstr "" +"现在我们启动一个 gRPC 服务器,并将 `grpc.keepalive_permit_without_calls` 选项默认设置为 " +"0。这将防止客户端在没有未处理数据流时发送 keepalive pings。" -#: ../../source/tutorial-quickstart-pytorch.rst:29 +#: ../../source/ref-changelog.md:662 msgid "" -"Since we want to use PyTorch to solve a computer vision task, let's go " -"ahead and install PyTorch and the **torchvision** library:" -msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" +msgstr "**改进示例笔记** ([#2005](https://github.com/adap/flower/pull/2005))" -#: ../../source/tutorial-quickstart-pytorch.rst:39 +#: ../../source/ref-changelog.md:664 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "有一个新的 30 分钟的联邦学习 PyTorch 教程!" + +#: ../../source/ref-changelog.md:666 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. Our training " -"procedure and network architecture are based on PyTorch's `Deep Learning " -"with PyTorch " -"`_." 
+"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" msgstr "" -"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 PyTorch " -"的《Deep Learning with PyTorch " -"`_》。" +"**更新Example** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" -#: ../../source/tutorial-quickstart-pytorch.rst:41 +#: ../../source/ref-changelog.md:668 msgid "" -"In a file called :code:`client.py`, import Flower and PyTorch related " -"packages:" -msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 PyTorch 相关软件包:" - -#: ../../source/tutorial-quickstart-pytorch.rst:56 -msgid "In addition, we define the device allocation in PyTorch with:" -msgstr "此外,我们还在 PyTorch 中定义了设备分配:" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. 
A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." +msgstr "" +"许多示例都进行了重大更新,包括简化了 advanced-tensorflow 和 advanced-pytorch 示例,改进了 " +"TensorFlow 示例的 macOS 兼容性,以及模拟代码示例。一项重大升级是所有代码示例现在都有了 " +"\"requirements.txt\"(除 \"pyproject.toml \"外)。" -#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/ref-changelog.md:670 msgid "" -"We use PyTorch to load CIFAR10, a popular colored image classification " -"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " -"the training and test data that are then normalized." +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" msgstr "" -"我们使用 PyTorch 来加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " -":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" +"**普通改进**([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" -#: ../../source/tutorial-quickstart-pytorch.rst:78 -msgid "" -"Define the loss and optimizer with PyTorch. The training of the dataset " -"is done by looping over the dataset, measure the corresponding loss and " -"optimize it." -msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" +#: ../../source/ref-changelog.md:678 +msgid "v1.4.0 (2023-04-21)" +msgstr "v1.4.0 (2023-04-21)" -#: ../../source/tutorial-quickstart-pytorch.rst:94 +#: ../../source/ref-changelog.md:684 msgid "" -"Define then the validation of the machine learning network. 
We loop over" -" the test set and measure the loss and accuracy of the test set." -msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +msgstr "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/ref-changelog.md:688 msgid "" -"After defining the training and testing of a PyTorch machine learning " -"model, we use the functions for the Flower clients." 
-msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" +msgstr "" +"**引入对XGBoost的支持(**`FedXgbNnAvg` **策略和示例)** " +"([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" -#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/ref-changelog.md:690 msgid "" -"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " -"Minute Blitz':" -msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. We added a new `FedXgbNnAvg`" +" " +"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)," +" and a [code example](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart) that demonstrates the usage of this new strategy in " +"an XGBoost project." 
+msgstr "" +"XGBoost 是一种基于树的集合机器学习算法,它使用梯度提升来提高模型的准确性。我们添加了一个新的 " +"\"FedXgbNnAvg\"[策略](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py)和一个[代码示例](https://github.com/adap/flower/tree/main/examples" +"/xgboost-quickstart),演示如何在 XGBoost 项目中使用这个新策略。" -#: ../../source/tutorial-quickstart-pytorch.rst:142 +#: ../../source/ref-changelog.md:692 msgid "" -"After loading the data set with :code:`load_data()` we define the Flower " -"interface." -msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower 接口。" +"**Introduce iOS SDK (preview)** " +"([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" +msgstr "" +"**介绍 iOS SDK(预览版)** ([#1621](https://github.com/adap/flower/pull/1621), " +"[#1764](https://github.com/adap/flower/pull/1764))" -#: ../../source/tutorial-quickstart-pytorch.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/ref-changelog.md:694 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"This is a major update for anyone wanting to implement Federated Learning" +" on iOS mobile devices. We now have a swift iOS SDK present under " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" that will facilitate greatly the app creating process. To showcase its " +"use, the [iOS " +"example](https://github.com/adap/flower/tree/main/examples/ios) has also " +"been updated!" 
msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" +"对于想要在 iOS 移动设备上实施联邦学习的人来说,这是一次重大更新。现在,我们在 " +"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)" +" 下提供了一个迅捷的 iOS SDK,这将大大方便应用程序的创建过程。为了展示其使用情况,我们还更新了 [iOS " +"示例](https://github.com/adap/flower/tree/main/examples/ios)!" -#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/ref-changelog.md:696 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " -"defining the following methods (:code:`set_parameters` is optional " -"though):" +"**Introduce new \"What is Federated Learning?\" tutorial** " +"([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721))" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 PyTorch 时,它使 " -":code:`Client` 接口的实现变得更容易。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" - -#: ../../source/tutorial-quickstart-pytorch.rst:156 -#: ../../source/tutorial-quickstart-scikitlearn.rst:119 -msgid "return the model weight as a list of NumPy ndarrays" -msgstr "以 NumPy ndarrays 列表形式返回模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 -msgid ":code:`set_parameters` (optional)" -msgstr ":code:`set_parameters` (可选)" +"**引入新的 " +"\"什么是联邦学习?\"教程**([#1657](https://github.com/adap/flower/pull/1657), " +"[#1721](https://github.com/adap/flower/pull/1721)" -#: ../../source/tutorial-quickstart-pytorch.rst:158 -#: ../../source/tutorial-quickstart-scikitlearn.rst:121 +#: ../../source/ref-changelog.md:698 msgid "" -"update the local model weights with the parameters received from the " -"server" -msgstr "用从服务器接收到的参数更新本地模型参数" - -#: 
../../source/tutorial-quickstart-pytorch.rst:160 -#: ../../source/tutorial-quickstart-scikitlearn.rst:124 -msgid "set the local model weights" -msgstr "设置本地模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:161 -#: ../../source/tutorial-quickstart-scikitlearn.rst:125 -msgid "train the local model" -msgstr "训练本地模型" - -#: ../../source/tutorial-quickstart-pytorch.rst:162 -#: ../../source/tutorial-quickstart-scikitlearn.rst:126 -msgid "receive the updated local model weights" -msgstr "接收更新的本地模型参数" - -#: ../../source/tutorial-quickstart-pytorch.rst:164 -#: ../../source/tutorial-quickstart-scikitlearn.rst:128 -msgid "test the local model" -msgstr "测试本地模型" - -#: ../../source/tutorial-quickstart-pytorch.rst:166 -msgid "which can be implemented in the following way:" -msgstr "可以通过以下方式实现:" +"A new [entry-level tutorial](https://flower.ai/docs/framework/tutorial-" +"what-is-federated-learning.html) in our documentation explains the basics" +" of Fedetated Learning. It enables anyone who's unfamiliar with Federated" +" Learning to start their journey with Flower. Forward it to anyone who's " +"interested in Federated Learning!" +msgstr "" +"我们的文档中新增了一个[入门级教程](https://flower.ai/docs/framework/tutorial-what-is-" +"federated-learning.html),解释了联邦学习的基础知识。它让任何不熟悉联邦学习的人都能开始 Flower " +"之旅。请转发给对联邦学习感兴趣的人!" 
-#: ../../source/tutorial-quickstart-pytorch.rst:189
-#: ../../source/tutorial-quickstart-tensorflow.rst:82
+#: ../../source/ref-changelog.md:700
 msgid ""
-"We can now create an instance of our class :code:`CifarClient` and add "
-"one line to actually run this client:"
-msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:"
+"**Introduce new Flower Baseline: FedProx MNIST** "
+"([#1513](https://github.com/adap/flower/pull/1513), "
+"[#1680](https://github.com/adap/flower/pull/1680), "
+"[#1681](https://github.com/adap/flower/pull/1681), "
+"[#1679](https://github.com/adap/flower/pull/1679))"
+msgstr ""
+"**引入新的 Flower Baseline: FedProx MNIST** "
+"([#1513](https://github.com/adap/flower/pull/1513), "
+"[#1680](https://github.com/adap/flower/pull/1680), "
+"[#1681](https://github.com/adap/flower/pull/1681), "
+"[#1679](https://github.com/adap/flower/pull/1679))"
 
-#: ../../source/tutorial-quickstart-pytorch.rst:196
-#: ../../source/tutorial-quickstart-tensorflow.rst:90
-#, fuzzy
+#: ../../source/ref-changelog.md:702
 msgid ""
-"That's it for the client. We only have to implement :code:`Client` or "
-":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you "
-"implement a client of type :code:`NumPyClient` you'll need to first call "
-"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells "
-"the client which server to connect to. In our case we can run the server "
-"and the client on the same machine, therefore we use "
-":code:`\"[::]:8080\"`. If we run a truly federated workload with the "
-"server and clients running on different machines, all that needs to "
-"change is the :code:`server_address` we point the client at."
+"This new baseline replicates the MNIST+CNN task from the paper [Federated"
+" Optimization in Heterogeneous Networks (Li et al., "
+"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy,"
+" which aims at making convergence more robust in heterogeneous settings."
msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " -":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" +"这条新Baseline复现了论文[Federated Optimization in Heterogeneous Networks (Li et " +"al., 2018)](https://arxiv.org/abs/1812.06127)中的 MNIST+CNN 任务。它使用 " +"\"FedProx \"策略,旨在使收敛在异构环境中更加稳健。" -#: ../../source/tutorial-quickstart-pytorch.rst:226 -#: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 -#: ../../source/tutorial-quickstart-xgboost.rst:533 +#: ../../source/ref-changelog.md:704 msgid "" -"Once the server is running we can start the clients in different " -"terminals. Open a new terminal and start the first client:" -msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" - -#: ../../source/tutorial-quickstart-pytorch.rst:233 -#: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 -#: ../../source/tutorial-quickstart-xgboost.rst:540 -msgid "Open another terminal and start the second client:" -msgstr "打开另一台终端,启动第二个客户端:" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" +msgstr "" +"**引入新的 Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" -#: ../../source/tutorial-quickstart-pytorch.rst:239 -#: ../../source/tutorial-quickstart-scikitlearn.rst:252 -#: ../../source/tutorial-quickstart-xgboost.rst:546 +#: ../../source/ref-changelog.md:706 msgid "" -"Each client will have its own dataset. 
You should now see how the " -"training does in the very first terminal (the one that started the " -"server):" -msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." +msgstr "" +"这一新Baseline复现了论文[LEAF: A Benchmark for Federated Settings(Caldas 等人,2018 " +"年)](https://arxiv.org/abs/1812.01097)中评估 FedAvg 算法在 FEMNIST 数据集上性能的实验。" -#: ../../source/tutorial-quickstart-pytorch.rst:271 +#: ../../source/ref-changelog.md:708 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples" -"/quickstart-pytorch`." +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可以在 :code:`examples/quickstart-pytorch` 中找到。" +"**引入(试验性)REST API** ([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/ref-changelog.md:710 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with 
PyTorch Lightning to train an Auto Encoder model on MNIST." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" - -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" -msgstr "快速入门 PyTorch Lightning" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." +msgstr "作为基于 gRPC 的通信栈的替代方案,我们引入了新的 REST API。在初始版本中,REST API 仅支持匿名客户端。" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/ref-changelog.md:712 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" -msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." +msgstr "请注意:REST API 仍处于试验阶段,随着时间的推移可能会发生重大变化。" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/ref-changelog.md:714 msgid "" -"Please refer to the `full code example " -"`_ to learn more." 
+"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" msgstr "" -"请参阅 `完整代码示例 `_ 了解更多信息。" +"**改进(试验性)驱动程序应用程序接口** ([#1663](https://github.com/adap/flower/pull/1663)," +" [#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +#: ../../source/ref-changelog.md:716 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with scikit-learn to train a linear regression model." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." 
+msgstr "" +"驱动程序应用程序接口(Driver API)仍是一项试验性功能,但这一版本引入了一些重大升级。主要改进之一是引入了 SQLite " +"数据库,将服务器状态存储在磁盘上(而不是内存中)。另一项改进是,已交付的任务(指令或结果)现在将被删除。这大大提高了长期运行的 Flower " +"服务器的内存效率。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:5 -msgid "Quickstart scikit-learn" -msgstr "scikit-learn快速入门" +#: ../../source/ref-changelog.md:718 +msgid "" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" +msgstr "**修复模拟过程中与Ray有关的溢出问题** ([#1698](https://github.com/adap/flower/pull/1698))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +#: ../../source/ref-changelog.md:720 msgid "" -"In this tutorial, we will learn how to train a :code:`Logistic " -"Regression` model on MNIST using Flower and scikit-learn." -msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :code:`Logistic " -"Regression` 模型。" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" +msgstr "在运行长时间模拟时,`ray` 有时会溢出大量数据,导致训练无法继续。现在这个问题已经解决!🎉" -#: ../../source/tutorial-quickstart-scikitlearn.rst:12 -#, fuzzy +#: ../../source/ref-changelog.md:722 msgid "" -"It is recommended to create a virtual environment and run everything " -"within this :doc:`virtualenv `." +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" msgstr "" -"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" +"** 添加使用** `TabNet` ** 的新示例** " +"([#1725](https://github.com/adap/flower/pull/1725))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +#: ../../source/ref-changelog.md:724 msgid "" -"*Clients* are responsible for generating individual model parameter " -"updates for the model based on their local datasets. These updates are " -"then sent to the *server* which will aggregate them to produce an updated" -" global model. Finally, the *server* sends this improved version of the " -"model back to each *client*. 
A complete cycle of parameters updates is " -"called a *round*." -msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. We now have a federated example using Flower: " +"[quickstart-tabnet](https://github.com/adap/flower/tree/main/examples" +"/quickstart-tabnet)." +msgstr "" +"TabNet 是一个强大而灵活的框架,用于在表格数据上训练机器学习模型。我们现在有一个使用 Flower 的联邦示例:[quickstart-" +"tabnet](https://github.com/adap/flower/tree/main/examples/quickstart-" +"tabnet)。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +#: ../../source/ref-changelog.md:726 msgid "" -"Now that we have a rough idea of what is going on, let's get started. We " -"first need to install Flower. You can do this by running:" -msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:26 -#, fuzzy -msgid "Since we want to use scikit-learn, let's go ahead and install it:" -msgstr "既然我们要使用 scikt-learn,那就继续安装吧:" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" +msgstr "** 添加新的模拟监控指南** ([#1649](https://github.com/adap/flower/pull/1649))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:32 -msgid "Or simply install all dependencies using Poetry:" -msgstr "或者直接使用 Poetry 安装所有依赖项:" +#: ../../source/ref-changelog.md:728 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." +msgstr "我们现在有一份文档指南,可帮助用户在模拟过程中监控其性能。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +#: ../../source/ref-changelog.md:730 msgid "" -"Now that we have all our dependencies installed, let's run a simple " -"distributed training with two clients and one server. 
However, before " -"setting up the client and server, we will define all functionalities that" -" we need for our federated learning setup within :code:`utils.py`. The " -":code:`utils.py` contains different functions defining all the machine " -"learning basics:" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" msgstr "" -"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " -":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:45 -msgid ":code:`get_model_parameters()`" -msgstr ":code:`get_model_parameters()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:46 -msgid "Returns the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:47 -msgid ":code:`set_model_params()`" -msgstr ":code:`set_model_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:48 -#, fuzzy -msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" -msgstr "设置:code:`sklean`的LogisticRegression模型的参数" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid ":code:`set_initial_params()`" -msgstr ":code:`set_initial_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:50 -msgid "Initializes the model parameters that the Flower server will ask for" -msgstr "初始化 Flower 服务器将要求的模型参数" +"**在模拟过程中为***`历史`***对象添加训练指标*** " +"([#1696](https://github.com/adap/flower/pull/1696))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#: ../../source/ref-changelog.md:732 msgid "" -"Please check out :code:`utils.py` `here " -"`_ for more details. The pre-defined functions are used in" -" the :code:`client.py` and imported. 
The :code:`client.py` also requires " -"to import several packages such as Flower and scikit-learn:" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" msgstr "" -"更多详情请查看 :code:`utils.py`` 这里 " -"`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " -"还需要导入几个软件包,如 Flower 和 scikit-learn:" +"`fit_metrics_aggregation_fn`可用于汇总训练指标,但以前的版本不会将结果保存在 \"History " +"\"对象中。现在可以了!" -#: ../../source/tutorial-quickstart-scikitlearn.rst:67 -#, fuzzy +#: ../../source/ref-changelog.md:734 msgid "" -"Prior to local training, we need to load the MNIST dataset, a popular " -"image classification dataset of handwritten digits for machine learning, " -"and partition the dataset for FL. This can be conveniently achieved using" -" `Flower Datasets `_. The " -":code:`FederatedDataset.load_partition()` method loads the partitioned " -"training set for each partition ID defined in the :code:`--partition-id` " -"argument." 
+"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " 
+"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" msgstr "" -"在本地训练之前,我们需要加载 MNIST 数据集(一个用于机器学习的流行手写数字图像分类数据集),并对数据集进行 FL 分区。使用 " -"\"Flower Datasets " -"`_\"可以方便地实现这一点。:code:`FederatedDataset.load_partition()`" -" 方法为 :code:`--partition-id` 参数中定义的每个分区 ID 加载分区训练集。" +"**普通改进** ([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " +"[#1685](https://github.com/adap/flower/pull/1685), " 
+"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:95 -msgid "" -"Next, the logistic regression model is defined and initialized with " -":code:`utils.set_initial_params()`." 
-msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" +#: ../../source/ref-changelog.md:742 +msgid "v1.3.0 (2023-02-06)" +msgstr "v1.3.0 (2023-02-06)" -#: ../../source/tutorial-quickstart-scikitlearn.rst:107 +#: ../../source/ref-changelog.md:748 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to fit the logistic regression we defined earlier)." +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:113 +#: ../../source/ref-changelog.md:752 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses scikit-learn. 
Implementing :code:`NumPyClient` usually " -"means defining the following methods (:code:`set_parameters` is optional " -"though):" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " -"时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " -"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:122 -msgid "is directly imported with :code:`utils.set_model_params()`" -msgstr "直接导入 :code:`utils.set_model_params()`" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:130 -msgid "The methods can be implemented in the following way:" -msgstr "这些方法可以通过以下方式实现:" +"**在驱动程序应用程序接口中添加对** `workload_id` **和** `group_id` **的支持** " +"([#1595](https://github.com/adap/flower/pull/1595))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:153 +#: ../../source/ref-changelog.md:754 msgid "" -"We can now create an instance of our class :code:`MnistClient` and add " -"one line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:160 -#, fuzzy -msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"0.0.0.0:8080\"` tells" -" the client which server to connect to. In our case we can run the server" -" and the client on the same machine, therefore we use " -":code:`\"0.0.0.0:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we pass to the client." 
+"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" -" :code:`server_address`。" +"驱动程序 API(试验性)现在支持 `workload_id`,可用于识别任务所属的工作量。它还支持新的 " +"`group_id`,例如,可用于指示当前的训练轮次。通过 `workload_id` 和 `group_id` " +"客户端节点可以决定是否要处理某个任务。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:169 +#: ../../source/ref-changelog.md:756 msgid "" -"The following Flower server is a little bit more advanced and returns an " -"evaluation function for the server-side evaluation. First, we import " -"again all required libraries such as Flower and scikit-learn." +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" msgstr "" -"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" -"learn。" - -#: ../../source/tutorial-quickstart-scikitlearn.rst:172 -msgid ":code:`server.py`, import Flower and start the server:" -msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" +"**使Driver API 和Fleet " +"API地址可配置**([#1637](https://github.com/adap/flower/pull/1637))" -#: ../../source/tutorial-quickstart-scikitlearn.rst:185 -#, fuzzy +#: ../../source/ref-changelog.md:758 msgid "" -"The number of federated learning rounds is set in :code:`fit_round()` and" -" the evaluation is defined in :code:`get_evaluate_fn()`. 
The evaluation " -"function is called after each federated learning round and gives you " -"information about loss and accuracy. Note that we also make use of Flower" -" Datasets here to load the test split of the MNIST dataset for server-" -"side evaluation." +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" msgstr "" -"联邦学习轮数在 :code:`fit_round()` 中设置,评估在 :code:`get_evaluate_fn()` " -"中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" +"长期运行的 Flower 服务器(Driver API 和 Fleet API)现在可以在启动时配置 Driver API(通过 " +"`--driver-api-address`)和 Fleet API(通过 `-fleet-api-address`)的服务器地址:" -#: ../../source/tutorial-quickstart-scikitlearn.rst:213 +#: ../../source/ref-changelog.md:760 +#, fuzzy msgid "" -"The :code:`main` contains the server-side parameter initialization " -":code:`utils.set_initial_params()` as well as the aggregation strategy " -":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " -"federated averaging (or FedAvg), with two clients and evaluation after " -"each federated learning round. The server can be started with the command" -" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
+"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" msgstr "" -":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " -":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " -"FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " -":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " -"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:232 -msgid "" -"With both client and server ready, we can now run everything and see " -"federated learning in action. Federated learning systems usually have a " -"server and multiple clients. We, therefore, have to start the server " -"first:" -msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +#: ../../source/ref-changelog.md:762 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "支持 IPv4 和 IPv6 地址。" -#: ../../source/tutorial-quickstart-scikitlearn.rst:286 +#: ../../source/ref-changelog.md:764 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this example can be found in :code:`examples/sklearn-logreg-" -"mnist`." +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " -"`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" - -#: ../../source/tutorial-quickstart-tensorflow.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." 
-msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" - -#: ../../source/tutorial-quickstart-tensorflow.rst:5 -msgid "Quickstart TensorFlow" -msgstr "快速入门 TensorFlow" - -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" - -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "在导入 Flower 之前,我们必须先安装它:" - -#: ../../source/tutorial-quickstart-tensorflow.rst:21 -msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" -msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" - -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" -msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" +"** 添加使用 fastai 和 Flower 进行联邦学习的新示例** " +"([#1598](https://github.com/adap/flower/pull/1598))" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/ref-changelog.md:766 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"A new code example (`quickstart-fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart-fastai](https://github.com/adap/flower/tree/main/examples" +"/quickstart-fastai)." msgstr "" -"我们使用 TF 的 Keras 实用程序加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " -":code:`tf.keras.datasets.cifar10.load_data()` 会下载 CIFAR10,将其缓存到本地,然后以 " -"NumPy ndarrays 的形式返回整个训练集和测试集。" - -#: ../../source/tutorial-quickstart-tensorflow.rst:47 -msgid "" -"Next, we need a model. 
For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" -msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" +"一个新的代码示例(`quickstart-fastai`)演示了使用 [fastai](https://www.fast.ai/) 和 " +"Flower 的联邦学习。您可以在这里找到它: [quickstart-" +"fastai](https://github.com/adap/flower/tree/main/examples/quickstart-" +"fastai)。" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/ref-changelog.md:768 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 Keras 时,该类可以更轻松地实现 " -":code:`Client` 接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" - -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." -msgstr "每个客户都有自己的数据集。" - -#: ../../source/tutorial-quickstart-tensorflow.rst:137 -msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" -msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" +"**使安卓示例兼容** `flwr >= 1.0.0` **和最新版本的安卓** " +"([#1603](https://github.com/adap/flower/pull/1603))" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/ref-changelog.md:770 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." 
+"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " -"`_ 可以在 :code:`examples/quickstart-" -"tensorflow/client.py` 中找到。" +"Android 代码示例已进行了大幅更新:项目兼容 Flower 1.0(及更高版本),用户界面已全面刷新,项目已更新为兼容较新的 Android" +" 工具。" -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/ref-changelog.md:772 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with XGBoost to train classification models on trees." -msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" - -#: ../../source/tutorial-quickstart-xgboost.rst:5 -msgid "Quickstart XGBoost" -msgstr "XGBoost快速入门" - -#: ../../source/tutorial-quickstart-xgboost.rst:14 -msgid "Federated XGBoost" -msgstr "联邦化 XGBoost" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" +msgstr "**添加新的`FedProx`策略** ([#1619](https://github.com/adap/flower/pull/1619))" -#: ../../source/tutorial-quickstart-xgboost.rst:16 +#: ../../source/ref-changelog.md:774 msgid "" -"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " -"implementation of gradient-boosted decision tree (**GBDT**), that " -"maximises the computational boundaries for boosted tree methods. It's " -"primarily designed to enhance both the performance and computational " -"speed of machine learning models. In XGBoost, trees are constructed " -"concurrently, unlike the sequential approach taken by GBDT." +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). 
It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." msgstr "" -"EXtreme Gradient " -"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" -" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" +"该[策略](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)与[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)几乎相同,但可以帮助用户复现本[论文](https://arxiv.org/abs/1812.06127)中的描述。它的本质是添加一个名为" +" `proximal_mu`的参数,使局部模型与全局模型正则化。" -#: ../../source/tutorial-quickstart-xgboost.rst:20 +#: ../../source/ref-changelog.md:776 msgid "" -"Often, for tabular data on medium-sized datasets with fewer than 10k " -"training examples, XGBoost surpasses the results of deep learning " -"techniques." -msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" - -#: ../../source/tutorial-quickstart-xgboost.rst:23 -msgid "Why federated XGBoost?" -msgstr "为什么选择联邦 XGBoost?" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" +msgstr "**为遥测事件添加新指标**([#1640](https://github.com/adap/flower/pull/1640))" -#: ../../source/tutorial-quickstart-xgboost.rst:25 +#: ../../source/ref-changelog.md:778 msgid "" -"Indeed, as the demand for data privacy and decentralized learning grows, " -"there's an increasing requirement to implement federated XGBoost systems " -"for specialised applications, like survival analysis and financial fraud " -"detection." -msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." 
+msgstr "例如,更新后的事件结构可以将同一工作负载中的事件集中在一起。" -#: ../../source/tutorial-quickstart-xgboost.rst:27 +#: ../../source/ref-changelog.md:780 msgid "" -"Federated learning ensures that raw data remains on the local device, " -"making it an attractive approach for sensitive domains where data " -"security and privacy are paramount. Given the robustness and efficiency " -"of XGBoost, combining it with federated learning offers a promising " -"solution for these specific challenges." -msgstr "" -"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " -"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" +msgstr "**添加新的自定义策略教程部分** [#1623](https://github.com/adap/flower/pull/1623)" -#: ../../source/tutorial-quickstart-xgboost.rst:30 +#: ../../source/ref-changelog.md:782 msgid "" -"In this tutorial we will learn how to train a federated XGBoost model on " -"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " -"example (`full code xgboost-quickstart " -"`_)" -" with two *clients* and one *server* to demonstrate how federated XGBoost" -" works, and then we dive into a more complex example (`full code xgboost-" -"comprehensive `_) to run various experiments." 
+"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" msgstr "" -"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " -"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " -"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" -"comprehensive `_),以运行各种实验。" - -#: ../../source/tutorial-quickstart-xgboost.rst:37 -msgid "Environment Setup" -msgstr "环境设定" - -#: ../../source/tutorial-quickstart-xgboost.rst:41 -msgid "" -"We first need to install Flower and Flower Datasets. You can do this by " -"running :" -msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" - -#: ../../source/tutorial-quickstart-xgboost.rst:47 -msgid "" -"Since we want to use :code:`xgboost` package to build up XGBoost trees, " -"let's go ahead and install :code:`xgboost`:" -msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" - -#: ../../source/tutorial-quickstart-xgboost.rst:57 -msgid "" -"*Clients* are responsible for generating individual weight-updates for " -"the model based on their local datasets. Now that we have all our " -"dependencies installed, let's run a simple distributed training with two " -"clients and one server." 
-msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" - -#: ../../source/tutorial-quickstart-xgboost.rst:60 -msgid "" -"In a file called :code:`client.py`, import xgboost, Flower, Flower " -"Datasets and other related functions:" -msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" - -#: ../../source/tutorial-quickstart-xgboost.rst:87 -msgid "Dataset partition and hyper-parameter selection" -msgstr "数据集划分和超参数选择" +"Flower 教程新增了一个章节,介绍如何从零开始实施自定义策略: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" -#: ../../source/tutorial-quickstart-xgboost.rst:89 +#: ../../source/ref-changelog.md:784 msgid "" -"Prior to local training, we require loading the HIGGS dataset from Flower" -" Datasets and conduct data partitioning for FL:" -msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" +msgstr "** 添加新的自定义序列化教程部分** ([#1622](https://github.com/adap/flower/pull/1622))" -#: ../../source/tutorial-quickstart-xgboost.rst:102 +#: ../../source/ref-changelog.md:786 msgid "" -"In this example, we split the dataset into two partitions with uniform " -"distribution (:code:`IidPartitioner(num_partitions=2)`). 
Then, we load " -"the partition for the given client based on :code:`node_id`:" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" msgstr "" -"在此示例中,我们将数据集分割成两个均匀分布的分区(:code:`IidPartitioner(num_partitions=2)`)。然后,我们根据" -" :code:`node_id` 为给定客户端加载分区:" - -#: ../../source/tutorial-quickstart-xgboost.rst:121 -msgid "" -"After that, we do train/test splitting on the given partition (client's " -"local data), and transform data format for :code:`xgboost` package." -msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" +"Flower 教程现在新增了一个章节,介绍自定义序列化: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" -#: ../../source/tutorial-quickstart-xgboost.rst:134 +#: ../../source/ref-changelog.md:788 msgid "" -"The functions of :code:`train_test_split` and " -":code:`transform_dataset_to_dmatrix` are defined as below:" -msgstr ":code:`train_test_split` 和 :code:`transform_dataset_too_dmatrix` 的函数定义如下:" - -#: ../../source/tutorial-quickstart-xgboost.rst:158 -msgid "Finally, we define the hyper-parameters used for XGBoost training." -msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" - -#: ../../source/tutorial-quickstart-xgboost.rst:174 -msgid "" -"The :code:`num_local_round` represents the number of iterations for local" -" tree boost. We use CPU for the training in default. One can shift it to " -"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " -"evaluation metric." 
+"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" msgstr "" -"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " -"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" - -#: 
../../source/tutorial-quickstart-xgboost.rst:181 -msgid "Flower client definition for XGBoost" -msgstr "用于 XGBoost 的 Flower 客户端定义" +"**普通改进** ([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github. com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github. com/adap/flower/pull/1590), " +"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github. 
com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" -#: ../../source/tutorial-quickstart-xgboost.rst:183 +#: ../../source/ref-changelog.md:792 msgid "" -"After loading the dataset we define the Flower client. We follow the " -"general rule to define :code:`XgbClient` class inherited from " -":code:`fl.client.Client`." +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" msgstr "" -"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " -":code:`XgbClient` 类。" +"** 更新文档** ([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614)))" -#: ../../source/tutorial-quickstart-xgboost.rst:193 +#: ../../source/ref-changelog.md:794 ../../source/ref-changelog.md:861 msgid "" -"The :code:`self.bst` is used to keep the Booster objects that remain " -"consistent across rounds, allowing them to store predictions from trees " -"integrated in earlier rounds and maintain other essential data 
structures" -" for training." -msgstr "" -"代码:`self.bst`用于保存在各轮中保持一致的 Booster " -"对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" +msgstr "和往常一样,我们的文档有了很大的改进。这是我们努力使 Flower 文档成为所有项目中最好文档的又一步骤。请继续关注,并随时提供反馈意见!" -#: ../../source/tutorial-quickstart-xgboost.rst:196 -msgid "" -"Then, we override :code:`get_parameters`, :code:`fit` and " -":code:`evaluate` methods insides :code:`XgbClient` class as follows." -msgstr "" -"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " -":code:`evaluate` 方法如下。" +#: ../../source/ref-changelog.md:800 +msgid "v1.2.0 (2023-01-13)" +msgstr "v1.2.0 (2023-01-13)" -#: ../../source/tutorial-quickstart-xgboost.rst:210 +#: ../../source/ref-changelog.md:806 msgid "" -"Unlike neural network training, XGBoost trees are not started from a " -"specified random weights. In this case, we do not use " -":code:`get_parameters` and :code:`set_parameters` to initialise model " -"parameters for XGBoost. As a result, let's return an empty tensor in " -":code:`get_parameters` when it is called by the server at the first " -"round." +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" msgstr "" -"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " -":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " -":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." +" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" -#: ../../source/tutorial-quickstart-xgboost.rst:251 +#: ../../source/ref-changelog.md:810 msgid "" -"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " -"up the first set of trees. 
the returned Booster object and config are " -"stored in :code:`self.bst` and :code:`self.config`, respectively. From " -"the second round, we load the global model sent from server to " -":code:`self.bst`, and then update model weights on local training data " -"with function :code:`local_boost` as follows:" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" msgstr "" -"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " -"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " -":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" +"**引入新的 Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" -#: ../../source/tutorial-quickstart-xgboost.rst:269 +#: ../../source/ref-changelog.md:812 msgid "" -"Given :code:`num_local_round`, we update trees by calling " -":code:`self.bst.update` method. After training, the last " -":code:`N=num_local_round` trees will be extracted to send to the server." +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" msgstr "" -"给定 :code:`num_local_round`,我们通过调用 " -":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " -"树并发送给服务器。" - -#: ../../source/tutorial-quickstart-xgboost.rst:291 -msgid "" -"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " -"conduct evaluation on valid set. The AUC value will be returned." 
-msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" +"在未来几周内,我们将发布一些新的参考,特别是对 FL " +"新手有用的方法。它们通常会重温文献中的知名论文,适合集成到您自己的应用程序中或用于实验,以加深您对 FL " +"的总体了解。今天发布的是该系列中的第一篇。[阅读全文](https://flower.ai/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" -#: ../../source/tutorial-quickstart-xgboost.rst:294 +#: ../../source/ref-changelog.md:814 msgid "" -"Now, we can create an instance of our class :code:`XgbClient` and add one" -" line to actually run this client:" -msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" +msgstr "**改进模拟中的 GPU 支持**([#1555](https://github.com/adap/flower/pull/1555))" -#: ../../source/tutorial-quickstart-xgboost.rst:300 +#: ../../source/ref-changelog.md:816 msgid "" -"That's it for the client. We only have to implement :code:`Client`and " -"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " -"tells the client which server to connect to. In our case we can run the " -"server and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." 
msgstr "" -"这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client.start_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" +"基于 Ray 的虚拟客户端引擎 (`start_simulation`)已更新,以改进对 GPU 的支持。此次更新包含了在 GPU " +"集群环境中扩展模拟的一些经验教训。新的默认设置使基于 GPU 的模拟运行更加稳健。" -#: ../../source/tutorial-quickstart-xgboost.rst:311 +#: ../../source/ref-changelog.md:818 msgid "" -"These updates are then sent to the *server* which will aggregate them to " -"produce a better model. Finally, the *server* sends this improved version" -" of the model back to each *client* to finish a complete FL round." +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" msgstr "" -"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" -" FL。" +"**改进 Jupyter Notebook 教程中的 GPU 支持** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" -#: ../../source/tutorial-quickstart-xgboost.rst:314 +#: ../../source/ref-changelog.md:820 msgid "" -"In a file named :code:`server.py`, import Flower and FedXgbBagging from " -":code:`flwr.server.strategy`." +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! Check out the updated notebooks here:" msgstr "" -"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 " -"FedXgbBagging。" - -#: ../../source/tutorial-quickstart-xgboost.rst:316 -msgid "We first define a strategy for XGBoost bagging aggregation." 
-msgstr "我们首先定义了 XGBoost bagging聚合策略。" +"一些用户报告说,在 GPU 实例上使用 Jupyter 笔记本并不是很方便。我们听取了他们的意见,并对所有 Jupyter " +"笔记本进行了改进!点击这里查看更新后的笔记本:" -#: ../../source/tutorial-quickstart-xgboost.rst:339 +#: ../../source/ref-changelog.md:822 msgid "" -"We use two clients for this example. An " -":code:`evaluate_metrics_aggregation` function is defined to collect and " -"wighted average the AUC values from clients." +"[An Introduction to Federated Learning](https://flower.ai/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" msgstr "" -"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " -"值并求取平均值。" - -#: ../../source/tutorial-quickstart-xgboost.rst:342 -msgid "Then, we start the server:" -msgstr "然后,我们启动服务器:" - -#: ../../source/tutorial-quickstart-xgboost.rst:354 -msgid "Tree-based bagging aggregation" -msgstr "基于树的bagging聚合" - -#: ../../source/tutorial-quickstart-xgboost.rst:356 -msgid "" -"You must be curious about how bagging aggregation works. Let's look into " -"the details." -msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" +"[联邦学习简介](https://flower.ai/docs/framework/tutorial-get-started-with-" +"flower-pytorch.html)" -#: ../../source/tutorial-quickstart-xgboost.rst:358 +#: ../../source/ref-changelog.md:823 msgid "" -"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " -":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." 
-" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " -"and :code:`evaluate` methods as follows:" +"[Strategies in Federated Learning](https://flower.ai/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" msgstr "" -"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " -":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " -":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" +"[联邦学习策略](https://flower.ai/docs/framework/tutorial-use-a-federated-" +"learning-strategy-pytorch.html)" -#: ../../source/tutorial-quickstart-xgboost.rst:454 +#: ../../source/ref-changelog.md:824 msgid "" -"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " -"trees by calling :code:`aggregate()` function:" +"[Building a Strategy](https://flower.ai/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" msgstr "" -"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" -" 树:" +"[制定策略](https://flower.ai/docs/framework/tutorial-build-a-strategy-from-" +"scratch-pytorch.html)" -#: ../../source/tutorial-quickstart-xgboost.rst:513 +#: ../../source/ref-changelog.md:825 msgid "" -"In this function, we first fetch the number of trees and the number of " -"parallel trees for the current and previous model by calling " -":code:`_get_tree_nums`. Then, the fetched information will be aggregated." -" After that, the trees (containing model weights) are aggregated to " -"generate a new tree model." 
+"[Client and NumPyClient](https://flower.ai/docs/framework/tutorial-"
+"customize-the-client-pytorch.html)"
 msgstr ""
-"在该函数中,我们首先通过调用 :code:`_get_tree_nums` "
-"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。"
+"[客户端和 NumPyClient](https://flower.ai/docs/framework/tutorial-customize-"
+"the-client-pytorch.html)"
-#: ../../source/tutorial-quickstart-xgboost.rst:518
+#: ../../source/ref-changelog.md:827
 msgid ""
-"After traversal of all clients' models, a new global model is generated, "
-"followed by the serialisation, and sending back to each client."
-msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。"
-
-#: ../../source/tutorial-quickstart-xgboost.rst:523
-msgid "Launch Federated XGBoost!"
-msgstr "启动联邦 XGBoost!"
+"**Introduce optional telemetry** "
+"([#1533](https://github.com/adap/flower/pull/1533), "
+"[#1544](https://github.com/adap/flower/pull/1544), "
+"[#1584](https://github.com/adap/flower/pull/1584))"
+msgstr ""
+"**引入可选遥测**([#1533](https://github.com/adap/flower/pull/1533), "
+"[#1544](https://github.com/adap/flower/pull/1544), "
+"[#1584](https://github.com/adap/flower/pull/1584))"
-#: ../../source/tutorial-quickstart-xgboost.rst:585
+#: ../../source/ref-changelog.md:829
 msgid ""
-"Congratulations! You've successfully built and run your first federated "
-"XGBoost system. The AUC values can be checked in "
-":code:`metrics_distributed`. One can see that the average AUC increases "
-"over FL rounds."
+"After a [request for "
+"feedback](https://github.com/adap/flower/issues/1534) from the community,"
+" the Flower open-source project introduces optional collection of "
+"*anonymous* usage metrics to make well-informed decisions to improve "
+"Flower. Doing this enables the Flower team to understand how Flower is "
+"used and what challenges users might face."
msgstr "" -"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " -"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" +"在社区发出[反馈请求](https://github.com/adap/flower/issues/1534)之后,Flower " +"开放源码项目引入了可选的*匿名*使用指标收集,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" -#: ../../source/tutorial-quickstart-xgboost.rst:590 +#: ../../source/ref-changelog.md:831 msgid "" -"The full `source code `_ for this example can be found in :code:`examples" -"/xgboost-quickstart`." +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.ai/docs/telemetry.html)." msgstr "" -"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" +"**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower " +"遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。[阅读全文](https://flower.ai/docs/telemetry.html)。" -#: ../../source/tutorial-quickstart-xgboost.rst:594 -msgid "Comprehensive Federated XGBoost" -msgstr "综合的联邦 XGBoost" - -#: ../../source/tutorial-quickstart-xgboost.rst:596 -#, fuzzy +#: ../../source/ref-changelog.md:833 msgid "" -"Now that you have known how federated XGBoost work with Flower, it's time" -" to run some more comprehensive experiments by customising the " -"experimental settings. In the xgboost-comprehensive example (`full code " -"`_), we provide more options to define various experimental" -" setups, including aggregation strategies, data partitioning and " -"centralised/distributed evaluation. We also support :doc:`Flower " -"simulation ` making it easy to simulate large " -"client cohorts in a resource-aware manner. Let's take a look!" 
+"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" msgstr "" -"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" -"comprehensive 示例 (`完整代码 " -"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" - -#: ../../source/tutorial-quickstart-xgboost.rst:603 -#, fuzzy -msgid "Cyclic training" -msgstr "集中式训练" +"**引入(试验性)Driver API** ([#1520](https://github.com/adap/flower/pull/1520)," +" [#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" -#: ../../source/tutorial-quickstart-xgboost.rst:605 -#, fuzzy +#: ../../source/ref-changelog.md:835 msgid "" -"In addition to bagging aggregation, we offer a cyclic training scheme, " -"which performs FL in a client-by-client fashion. Instead of aggregating " -"multiple clients, there is only one single client participating in the " -"training per round in the cyclic training scenario. The trained local " -"XGBoost trees will be passed to the next client as an initialised model " -"for next round's boosting." +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." 
msgstr "" -"除了袋式聚合,我们还提供了一种循环训练方案,它以逐个客户端的方式执行 " -"FL。在循环训练方案中,每轮只有一个客户端参与训练,而不是多个客户端聚合在一起。训练好的本地 XGBoost " -"树将传递给下一个客户端,作为下一轮提升的初始化模型。" +"Flower 现在有了一个新的(试验性的)驱动程序应用程序接口(Driver " +"API),它将支持完全可编程、异步和多租户的联邦学习(Federated Learning)和联邦分析(Federated " +"Analytics)应用程序。展望未来,Driver API 将成为许多即将推出的功能的抽象基础,您现在就可以开始构建这些功能。" -#: ../../source/tutorial-quickstart-xgboost.rst:609 -#, fuzzy +#: ../../source/ref-changelog.md:837 msgid "" -"To do this, we first customise a :code:`ClientManager` in " -":code:`server_utils.py`:" -msgstr "为此,我们首先要在 :code:`server_utils.py` 中自定义一个 :code:`ClientManager`:" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." +msgstr "" +"驱动程序应用程序接口还支持一种新的执行模式,在这种模式下,服务器可无限期运行。多个单独的工作负载可以同时运行,并独立于服务器启动和停止执行。这对于希望在生产中部署" +" Flower 的用户来说尤其有用。" -#: ../../source/tutorial-quickstart-xgboost.rst:649 -#, fuzzy +#: ../../source/ref-changelog.md:839 msgid "" -"The customised :code:`ClientManager` samples all available clients in " -"each FL round based on the order of connection to the server. Then, we " -"define a new strategy :code:`FedXgbCyclic` in " -":code:`flwr.server.strategy.fedxgb_cyclic.py`, in order to sequentially " -"select only one client in given round and pass the received model to next" -" client." -msgstr "" -"定制的 :code:`ClientManager` 会根据连接服务器的顺序,在每轮 FL 中对所有可用客户端进行采样。然后,我们在 " -":code:`flwr.server.strategy.fedxgb_cyclic.py`\"中定义了一个新策略 " -":code:`FedXgbCyclic`,以便在给定回合中按顺序只选择一个客户端,并将接收到的模型传递给下一个客户端。" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" +msgstr "要了解更多信息,请查看 `mt-pytorch` 代码示例。我们期待您的反馈!" 
-#: ../../source/tutorial-quickstart-xgboost.rst:690
-#, fuzzy
+#: ../../source/ref-changelog.md:841
 msgid ""
-"Unlike the original :code:`FedAvg`, we don't perform aggregation here. "
-"Instead, we just make a copy of the received client model as global model"
-" by overriding :code:`aggregate_fit`."
-msgstr ""
-"与最初的 :code:`FedAvg` 不同,我们在这里不执行聚合。相反,我们只是通过覆盖 :code:`aggregate_fit` "
-"将接收到的客户端模型复制为全局模型。"
+"Please note: *The Driver API is still experimental and will likely change"
+" significantly over time.*"
+msgstr "请注意:*Driver API仍处于试验阶段,随着时间的推移可能会发生重大变化。*"
-#: ../../source/tutorial-quickstart-xgboost.rst:693
-#, fuzzy
+#: ../../source/ref-changelog.md:843
 msgid ""
-"Also, the customised :code:`configure_fit` and :code:`configure_evaluate`"
-" methods ensure the clients to be sequentially selected given FL round:"
+"**Add new Federated Analytics with Pandas example** "
+"([#1469](https://github.com/adap/flower/pull/1469), "
+"[#1535](https://github.com/adap/flower/pull/1535))"
 msgstr ""
-"此外,定制的 :code:`configure_fit` 和 :code:`configure_evaluate` 方法可确保在 FL "
-"轮中按顺序选择客户:"
-
-#: ../../source/tutorial-quickstart-xgboost.rst:757
-msgid "Customised data partitioning"
-msgstr "定制数据分区"
+"** 添加新的使用 Pandas "
+"的联邦分析示例**([#1469](https://github.com/adap/flower/pull/1469), "
+"[#1535](https://github.com/adap/flower/pull/1535))"
-#: ../../source/tutorial-quickstart-xgboost.rst:759
+#: ../../source/ref-changelog.md:845
 msgid ""
-"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`"
-" to instantiate the data partitioner based on the given "
-":code:`num_partitions` and :code:`partitioner_type`. Currently, we "
-"provide four supported partitioner type to simulate the uniformity/non-"
-"uniformity in data quantity (uniform, linear, square, exponential)."
+"A new code example (`quickstart-pandas`) demonstrates federated analytics"
+" with Pandas and Flower. 
You can find it here: [quickstart-" +"pandas](https://github.com/adap/flower/tree/main/examples/quickstart-" +"pandas)." msgstr "" -"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " -":code:`num_partitions` 和 :code:`partitioner_type` " -"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" - -#: ../../source/tutorial-quickstart-xgboost.rst:790 -msgid "Customised centralised/distributed evaluation" -msgstr "定制的集中/分布式评估" +"新代码示例(`quickstart-pandas`)演示了使用 Pandas 和 Flower 进行联邦分析。您可以在此处找到它: " +"[quickstart-pandas](https://github.com/adap/flower/tree/main/examples" +"/quickstart-pandas)。" -#: ../../source/tutorial-quickstart-xgboost.rst:792 -#, fuzzy +#: ../../source/ref-changelog.md:847 msgid "" -"To facilitate centralised evaluation, we define a function in " -":code:`server_utils.py`:" -msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" +msgstr "" +"**添加新策略: Krum 和 MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" -#: ../../source/tutorial-quickstart-xgboost.rst:824 +#: ../../source/ref-changelog.md:849 msgid "" -"This function returns a evaluation function which instantiates a " -":code:`Booster` object and loads the global model weights to it. The " -"evaluation is conducted by calling :code:`eval_set()` method, and the " -"tested AUC value is reported." +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." 
msgstr "" -"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " -":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" +"罗马萨皮恩扎大学(Sapienza University)计算机科学专业的学生埃多尔多(Edoardo)提出了一种新的 \"Krum " +"\"策略,使用户能够在其工作负载中轻松使用 Krum 和 MultiKrum。" -#: ../../source/tutorial-quickstart-xgboost.rst:827 -#, fuzzy +#: ../../source/ref-changelog.md:851 msgid "" -"As for distributed evaluation on the clients, it's same as the quick-" -"start example by overriding the :code:`evaluate()` method insides the " -":code:`XgbClient` class in :code:`client_utils.py`." +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" msgstr "" -"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " -":code:`evaluate()` 方法。" +"** 更新 C++ 示例,与 Flower v1.2.0 兼容** " +"([#1495](https://github.com/adap/flower/pull/1495))" -#: ../../source/tutorial-quickstart-xgboost.rst:831 -#, fuzzy -msgid "Flower simulation" -msgstr "运行模拟" +#: ../../source/ref-changelog.md:853 +msgid "" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." +msgstr "为了与最新版本的 Flower 兼容,C++ 示例代码进行了大幅更新。" -#: ../../source/tutorial-quickstart-xgboost.rst:832 -#, fuzzy +#: ../../source/ref-changelog.md:855 msgid "" -"We also provide an example code (:code:`sim.py`) to use the simulation " -"capabilities of Flower to simulate federated XGBoost training on either a" -" single machine or a cluster of machines." 
-msgstr "我们还提供了一个示例代码(:code:`sim.py`),用于使用 Flower 的模拟功能在单台机器或机器集群上模拟联合 XGBoost 训练。" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" +msgstr "" +"**普通改进** ([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github. 
com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" -#: ../../source/tutorial-quickstart-xgboost.rst:866 -#, fuzzy +#: ../../source/ref-changelog.md:859 msgid "" -"After importing all required packages, we define a :code:`main()` " -"function to perform the simulation process:" -msgstr "导入所有需要的软件包后,我们定义了一个 :code:`main()` 函数来执行模拟程序:" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" +msgstr "" +"** 更新文档** ([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " +"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" -#: ../../source/tutorial-quickstart-xgboost.rst:921 -#, fuzzy +#: ../../source/ref-changelog.md:863 msgid "" -"We first load the dataset and perform data partitioning, and the pre-" -"processed data is stored in a :code:`list`. 
After the simulation begins, " -"the clients won't need to pre-process their partitions again." -msgstr "我们首先加载数据集并执行数据分区,预处理后的数据存储在 :code:`list` 中。模拟开始后,客户端就不需要再预处理分区了。" +"One highlight is the new [first time contributor " +"guide](https://flower.ai/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" +msgstr "" +"其中一个亮点是新的[首次贡献者指南](https://flower.ai/docs/first-time-" +"contributors.html):如果你以前从未在 GitHub 上做过贡献,这将是一个完美的开始!" -#: ../../source/tutorial-quickstart-xgboost.rst:924 -#, fuzzy -msgid "Then, we define the strategies and other hyper-parameters:" -msgstr "然后,我们定义策略和其他超参数:" +#: ../../source/ref-changelog.md:869 +msgid "v1.1.0 (2022-10-31)" +msgstr "v1.1.0 (2022-10-31)" -#: ../../source/tutorial-quickstart-xgboost.rst:975 -#, fuzzy +#: ../../source/ref-changelog.md:873 msgid "" -"After that, we start the simulation by calling " -":code:`fl.simulation.start_simulation`:" -msgstr "然后,我们调用 :code:`fl.simulation.start_simulation` 开始模拟:" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" +msgstr "在此,我们向所有促成 Flower 新版本的贡献者致以**特别的谢意(按 \"git shortlog \"顺序排列):" -#: ../../source/tutorial-quickstart-xgboost.rst:995 -#, fuzzy +#: ../../source/ref-changelog.md:875 msgid "" -"One of key parameters for :code:`start_simulation` is :code:`client_fn` " -"which returns a function to construct a client. We define it as follows:" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" msgstr "" -":code:`start_simulation` 的一个关键参数是 " -":code:`client_fn`,它返回一个用于构建客户端的函数。我们将其定义如下:" - -#: ../../source/tutorial-quickstart-xgboost.rst:1038 -msgid "Arguments parser" -msgstr "参数解析器" +"`Akis Linardos`, `Christopher S`, `Daniel J. 
Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" -#: ../../source/tutorial-quickstart-xgboost.rst:1040 -#, fuzzy +#: ../../source/ref-changelog.md:879 msgid "" -"In :code:`utils.py`, we define the arguments parsers for clients, server " -"and simulation, allowing users to specify different experimental " -"settings. Let's first see the sever side:" -msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" +msgstr "" +"**引入差分隐私包装器(预览)** ([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" -#: ../../source/tutorial-quickstart-xgboost.rst:1086 -#, fuzzy +#: ../../source/ref-changelog.md:881 msgid "" -"This allows user to specify training strategies / the number of total " -"clients / FL rounds / participating clients / clients for evaluation, and" -" evaluation fashion. Note that with :code:`--centralised-eval`, the sever" -" will do centralised evaluation and all functionalities for client " -"evaluation will be disabled." +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." 
msgstr "" -"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" -"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" - -#: ../../source/tutorial-quickstart-xgboost.rst:1090 -msgid "Then, the argument parser on client side:" -msgstr "然后是客户端的参数解析器:" +"可插拔差分隐私封装器的首个(实验性)预览版可轻松配置和使用差分隐私(DP)。可插拔的差分隐私封装器可实现客户端差分隐私和服务器端差分隐私的框架无关**以及**策略无关的使用。请访问" +" Flower 文档,新的解释器会提供更多细节。" -#: ../../source/tutorial-quickstart-xgboost.rst:1144 -#, fuzzy +#: ../../source/ref-changelog.md:883 msgid "" -"This defines various options for client data partitioning. Besides, " -"clients also have an option to conduct evaluation on centralised test set" -" by setting :code:`--centralised-eval`, as well as an option to perform " -"scaled learning rate based on the number of clients by setting :code" -":`--scaled-lr`." -msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。" - -#: ../../source/tutorial-quickstart-xgboost.rst:1148 -#, fuzzy -msgid "We also have an argument parser for simulation:" -msgstr "我们还有一个用于模拟的参数解析器:" - -#: ../../source/tutorial-quickstart-xgboost.rst:1226 -#, fuzzy -msgid "This integrates all arguments for both client and server sides." -msgstr "这整合了客户端和服务器端的所有参数。" - -#: ../../source/tutorial-quickstart-xgboost.rst:1229 -msgid "Example commands" -msgstr "命令示例" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" +msgstr "**新的 iOS CoreML 代码示例**([#1289](https://github.com/adap/flower/pull/1289))" -#: ../../source/tutorial-quickstart-xgboost.rst:1231 -#, fuzzy +#: ../../source/ref-changelog.md:885 msgid "" -"To run a centralised evaluated experiment with bagging strategy on 5 " -"clients with exponential distribution for 50 rounds, we first start the " -"server as below:" -msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" - -#: ../../source/tutorial-quickstart-xgboost.rst:1238 -msgid "Then, on each client terminal, we start the clients:" -msgstr "然后,我们在每个客户终端上启动客户机:" +"Flower goes iOS! 
A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." +msgstr "" +"Flower 进入 iOS!大量新代码示例展示了如何为 iOS 构建 Flower 客户端。该代码示例包含可用于多种任务的 Flower iOS " +"SDK 组件,以及在 CoreML 上运行的一个任务示例。" -#: ../../source/tutorial-quickstart-xgboost.rst:1244 -#, fuzzy -msgid "To run the same experiment with Flower simulation:" -msgstr "运行与 Flower 模拟相同的实验:" +#: ../../source/ref-changelog.md:887 +msgid "" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" +msgstr "**新的联邦医疗策略** ([#1461](https://github.com/adap/flower/pull/1461))" -#: ../../source/tutorial-quickstart-xgboost.rst:1250 -#, fuzzy +#: ../../source/ref-changelog.md:889 msgid "" -"The full `code `_ for this comprehensive example can be found in" -" :code:`examples/xgboost-comprehensive`." +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." msgstr "" -"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" +"新的 \"FedMedian \"战略实现了[Yin " +"等人,2018]的联邦中值(FedMedian)(https://arxiv.org/pdf/1803.01498v1.pdf)。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 -msgid "Build a strategy from scratch" -msgstr "从零开始制定策略" +#: ../../source/ref-changelog.md:891 +msgid "" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" +msgstr "**虚拟客户端引擎中的**日志**`客户端`**异常([#1493](https://github.com/adap/flower/pull/1493))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:893 msgid "" -"Welcome to the third part of the Flower federated learning tutorial. 
In " -"previous parts of this tutorial, we introduced federated learning with " -"PyTorch and Flower (`part 1 `__) and we learned how strategies " -"can be used to customize the execution on both the server and the clients" -" (`part 2 `__)." -msgstr "" -"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__)。" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." +msgstr "VCE 中发生的所有 \"客户端 \"异常现在都会被默认记录下来,而不只是暴露给配置的 `Strategy`(通过 `failures`参数)。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:895 msgid "" -"In this notebook, we'll continue to customize the federated learning " -"system we built previously by creating a custom version of FedAvg (again," -" using `Flower `__ and `PyTorch " -"`__)." -msgstr "" -"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " -"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" +msgstr "**改进虚拟客户端引擎内部**([#1401](https://github.com/adap/flower/pull/1401)、[#1453](https://github.com/adap/flower/pull/1453))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#: ../../source/ref-changelog.md:897 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the Flower community on Slack to connect, ask questions, and get help: " -"`Join Slack `__ 🌼 We'd love to hear from " -"you in the ``#introductions`` channel! And if anything is unclear, head " -"over to the ``#questions`` channel." 
+"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " -"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 -msgid "Let's build a new ``Strategy`` from scratch!" -msgstr "让我们从头开始构建一个新的``Strategy``!" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 -msgid "Preparation" -msgstr "准备工作" +"虚拟客户端引擎的部分内部结构已进行了修改。VCE 现在使用 Ray 2.0,\"client_resources \"字典的值类型改为 " +"\"float\",以允许分配分数资源。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:899 msgid "" -"Before we begin with the actual code, let's make sure that we have " -"everything we need." 
-msgstr "在开始实际代码之前,让我们先确保我们已经准备好了所需的一切。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 -msgid "Installing dependencies" -msgstr "安装依赖项" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 -msgid "First, we install the necessary packages:" -msgstr "首先,我们安装必要的软件包:" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" +msgstr "**支持虚拟客户端引擎中的可选** `Client`**/**`NumPyClient` **方法**" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +#: ../../source/ref-changelog.md:901 msgid "" -"Now that we have all dependencies installed, we can import everything we " -"need for this tutorial:" -msgstr "现在我们已经安装了所有依赖项,可以导入本教程所需的所有内容:" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." +msgstr "虚拟客户端引擎现在完全支持可选的 `Client`(和 `NumPyClient`)方法。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +#: ../../source/ref-changelog.md:903 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " -"GPU > Save``). 
Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" msgstr "" -"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " -"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " -"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " -"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 -msgid "Data loading" -msgstr "数据加载" +"**使用** `flwr`向软件包提供类型信息 " +"([#1377](https://github.com/adap/flower/pull/1377))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +#: ../../source/ref-changelog.md:905 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``. We introduce a new parameter" -" ``num_clients`` which allows us to call ``load_datasets`` with different" -" numbers of clients." +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." 
msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " -"``num_clients``,它允许我们使用不同数量的客户端调用 ``load_datasets``。" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 -msgid "Model training/evaluation" -msgstr "模型培训/评估" +"软件包 `flwr` 现在捆绑了一个 `py.typed` 文件,表明该软件包是类型化的。这样,使用 `flwr` 的项目或软件包就可以使用 " +"`mypy` 等静态类型检查器改进代码,从而获得类型支持。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +#: ../../source/ref-changelog.md:907 msgid "" -"Let's continue with the usual model definition (including " -"``set_parameters`` and ``get_parameters``), training and test functions:" -msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" +msgstr "" +"** 更新代码示例** ([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 -msgid "Flower client" -msgstr "Flower 客户端" +#: ../../source/ref-changelog.md:909 +msgid "" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
+msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 +#: ../../source/ref-changelog.md:911 msgid "" -"To implement the Flower client, we (again) create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " -"``cid`` to the client and use it log additional details:" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" msgstr "" -"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " -"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " -"传递给客户端,并使用它记录其他详细信息:" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 -msgid "Let's test what we have so far before we continue:" -msgstr "在继续之前,让我们先测试一下我们目前掌握的情况:" - -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 -msgid "Build a Strategy from scratch" -msgstr 
"从零开始构建策略" +"**更新文档** ([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:913 msgid "" -"Let’s overwrite the ``configure_fit`` method such that it passes a higher" -" learning rate (potentially also other hyperparameters) to the optimizer " -"of a fraction of the clients. We will keep the sampling of the clients as" -" it is in ``FedAvg`` and then change the configuration dictionary (one of" -" the ``FitIns`` attributes)." -msgstr "" -"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " -"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually."
+msgstr "文档更新的数量之多,甚至没有必要逐一列出。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +#: ../../source/ref-changelog.md:915 msgid "" -"The only thing left is to use the newly created custom Strategy " -"``FedCustom`` when starting the experiment:" -msgstr "剩下的唯一工作就是在启动实验时使用新创建的自定义策略 ``FedCustom`` :" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" +msgstr "**重构文档**([#1387](https://github.com/adap/flower/pull/1387))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 -msgid "Recap" -msgstr "回顾" +#: ../../source/ref-changelog.md:917 +msgid "" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" +msgstr "我们对文档进行了重组,使其更易于浏览。这只是让 Flower 文档成为所有项目中最好文档的第一步。敬请期待!" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +#: ../../source/ref-changelog.md:919 msgid "" -"In this notebook, we’ve seen how to implement a custom strategy. A custom" -" strategy enables granular control over client node configuration, result" -" aggregation, and more. To define a custom strategy, you only have to " -"overwrite the abstract methods of the (abstract) base class ``Strategy``." -" To make custom strategies even more powerful, you can pass custom " -"functions to the constructor of your new class (``__init__``) and then " -"call these functions whenever needed." 
-msgstr "" -"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " -"``Strategy`` " -"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" +msgstr "**在 Colab 中打开按钮** ([#1389](https://github.com/adap/flower/pull/1389))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:729 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +#: ../../source/ref-changelog.md:921 msgid "" -"Before you continue, make sure to join the Flower community on Slack: " -"`Join Slack `__" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." msgstr "" -"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" +"Flower 联邦学习教程的四个部分现在都带有一个新的 \"在 Colab 中打开 " +"\"按钮。现在,您无需在本地计算机上安装任何软件,只需点击一下,就可以在浏览器中使用和学习 Flower。" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +#: ../../source/ref-changelog.md:923 msgid "" -"There's a dedicated ``#questions`` channel if you need help, but we'd " -"also love to hear who you are in ``#introductions``!" -msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" 
+"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" +msgstr "" +"**改进教程** ([#1468](https://github.com/adap/flower/pull/1468), " +"[#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" -#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 +#: ../../source/ref-changelog.md:925 msgid "" -"The `Flower Federated Learning Tutorial - Part 4 " -"`__ introduces ``Client``, the flexible API underlying " -"``NumPyClient``." +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." msgstr "" -"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" +"Flower 联邦学习教程有两个全新的部分,涉及自定义策略(仍处于 WIP 阶段)和 `Client` 与 `NumPyClient` " +"之间的区别。现有的第一和第二部分也得到了改进(许多小改动和修正)。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 -msgid "Customize the client" -msgstr "自定义客户端" +#: ../../source/ref-changelog.md:931 +msgid "v1.0.0 (2022-07-28)" +msgstr "v1.0.0 (2022-07-28)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 -msgid "" -"Welcome to the fourth part of the Flower federated learning tutorial. 
In " -"the previous parts of this tutorial, we introduced federated learning " -"with PyTorch and Flower (`part 1 `__), we learned how " -"strategies can be used to customize the execution on both the server and " -"the clients (`part 2 `__), and we built our own " -"custom strategy from scratch (`part 3 `__)." -msgstr "" -"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " -"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " -"`__),并从头开始构建了我们自己的定制策略(`part 3 " -"`__)。" +#: ../../source/ref-changelog.md:933 +msgid "Highlights" +msgstr "亮点" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 -msgid "" -"In this notebook, we revisit ``NumPyClient`` and introduce a new " -"baseclass for building clients, simply named ``Client``. In previous " -"parts of this tutorial, we've based our client on ``NumPyClient``, a " -"convenience class which makes it easy to work with machine learning " -"libraries that have good NumPy interoperability. With ``Client``, we gain" -" a lot of flexibility that we didn't have before, but we'll also have to " -"do a few things the we didn't have to do before." -msgstr "" -"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " -"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" -" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" +#: ../../source/ref-changelog.md:935 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "稳定的**虚拟客户端引擎**(可通过`start_simulation`访问)" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 -msgid "" -"Let's go deeper and see what it takes to move from ``NumPyClient`` to " -"``Client``!" -msgstr "让我们深入了解一下从 ``NumPyClient`` 到 ``Client`` 的过程!" 
+#: ../../source/ref-changelog.md:936 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "所有 `Client`/`NumPyClient` 方法现在都是可选的了" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 -msgid "Step 0: Preparation" -msgstr "步骤 0:准备工作" +#: ../../source/ref-changelog.md:937 +msgid "Configurable `get_parameters`" +msgstr "可配置的`get_parameters`" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +#: ../../source/ref-changelog.md:938 msgid "" -"Let's now load the CIFAR-10 training and test set, partition them into " -"ten smaller datasets (each split into training and validation set), and " -"wrap everything in their own ``DataLoader``." -msgstr "" -"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " -"``DataLoader`` 中。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 -msgid "Step 1: Revisiting NumPyClient" -msgstr "步骤 1:重温 NumPyClient" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" +msgstr "对大量小型应用程序接口进行了清理,使开发人员的体验更加一致" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +#: ../../source/ref-changelog.md:942 msgid "" -"So far, we've implemented our client by subclassing " -"``flwr.client.NumPyClient``. The three methods we implemented are " -"``get_parameters``, ``fit``, and ``evaluate``. 
Finally, we wrap the " -"creation of instances of this class in a function called ``client_fn``:" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" msgstr "" -"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " -"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " -"``client_fn`` 的函数来创建该类的实例:" +"在此,我们谨向所有促成 Flower 1.0 的贡献者致以**特别的谢意(按[GitHub " +"贡献者](https://github.com/adap/flower/graphs/contributors) 倒序排列):" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +#: ../../source/ref-changelog.md:944 msgid "" -"We've seen this before, there's nothing new so far. The only *tiny* " -"difference compared to the previous notebook is naming, we've changed " -"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " -"``numpyclient_fn``. Let's run it to see the output we get:" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), 
[@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." msgstr "" -"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " -"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), [@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), 
[@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sancarlim](https://github.com/sancarlim), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +#: ../../source/ref-changelog.md:948 msgid "" -"This works as expected, two clients are training for three rounds of " -"federated learning." -msgstr "结果不出所料,两个客户端正在进行三轮联邦学习训练。" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "** 所有参数必须作为关键字参数传递** ([#1338](https://github.com/adap/flower/pull/1338))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +#: ../../source/ref-changelog.md:950 msgid "" -"Let's dive a little bit deeper and discuss how Flower executes this " -"simulation. Whenever a client is selected to do some work, " -"``start_simulation`` calls the function ``numpyclient_fn`` to create an " -"instance of our ``FlowerNumPyClient`` (along with loading the model and " -"the data)." +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. 
Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." msgstr "" -"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " -"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" +"以关键字参数传递所有参数,不再支持位置参数。使用位置参数的代码(例如,`start_client(\"127.0.0.1:8080\", " +"FlowerClient())`)必须为每个位置参数添加关键字(例如,`start_client(server_address=\"127.0.0.1:8080\"," +" client=FlowerClient())`)。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +#: ../../source/ref-changelog.md:952 msgid "" -"But here's the perhaps surprising part: Flower doesn't actually use the " -"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " -"makes it look like a subclass of ``flwr.client.Client``, not " -"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " -"know how to handle ``NumPyClient``'s, it only knows how to handle " -"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " -"top of ``Client``." +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" msgstr "" -"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " -"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " -"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " -"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " -"只是建立在``Client``之上的便捷抽象类。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 -msgid "" -"Instead of building on top of ``NumPyClient``, we can directly build on " -"top of ``Client``." 
-msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 -msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" -msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" +"**在*** `start_server` ***和*** `start_simulation` 中引入配置对象*** " +"`ServerConfig` ([#1317](https://github.com/adap/flower/pull/1317))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +#: ../../source/ref-changelog.md:954 msgid "" -"Let's try to do the same thing using ``Client`` instead of " -"``NumPyClient``." -msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." +msgstr "" +"并非配置字典`{\"num_rounds\": 3, \"round_timeout\": 600.0}`, `start_server`和 " +"`start_simulation`现在用一个类型为 " +"`flwr.server.ServerConfig`的配置对象。`ServerConfig`接收的参数与之前的 config dict " +"相同,但它使编写类型安全代码变得更容易,默认参数值也更加透明。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +#: ../../source/ref-changelog.md:956 msgid "" -"Before we discuss the code in more detail, let's try to run it! Gotta " -"make sure our new ``Client``-based client works, right?" -msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "**重新命名内置策略参数,使其更加清晰** ([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +#: ../../source/ref-changelog.md:958 msgid "" -"That's it, we're now using ``Client``. It probably looks similar to what " -"we've done with ``NumPyClient``. So what's the difference?" 
-msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" +msgstr "以下内置策略参数已重新命名,以提高可读性并与其他 API 保持一致:" + +#: ../../source/ref-changelog.md:960 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "`fraction_eval` --> `fraction_evaluate`" + +#: ../../source/ref-changelog.md:961 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "`min_eval_clients` --> `min_evaluate_clients`" + +#: ../../source/ref-changelog.md:962 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "`eval_fn` --> `evaluate_fn`" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:964 msgid "" -"First of all, it's more code. But why? The difference comes from the fact" -" that ``Client`` expects us to take care of parameter serialization and " -"deserialization. For Flower to be able to send parameters over the " -"network, it eventually needs to turn these parameters into ``bytes``. " -"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " -"serialization. Turning raw bytes into something more useful (like NumPy " -"``ndarray``'s) is called deserialization. Flower needs to do both: it " -"needs to serialize parameters on the server-side and send them to the " -"client, the client needs to deserialize them to use them for local " -"training, and then serialize the updated parameters again to send them " -"back to the server, which (finally!) deserializes them again in order to " -"aggregate them with the updates received from other clients." 
-msgstr "" -"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower " -"要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。把参数(例如 NumPy 的 ``ndarray`` " -"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower " -"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" +msgstr "**更新内置策略的默认参数** ([#1278](https://github.com/adap/flower/pull/1278))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +#: ../../source/ref-changelog.md:966 msgid "" -"The only *real* difference between Client and NumPyClient is that " -"NumPyClient takes care of serialization and deserialization for you. It " -"can do so because it expects you to return parameters as NumPy ndarray's," -" and it knows how to handle these. This makes working with machine " -"learning libraries that have good NumPy support (most of them) a breeze." +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. 
Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" msgstr "" -"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient " -"会为你处理序列化和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy " -"ndarray的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。" +"所有内置策略现在都使用 \"fraction_fit=1.0 \"和 " +"\"fraction_evaluate=1.0\",这意味着它们会选择*所有*当前可用的客户端进行训练和评估。依赖以前默认值的项目可以通过以下方式初始化策略,获得以前的行为:" + +#: ../../source/ref-changelog.md:968 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +#: ../../source/ref-changelog.md:970 msgid "" -"In terms of API, there's one major difference: all methods in Client take" -" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " -"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " -"``NumPyClient`` on the other hand have multiple arguments (e.g., " -"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" -" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " -"``NumPyClient.fit``) if there are multiple things to handle. These " -"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " -"values you're used to from ``NumPyClient``." 
+"**Add** `server_round` **to** `Strategy.evaluate` "
+"([#1334](https://github.com/adap/flower/pull/1334))"
 msgstr ""
-"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 "
-"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 "
-"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在"
-" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。"
-
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510
-msgid "Step 3: Custom serialization"
-msgstr "步骤 3:自定义序列化"
+"**添加** `server_round` **到** `Strategy.evaluate` "
+"([#1334](https://github.com/adap/flower/pull/1334))"

-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512
+#: ../../source/ref-changelog.md:972
 msgid ""
-"Here we will explore how to implement custom serialization with a simple "
-"example."
-msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义序列化。"
+"The `Strategy` method `evaluate` now receives the current round of "
+"federated learning/evaluation as the first parameter."
+msgstr "`Strategy`的`evaluate` 方法现在会接收当前一轮联邦学习/评估作为第一个参数。"

-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514
+#: ../../source/ref-changelog.md:974
 msgid ""
-"But first what is serialization? Serialization is just the process of "
-"converting an object into raw bytes, and equally as important, "
-"deserialization is the process of converting raw bytes back into an "
-"object. This is very useful for network communication. Indeed, without "
-"serialization, you could not just a Python object through the internet."
+"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" msgstr "" -"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个" -" Python 对象。" +"**将*** `server_round` **和*** `config` **参数添加到*** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +#: ../../source/ref-changelog.md:976 msgid "" -"Federated Learning relies heavily on internet communication for training " -"by sending Python objects back and forth between the clients and the " -"server. This means that serialization is an essential part of Federated " -"Learning." -msgstr "通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." +msgstr "" +"传递给内置策略(如 `FedAvg`)的 `evaluate_fn` 现在需要三个参数:(1) 当前一轮联邦学习/评估 " +"(`server_round`),(2) 要评估的模型参数 (`parameters`),(3) 配置字典 (`config`)。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +#: ../../source/ref-changelog.md:978 msgid "" -"In the following section, we will write a basic example where instead of " -"sending a serialized version of our ``ndarray``\\ s containing our " -"parameters, we will first convert the ``ndarray`` into sparse matrices, " -"before sending them. This technique can be used to save bandwidth, as in " -"certain cases where the weights of a model are sparse (containing many 0 " -"entries), converting them to a sparse matrix can greatly improve their " -"bytesize." 
+"**Rename** `rnd` **to** `server_round` "
+"([#1321](https://github.com/adap/flower/pull/1321))"
 msgstr ""
-"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` "
-"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 "
-"条目),将它们转换成稀疏矩阵可以大大提高它们的字节数。"
-
-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521
-msgid "Our custom serialization/deserialization functions"
-msgstr "我们的定制序列化/反序列化功能"
+"**重新命名** `rnd` **为** `server_round` "
+"([#1321](https://github.com/adap/flower/pull/1321))"

-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523
+#: ../../source/ref-changelog.md:980
 msgid ""
-"This is where the real serialization/deserialization will happen, "
-"especially in ``ndarray_to_sparse_bytes`` for serialization and "
-"``sparse_bytes_to_ndarray`` for deserialization."
+"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, "
+"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the "
+"current round of federated learning/evaluation as their first parameter. "
+"To improve reaability and avoid confusion with *random*, this parameter "
+"has been renamed from `rnd` to `server_round`."
 msgstr ""
-"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_too_sparse_bytes`` 和用于反序列化的 "
-"``sparse_bytes_too_ndarray`` 中。"
+"几个 Flower "
+"方法和函数(`evaluate_fn`、`configure_fit`、`aggregate_fit`、`configure_evaluate`、`aggregate_evaluate`)的第一个参数是当前一轮的联邦学习/评估。为提高可读性并避免与"
+" *random* 混淆,该参数已从 `rnd` 更名为 `server_round`。"

-#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525
 msgid ""
-"Note that we imported the ``scipy.sparse`` library in order to convert "
-"our arrays."
-msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" +msgstr "" +"**移动*** `flwr.dataset` **到*** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 -msgid "Client-side" -msgstr "客户端" +#: ../../source/ref-changelog.md:984 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "实验软件包 `flwr.dataset` 已迁移至 Flower Baselines。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:986 msgid "" -"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " -"will just have to call our custom functions in our " -"``flwr.client.Client``." -msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" +msgstr "**删除实验策略** ([#1280](https://github.com/adap/flower/pull/1280))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +#: ../../source/ref-changelog.md:988 msgid "" -"Indeed, in ``get_parameters`` we need to serialize the parameters we got " -"from our network using our custom ``ndarrays_to_sparse_parameters`` " -"defined above." -msgstr "" -"事实上,在 `get_parameters` 中,我们需要使用上文定义的自定义 `ndarrays_too_sparse_parameters` " -"序列化从网络中获取的参数。" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." +msgstr "移除未维护的试验性策略(`FastAndSlow`、`FedFSv0`、`FedFSv1`)。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +#: ../../source/ref-changelog.md:990 msgid "" -"In ``fit``, we first need to deserialize the parameters coming from the " -"server using our custom ``sparse_parameters_to_ndarrays`` and then we " -"need to serialize our local results with " -"``ndarrays_to_sparse_parameters``." 
+"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` " -"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。" +"**重新命名** `Weights` **到** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +#: ../../source/ref-changelog.md:992 msgid "" -"In ``evaluate``, we will only need to deserialize the global parameters " -"with our custom function." -msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 -msgid "Server-side" -msgstr "服务器端" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." +msgstr "flwr.common.Weights \"更名为 \"flwr.common.NDArrays\",以更好地反映该类型的含义。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +#: ../../source/ref-changelog.md:994 msgid "" -"For this example, we will just use ``FedAvg`` as a strategy. To change " -"the serialization and deserialization here, we only need to reimplement " -"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" -" functions of the strategy will be inherited from the super class " -"``FedAvg``." 
+"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " -"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 -msgid "As you can see only one line as change in ``evaluate``:" -msgstr "正如你所看到的,``evaluate``中只修改了一行:" +"**从** `start_server` 中移除过时的** `force_final_distributed_eval` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +#: ../../source/ref-changelog.md:996 msgid "" -"And for ``aggregate_fit``, we will first deserialize every result we " -"received:" -msgstr "而对于 ``aggregate_fit``,我们将首先反序列化收到的每个结果:" +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." +msgstr "" +"start_server \"参数 \"force_final_distributed_eval " +"\"长期以来一直是个历史遗留问题,在此版本中终于永远消失了。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 -msgid "And then serialize the aggregated result:" -msgstr "然后将汇总结果序列化:" +#: ../../source/ref-changelog.md:998 +msgid "" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" +msgstr "" +"**使** `get_parameters` **可配置** " +"([#1242](https://github.com/adap/flower/pull/1242))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 -msgid "We can now run our custom serialization example!" -msgstr "现在我们可以运行自定义序列化示例!" +#: ../../source/ref-changelog.md:1000 +msgid "" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." 
+msgstr "" +"现在,\"get_parameters \"方法与 \"get_properties\"、\"fit \"和 \"evaluate " +"\"一样,都接受配置字典。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +#: ../../source/ref-changelog.md:1002 msgid "" -"In this part of the tutorial, we've seen how we can build clients by " -"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " -"convenience abstraction that makes it easier to work with machine " -"learning libraries that have good NumPy interoperability. ``Client`` is a" -" more flexible abstraction that allows us to do things that are not " -"possible in ``NumPyClient``. In order to do so, it requires us to handle " -"parameter serialization and deserialization ourselves." +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" msgstr "" -"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " -"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" +"**用新的** `config` 参数** 替换** `num_rounds` ** in** `start_simulation` ** " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 +#: ../../source/ref-changelog.md:1004 msgid "" -"This is the final part of the Flower tutorial (for now!), " -"congratulations! You're now well equipped to understand the rest of the " -"documentation. There are many topics we didn't cover in the tutorial, we " -"recommend the following resources:" -msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" - -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 -msgid "`Read Flower Docs `__" -msgstr "阅读Flower文档 `__" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. 
This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." +msgstr "" +"现在,`start_simulation`(开始模拟)` 函数接受配置字典 `config` 而不是 `num_rounds` 整数。这改进了 " +"`start_simulation` 和 `start_server` 之间的一致性,并使两者之间的转换更容易。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +#: ../../source/ref-changelog.md:1008 msgid "" -"`Check out Flower Code Examples " -"`__" -msgstr "查看 Flower 代码示例 `__" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" +msgstr "** 支持 Python 3.10** ([#1320](https://github.com/adap/flower/pull/1320))" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +#: ../../source/ref-changelog.md:1010 msgid "" -"`Use Flower Baselines for your research " -"`__" -msgstr "使用 \"Flower Baselines \"进行研究 `__" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." +msgstr "上一个 Flower 版本引入了对 Python 3.10 的实验支持,而本版本则宣布对 Python 3.10 的支持为稳定支持。" -#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 +#: ../../source/ref-changelog.md:1012 msgid "" -"`Watch Flower Summit 2023 videos `__" -msgstr "观看 2023 年Flower峰会视频 `__" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 -msgid "Get started with Flower" -msgstr "开始使用Flower" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 -msgid "Welcome to the Flower federated learning tutorial!" -msgstr "欢迎阅读Flower联邦学习教程!" 
+"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" +msgstr "" +"**使所有** `Client` **和** `NumPyClient` **方法成为可选** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 -#, fuzzy +#: ../../source/ref-changelog.md:1014 msgid "" -"In this notebook, we'll build a federated learning system using Flower, " -"`Flower Datasets `__ and PyTorch. In " -"part 1, we use PyTorch for the model training pipeline and data loading. " -"In part 2, we continue to federate the PyTorch-based pipeline using " -"Flower." +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" msgstr "" -"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " -"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 -msgid "Let's get started!" -msgstr "让我们开始吧!" +"`Client`/`NumPyClient`的 \"get_properties\"、\"get_parameters\"、\"fit \"和 " +"\"evaluate \"方法都是可选的。这样就可以编写只实现 `fit` 而不实现其他方法的客户端。使用集中评估时,无需实现 " +"`evaluate`!" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +#: ../../source/ref-changelog.md:1016 msgid "" -"Before we begin with any actual code, let's make sure that we have " -"everything we need." 
-msgstr "在开始编写实际代码之前,让我们先确保我们已经准备好了所需的一切。" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" +"**启用向** `start_simulation` 传递** `Server` 实例 " +"([#1281](https://github.com/adap/flower/pull/1281))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 -#, fuzzy +#: ../../source/ref-changelog.md:1018 msgid "" -"Next, we install the necessary packages for PyTorch (``torch`` and " -"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " -"(``flwr``):" -msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." +msgstr "" +"与 `start_server` 类似,`start_simulation` 现在也接受一个完整的 `Server` " +"实例。这使得用户可以对实验的执行进行大量自定义,并为使用虚拟客户端引擎运行异步 FL 等打开了大门。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:105 -#, fuzzy +#: ../../source/ref-changelog.md:1020 msgid "" -"It is possible to switch to a runtime that has GPU acceleration enabled " -"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" -" GPU > Save``). Note, however, that Google Colab is not always able to " -"offer GPU acceleration. If you see an error related to GPU availability " -"in one of the following sections, consider switching back to CPU-based " -"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " -"has GPU acceleration enabled, you should see the output ``Training on " -"cuda``, otherwise it'll say ``Training on cpu``." 
+"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" msgstr "" -"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " -"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " -"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " -"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:118 -msgid "Loading the data" -msgstr "加载数据" +"**更新代码示例** ([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:120 -#, fuzzy +#: ../../source/ref-changelog.md:1022 msgid "" -"Federated learning can be applied to many different types of tasks across" -" different domains. In this tutorial, we introduce federated learning by " -"training a simple convolutional neural network (CNN) on the popular " -"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " -"distinguish between images from ten different classes: 'airplane', " -"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " -"'truck'." 
-msgstr "" -"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " -"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" +"Many code examples received small or even large maintenance updates, " +"among them are" +msgstr "许多代码示例都进行了小规模甚至大规模的维护更新,其中包括" + +#: ../../source/ref-changelog.md:1024 +msgid "`scikit-learn`" +msgstr "`scikit-learn`" + +#: ../../source/ref-changelog.md:1025 +msgid "`simulation_pytorch`" +msgstr "`simulation_pytorch`" + +#: ../../source/ref-changelog.md:1026 +msgid "`quickstart_pytorch`" +msgstr "`quickstart_pytorch`" + +#: ../../source/ref-changelog.md:1027 +msgid "`quickstart_simulation`" +msgstr "`quickstart_simulation`" + +#: ../../source/ref-changelog.md:1028 +msgid "`quickstart_tensorflow`" +msgstr "`quickstart_tensorflow`" + +#: ../../source/ref-changelog.md:1029 +msgid "`advanced_tensorflow`" +msgstr "`advanced_tensorflow`" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:131 +#: ../../source/ref-changelog.md:1031 msgid "" -"We simulate having multiple datasets from multiple organizations (also " -"called the \"cross-silo\" setting in federated learning) by splitting the" -" original CIFAR-10 dataset into multiple partitions. Each partition will " -"represent the data from a single organization. We're doing this purely " -"for experimentation purposes, in the real world there's no need for data " -"splitting because each organization already has their own data (so the " -"data is naturally partitioned)." 
-msgstr "" -"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " -"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" +msgstr "**删除过时的模拟示例** ([#1328](https://github.com/adap/flower/pull/1328))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:133 -#, fuzzy +#: ../../source/ref-changelog.md:1033 msgid "" -"Each organization will act as a client in the federated learning system. " -"So having ten organizations participate in a federation means having ten " -"clients connected to the federated learning server." -msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" +msgstr "" +"删除过时的 \"simulation \"示例,并将 \"quickstart_simulation \"重命名为 " +"\"simulation_tensorflow\",使其与 \"simulation_pytorch \"的命名一致" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:144 -#, fuzzy +#: ../../source/ref-changelog.md:1035 msgid "" -"Let's now create the Federated Dataset abstraction that from ``flwr-" -"datasets`` that partitions the CIFAR-10. 
We will create small training " -"and test set for each edge device and wrap each of them into a PyTorch " -"``DataLoader``:" +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" msgstr "" -"现在,让我们从 ``flwr-datasets`` 中创建 Federated Dataset 抽象,以分割 " -"CIFAR-10。我们将为每个边缘设备创建小型训练集和测试集,并将它们分别封装到 PyTorch ``DataLoader`` 中:" +"**更新文档** ([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:198 -#, fuzzy +#: ../../source/ref-changelog.md:1037 msgid "" -"We now have a list of ten training sets and ten validation sets " -"(``trainloaders`` and ``valloaders``) representing the data of ten " -"different organizations. Each ``trainloader``/``valloader`` pair contains" -" 4000 training examples and 1000 validation examples. There's also a " -"single ``testloader`` (we did not split the test set). 
Again, this is " -"only necessary for building research or educational systems, actual " -"federated learning systems have their data naturally distributed across " -"multiple partitions." +"One substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" msgstr "" -"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " -"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " -"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" +"其中一个实质性的文档更新修复了多个较小的渲染问题,使标题更加简洁以改善导航,删除了一个已废弃的库,更新了文档依赖关系,在 API 参考中包含了 " +"`flwr.common` 模块,包含了对基于 markdown 的文档的支持,将更新日志从 `.rst` 移植到了 " +"`.md`,并修复了一些较小的细节!" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:201 +#: ../../source/ref-changelog.md:1039 ../../source/ref-changelog.md:1094 +#: ../../source/ref-changelog.md:1163 ../../source/ref-changelog.md:1202 +msgid "**Minor updates**" +msgstr "**小规模更新**" + +#: ../../source/ref-changelog.md:1041 msgid "" -"Let's take a look at the first batch of images and labels in the first " -"training set (i.e., ``trainloaders[0]``) before we move on:" -msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" +msgstr "添加四舍五入数字,以适应和评估日志信息([#1266](https://github.com/adap/flower/pull/1266))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:240 +#: ../../source/ref-changelog.md:1042 msgid "" -"The output above shows a random batch of images from the first " -"``trainloader`` in our list of ten ``trainloaders``. 
It also prints the " -"labels associated with each image (i.e., one of the ten possible labels " -"we've seen above). If you run the cell again, you should see another " -"batch of images." +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" msgstr "" -"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " -"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" +"为 `advanced_tensorflow` 代码示例添加安全 gRPC 连接 " +"([#847](https://github.com/adap/flower/pull/847))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:252 -msgid "Step 1: Centralized Training with PyTorch" -msgstr "步骤 1:使用 PyTorch 进行集中训练" +#: ../../source/ref-changelog.md:1043 +msgid "" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" +msgstr "" +"更新开发人员工具([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:263 +#: ../../source/ref-changelog.md:1044 msgid "" -"Next, we're going to use PyTorch to define a simple convolutional neural " -"network. This introduction assumes basic familiarity with PyTorch, so it " -"doesn't cover the PyTorch-related aspects in full detail. If you want to " -"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " -"MINUTE BLITZ " -"`__." 
+"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" msgstr "" -"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" -" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " -"minute blitz " -"`__。" +"重命名 ProtoBuf 消息以提高一致性([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:275 -msgid "Defining the model" -msgstr "定义模型" +#: ../../source/ref-changelog.md:1046 +msgid "v0.19.0 (2022-05-18)" +msgstr "v0.19.0 (2022-05-18)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:277 +#: ../../source/ref-changelog.md:1050 msgid "" -"We use the simple CNN described in the `PyTorch tutorial " -"`__:" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" msgstr "" -"我们使用` PyTorch 教程 " -"`__ 中描述的简单 CNN:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:314 -msgid "Let's continue with the usual training and test functions:" -msgstr "让我们继续进行常规的训练和测试功能:" +"**Flower Baselines(预览): FedOpt、FedBN、FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:374 -msgid "Training the model" -msgstr "训练模型" +#: ../../source/ref-changelog.md:1052 +#, fuzzy +msgid "" +"The first preview release of Flower Baselines has arrived! 
We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.ai/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.ai/docs/baselines/how-to-" +"contribute-baselines.html)." +msgstr "" +"Flower Baselines 的第一个预览版已经发布!我们通过实现 " +"FedOpt(FedYogi、FedAdam、FedAdagrad)、FedBN 和 FedAvgM 来启动 Flower " +"Baselines。请查阅文档了解如何使用 [Flower Baselines](https://flower.ai/docs/using-" +"baselines.html)。在首次发布预览版时,我们还邀请社区成员[贡献自己的Baselines](https://flower.ai/docs" +"/contributing-baselines.html)。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:376 +#: ../../source/ref-changelog.md:1054 msgid "" -"We now have all the basic building blocks we need: a dataset, a model, a " -"training function, and a test function. Let's put them together to train " -"the model on the dataset of one of our organizations " -"(``trainloaders[0]``). This simulates the reality of most machine " -"learning projects today: each organization has their own data and trains " -"models only on this internal data:" -msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" +msgstr "**C++客户端SDK(预览版)和代码示例**([#1111](https://github.com/adap/flower/pull/1111))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#: ../../source/ref-changelog.md:1056 msgid "" -"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " -"in a test set accuracy of about 41%, which is not good, but at the same " -"time, it doesn't really matter for the purposes of this tutorial. 
The " -"intent was just to show a simplistic centralized training pipeline that " -"sets the stage for what comes next - federated learning!" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." msgstr "" -"在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " -"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 -msgid "Step 2: Federated Learning with Flower" -msgstr "步骤 2:使用 Flower 联邦学习" +"预览版支持用 C++ 编写的 Flower 客户端。C++ 预览版包括一个 Flower 客户端 SDK 和一个快速入门代码示例,使用 SDK " +"演示了一个简单的 C++ 客户端。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +#: ../../source/ref-changelog.md:1058 msgid "" -"Step 1 demonstrated a simple centralized training pipeline. All data was " -"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." -" Next, we'll simulate a situation where we have multiple datasets in " -"multiple organizations and where we train a model over these " -"organizations using federated learning." +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" msgstr "" -"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " -"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 -msgid "Updating model parameters" -msgstr "更新模型参数" +"** 增加对 Python 3.10 和 Python 3.11 的实验支持** " +"([#1135](https://github.com/adap/flower/pull/1135))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#: ../../source/ref-changelog.md:1060 msgid "" -"In federated learning, the server sends the global model parameters to " -"the client, and the client updates the local model with the parameters " -"received from the server. 
It then trains the model on the local data " -"(which changes the model parameters locally) and sends the " -"updated/changed model parameters back to the server (or, alternatively, " -"it sends just the gradients back to the server, not the full model " -"parameters)." -msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." +msgstr "" +"Python 3.10 是 Python 的最新稳定版本,Python 3.11 将于 10 月份发布。Flower 版本增加了对这两个 " +"Python 版本的实验支持。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +#: ../../source/ref-changelog.md:1062 msgid "" -"We need two helper functions to update the local model with parameters " -"received from the server and to get the updated model parameters from the" -" local model: ``set_parameters`` and ``get_parameters``. The following " -"two functions do just that for the PyTorch model above." -msgstr "" -"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:`` " -"set_parameters```和`get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" +msgstr "**通过用户提供的函数聚合自定义指标**([#1144](https://github.com/adap/flower/pull/1144))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#: ../../source/ref-changelog.md:1064 msgid "" -"The details of how this works are not really important here (feel free to" -" consult the PyTorch documentation if you want to learn more). In " -"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
-" The parameter tensors are then converted to/from a list of NumPy " -"ndarray's (which Flower knows how to serialize/deserialize):" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." msgstr "" -"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " -"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" +"现在无需定制策略即可聚合自定义度量(如`准确度`)。内置策略支持两个新参数:`fit_metrics_aggregation_fn` " +"和`evaluate_metrics_aggregation_fn`,允许传递自定义度量聚合函数。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 -msgid "Implementing a Flower client" -msgstr "实现 Flower 客户端" +#: ../../source/ref-changelog.md:1066 +msgid "" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" +msgstr "**用户可配置的回合超时**([#1162](https://github.com/adap/flower/pull/1162))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#: ../../source/ref-changelog.md:1068 msgid "" -"With that out of the way, let's move on to the interesting part. " -"Federated learning systems consist of a server and multiple clients. In " -"Flower, we create clients by implementing subclasses of " -"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " -"``NumPyClient`` in this tutorial because it is easier to implement and " -"requires us to write less boilerplate." +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." 
msgstr "" -"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " -"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " -"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" +"新的配置值允许为 `start_server` 和 `start_simulation` 设置回合超时。如果 `config` 字典中包含一个 " +"`round_timeout` 键(以秒为单位的 `float`值),服务器将至少等待 ** `round_timeout` 秒后才关闭连接。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#: ../../source/ref-changelog.md:1070 msgid "" -"To implement the Flower client, we create a subclass of " -"``flwr.client.NumPyClient`` and implement the three methods " -"``get_parameters``, ``fit``, and ``evaluate``:" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" msgstr "" -"为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " -"``get_parameters``、``fit`` 和``evaluate`` 三个方法:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 -msgid "``get_parameters``: Return the current local model parameters" -msgstr "``get_parameters``: 返回当前本地模型参数" +"**允许在所有内置策略中同时使用联邦评价和集中评估** " +"([#1091](https://github.com/adap/flower/pull/1091))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#: ../../source/ref-changelog.md:1072 msgid "" -"``fit``: Receive model parameters from the server, train the model " -"parameters on the local data, and return the (updated) model parameters " -"to the server" -msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." 
+msgstr "" +"内置策略现在可以在同一轮中同时执行联邦评估(即客户端)和集中评估(即服务器端)。可以通过将 `fraction_eval` 设置为 " +"`0.0`来禁用联邦评估。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#: ../../source/ref-changelog.md:1074 msgid "" -"``evaluate``: Receive model parameters from the server, evaluate the " -"model parameters on the local data, and return the evaluation result to " -"the server" -msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" +msgstr "" +"**两本新的 Jupyter Notebook 教程** " +"([#1141](https://github.com/adap/flower/pull/1141))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +#: ../../source/ref-changelog.md:1076 msgid "" -"We mentioned that our clients will use the previously defined PyTorch " -"components for model training and evaluation. Let's see a simple Flower " -"client implementation that brings everything together:" -msgstr "" -"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " -"客户端实现,它将一切都整合在一起:" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" +msgstr "两本 Jupyter Notebook 教程(与 Google Colab 兼容)介绍了 Flower 的基本和中级功能:" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#: ../../source/ref-changelog.md:1078 msgid "" -"Our class ``FlowerClient`` defines how local training/evaluation will be " -"performed and allows Flower to call the local training/evaluation through" -" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" -" *single client* in our federated learning system. Federated learning " -"systems have multiple clients (otherwise, there's not much to federate), " -"so each client will be represented by its own instance of " -"``FlowerClient``. If we have, for example, three clients in our workload," -" then we'd have three instances of ``FlowerClient``. 
Flower calls " -"``FlowerClient.fit`` on the respective instance when the server selects a" -" particular client for training (and ``FlowerClient.evaluate`` for " -"evaluation)." +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" msgstr "" -"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " -"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " -"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " -"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " -"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " -"``FlowerClient.evaluate``)。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:517 -msgid "Using the Virtual Client Engine" -msgstr "使用虚拟客户端引擎" +"*联邦学习简介*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:519 +#: ../../source/ref-changelog.md:1080 msgid "" -"In this notebook, we want to simulate a federated learning system with 10" -" clients on a single machine. This means that the server and all 10 " -"clients will live on a single machine and share resources such as CPU, " -"GPU, and memory. Having 10 clients would mean having 10 instances of " -"``FlowerClient`` in memory. Doing this on a single machine can quickly " -"exhaust the available memory resources, even if only a subset of these " -"clients participates in a single round of federated learning." 
+"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" msgstr "" -"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " -"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " -"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" +"*在联邦学习中使用策略*: [在 Colab " +"中打开](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:521 +#: ../../source/ref-changelog.md:1082 msgid "" -"In addition to the regular capabilities where server and clients run on " -"multiple machines, Flower, therefore, provides special simulation " -"capabilities that create ``FlowerClient`` instances only when they are " -"actually necessary for training or evaluation. To enable the Flower " -"framework to create clients when necessary, we need to implement a " -"function called ``client_fn`` that creates a ``FlowerClient`` instance on" -" demand. Flower calls ``client_fn`` whenever it needs an instance of one " -"particular client to call ``fit`` or ``evaluate`` (those instances are " -"usually discarded after use, so they should not keep any local state). " -"Clients are identified by a client ID, or short ``cid``. 
The ``cid`` can " -"be used, for example, to load different local data partitions for " -"different clients, as can be seen below:" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" msgstr "" -"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " -"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " -"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " -"``evaluate`` 时,它就会调用 " -"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " -"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" +"**新的 FedAvgM 策略(带服务器动量的联邦平均)** " +"([#1076](https://github.com/adap/flower/pull/1076))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:556 -msgid "Starting the training" -msgstr "开始训练" +#: ../../source/ref-changelog.md:1084 +msgid "" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." +msgstr "新的 \"FedAvgM \"策略实现了带服务器动量的联邦平均[Hsu et al., 2019\\]." -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:558 +#: ../../source/ref-changelog.md:1086 msgid "" -"We now have the class ``FlowerClient`` which defines client-side " -"training/evaluation and ``client_fn`` which allows Flower to create " -"``FlowerClient`` instances whenever it needs to call ``fit`` or " -"``evaluate`` on one particular client. The last step is to start the " -"actual simulation using ``flwr.simulation.start_simulation``." 
-msgstr "" -"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " -"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " -"``flwr.simulation.start_simulation`` 启动实际模拟。" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" +msgstr "**新的 PyTorch 高级代码示例** ([#1007](https://github.com/adap/flower/pull/1007))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:560 +#: ../../source/ref-changelog.md:1088 msgid "" -"The function ``start_simulation`` accepts a number of arguments, amongst " -"them the ``client_fn`` used to create ``FlowerClient`` instances, the " -"number of clients to simulate (``num_clients``), the number of federated " -"learning rounds (``num_rounds``), and the strategy. The strategy " -"encapsulates the federated learning approach/algorithm, for example, " -"*Federated Averaging* (FedAvg)." -msgstr "" -"函数 ``start_simulation`` 接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " -"``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" -" (FedAvg)。" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." +msgstr "新代码示例 (`advanced_pytorch`) 演示了 PyTorch 的高级 Flower 概念。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:562 +#: ../../source/ref-changelog.md:1090 msgid "" -"Flower has a number of built-in strategies, but we can also use our own " -"strategy implementations to customize nearly all aspects of the federated" -" learning approach. For this example, we use the built-in ``FedAvg`` " -"implementation and customize it using a few basic parameters. 
The last " -"step is the actual call to ``start_simulation`` which - you guessed it - " -"starts the simulation:" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" msgstr "" -"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " -"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:608 -msgid "Behind the scenes" -msgstr "幕后" +"**新的 JAX 代码示例**([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:610 -msgid "So how does this work? How does Flower execute this simulation?" -msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" +#: ../../source/ref-changelog.md:1092 +msgid "" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." +msgstr "新代码示例(`jax_from_centralized_to_federated`)展示了使用 JAX 和 Flower 的联邦学习。" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:612 -#, python-format +#: ../../source/ref-changelog.md:1096 msgid "" -"When we call ``start_simulation``, we tell Flower that there are 10 " -"clients (``num_clients=10``). Flower then goes ahead an asks the " -"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " -"select 100% of the available clients (``fraction_fit=1.0``), so it goes " -"ahead and selects 10 random clients (i.e., 100% of 10)." 
+"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" msgstr "" -"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " -"个客户(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。``FedAvg`` 知道它应该选择" -" 100%的可用客户(``fraction_fit=1.0``),所以它会随机选择 10 个客户(即 10 的 100%)。" +"新增选项,用于在 \"start_simulation\"(开始模拟)中已初始化 Ray 的情况下保持 Ray " +"运行([#1177](https://github.com/adap/flower/pull/1177))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:614 +#: ../../source/ref-changelog.md:1097 msgid "" -"Flower then asks the selected 10 clients to train the model. When the " -"server receives the model parameter updates from the clients, it hands " -"those updates over to the strategy (*FedAvg*) for aggregation. The " -"strategy aggregates those updates and returns the new global model, which" -" then gets used in the next round of federated learning." +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" msgstr "" -"然后,Flower 会要求选定的 10 " -"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" - -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:626 -msgid "Where's the accuracy?" -msgstr "准确度在哪里找?" +"添加对自定义 \"客户端管理器 \"作为 \"start_simulation " +"\"参数的支持([#1171](https://github.com/adap/flower/pull/1171))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +#: ../../source/ref-changelog.md:1098 msgid "" -"You may have noticed that all metrics except for ``losses_distributed`` " -"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" 
+"New documentation for [implementing " +"strategies](https://flower.ai/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" msgstr "" -"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。{\"准确度\": " -"float(准确度)}``去哪儿了?" +"[实施战略](https://flower.ai/docs/framework/how-to-implement-strategies.html)" +" 的新文件([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175)" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +#: ../../source/ref-changelog.md:1099 msgid "" -"Flower can automatically aggregate losses returned by individual clients," -" but it cannot do the same for metrics in the generic metrics dictionary " -"(the one with the ``accuracy`` key). Metrics dictionaries can contain " -"very different kinds of metrics and even key/value pairs that are not " -"metrics at all, so the framework does not (and can not) know how to " -"handle these automatically." -msgstr "" -"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 \"准确度 " -"\"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" +msgstr "新的移动友好型文档主题 ([#1174](https://github.com/adap/flower/pull/1174))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#: ../../source/ref-changelog.md:1100 msgid "" -"As users, we need to tell the framework how to handle/aggregate these " -"custom metrics, and we do so by passing metric aggregation functions to " -"the strategy. The strategy will then call these functions whenever it " -"receives fit or evaluate metrics from clients. The two possible functions" -" are ``fit_metrics_aggregation_fn`` and " -"``evaluate_metrics_aggregation_fn``." 
+"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" msgstr "" -"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是" -" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。" +"限制(可选)`ray`依赖的版本范围,使其仅包含兼容版本(`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +#: ../../source/ref-changelog.md:1104 msgid "" -"Let's create a simple weighted averaging function to aggregate the " -"``accuracy`` metric we return from ``evaluate``:" -msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" +msgstr "**删除对 Python 3.6 的过时支持** ([#871](https://github.com/adap/flower/pull/871))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:660 +#: ../../source/ref-changelog.md:1105 msgid "" -"The only thing left to do is to tell the strategy to call this function " -"whenever it receives evaluation metric dictionaries from the clients:" -msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" +msgstr "**移除过时的 KerasClient**([#857](https://github.com/adap/flower/pull/857))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:697 +#: ../../source/ref-changelog.md:1106 msgid "" -"We now have a full system that performs federated training and federated " -"evaluation. It uses the ``weighted_average`` function to aggregate custom" -" evaluation metrics and calculates a single ``accuracy`` metric across " -"all clients on the server side." 
-msgstr "" -"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` " -"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" +msgstr "**移除过时的不操作额外安装** ([#973](https://github.com/adap/flower/pull/973))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:1107 msgid "" -"The other two categories of metrics (``losses_centralized`` and " -"``metrics_centralized``) are still empty because they only apply when " -"centralized evaluation is being used. Part two of the Flower tutorial " -"will cover centralized evaluation." +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" msgstr "" -"其他两类指标(`losses_centralized`` 和 " -"`metrics_centralized`)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。" +"**从** `FitRes` **和** `EvaluateRes` 中移除已废弃的 proto 字段 " +"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 -msgid "Final remarks" -msgstr "结束语" +#: ../../source/ref-changelog.md:1108 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" +msgstr "" +"**移除过时的 QffedAvg 策略(由 QFedAvg 取代)** " +"([#1107](https://github.com/adap/flower/pull/1107))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +#: ../../source/ref-changelog.md:1109 msgid "" -"Congratulations, you just trained a convolutional neural network, " -"federated over 10 clients! With that, you understand the basics of " -"federated learning with Flower. 
The same approach you've seen can be used" -" with other machine learning frameworks (not just PyTorch) and tasks (not" -" just CIFAR-10 images classification), for example NLP with Hugging Face " -"Transformers or speech with SpeechBrain." +"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使用 Flower " -"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 " -"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。" +"**删除过时的 DefaultStrategy 策略** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:715 +#: ../../source/ref-changelog.md:1110 msgid "" -"In the next notebook, we're going to cover some more advanced concepts. " -"Want to customize your strategy? Initialize parameters on the server " -"side? Or evaluate the aggregated model on the server side? We'll cover " -"all this and more in the next tutorial." -msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" +"**删除已过时的对 eval_fn 返回值准确性的支持** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +#: ../../source/ref-changelog.md:1111 msgid "" -"The `Flower Federated Learning Tutorial - Part 2 " -"`__ goes into more depth about strategies and all " -"the advanced things you can build with them." 
+"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" msgstr "" -"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" +"**移除对以 NumPy ndarrays 传递初始参数的过时支持** " +"([#1142](https://github.com/adap/flower/pull/1142))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 -msgid "Use a federated learning strategy" -msgstr "使用联邦学习策略" +#: ../../source/ref-changelog.md:1113 +msgid "v0.18.0 (2022-02-28)" +msgstr "v0.18.0 (2022-02-28)" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +#: ../../source/ref-changelog.md:1117 msgid "" -"Welcome to the next part of the federated learning tutorial. In previous " -"parts of this tutorial, we introduced federated learning with PyTorch and" -" Flower (`part 1 `__)." +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" msgstr "" -"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " -"`___)。" +"**改进了虚拟客户端引擎与 Jupyter Notebook / Google Colab 的兼容性** " +"([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#: ../../source/ref-changelog.md:1119 msgid "" -"In this notebook, we'll begin to customize the federated learning system " -"we built in the introductory notebook (again, using `Flower " -"`__ and `PyTorch `__)." +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. 
Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"'flwr[simulation]'`)." msgstr "" -"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " -"`PyTorch `__)。" +"通过 `start_simulation` 在 Jupyter 笔记本(包括 Google Colab)上安装 Flower 并附加 " +"`simulation` (`pip install 'flwr[simulation]'`)后,模拟(通过 `start_simulation`" +" 使用虚拟客户端引擎)现在可以更流畅地运行。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 -msgid "Let's move beyond FedAvg with Flower strategies!" -msgstr "让我们超越 FedAvg,采用Flower策略!" +#: ../../source/ref-changelog.md:1121 +msgid "" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" +msgstr "" +"**新的 Jupyter Notebook 代码示例** " +"([#833](https://github.com/adap/flower/pull/833))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 -msgid "Strategy customization" -msgstr "策略定制" +#: ../../source/ref-changelog.md:1123 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." +msgstr "" +"新代码示例(`quickstart_simulation`)通过 Jupyter Notebook(包括 Google " +"Colab)演示了使用虚拟客户端引擎进行 Flower 模拟。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +#: ../../source/ref-changelog.md:1125 msgid "" -"So far, everything should look familiar if you've worked through the " -"introductory notebook. With that, we're ready to introduce a number of " -"new features." 
-msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" +msgstr "**客户端属性(功能预览)** ([#795](https://github.com/adap/flower/pull/795))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 -msgid "Server-side parameter **initialization**" -msgstr "服务器端参数 **初始化**" +#: ../../source/ref-changelog.md:1127 +msgid "" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." +msgstr "客户端可以实现一个新方法 `get_properties`,以启用服务器端策略来查询客户端属性。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +#: ../../source/ref-changelog.md:1129 msgid "" -"Flower, by default, initializes the global model by asking one random " -"client for the initial parameters. In many cases, we want more control " -"over parameter initialization though. Flower therefore allows you to " -"directly pass the initial parameters to the Strategy:" -msgstr "" -"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" -" 允许您直接将初始参数传递给策略:" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" +msgstr "** 使用 TFLite 实验性支持安卓系统** ([#865](https://github.com/adap/flower/pull/865))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +#: ../../source/ref-changelog.md:1131 msgid "" -"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" -" from asking one of the clients for the initial parameters. If we look " -"closely, we can see that the logs do not show any calls to the " -"``FlowerClient.get_parameters`` method." +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." 
msgstr "" -"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " -"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " -"方法的任何调用。" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 -msgid "Starting with a customized strategy" -msgstr "从定制战略开始" +"`main`终于支持 Android 了!Flower 的设计与客户端和框架无关。我们可以集成任意客户端平台,有了这个版本,在安卓系统上使用 " +"Flower 就变得更容易了。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +#: ../../source/ref-changelog.md:1133 msgid "" -"We've seen the function ``start_simulation`` before. It accepts a number " -"of arguments, amongst them the ``client_fn`` used to create " -"``FlowerClient`` instances, the number of clients to simulate " -"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." msgstr "" -"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " -"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" +"该示例在客户端使用了 TFLite 以及新的 `FedAvgAndroid`策略。Android 客户端和 " +"`FedAvgAndroid`仍处于试验阶段,但这是向成熟的 Android SDK 和集成了 `FedAvgAndroid`新功能的统一 " +"`FedAvg`实现迈出的第一步。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +#: ../../source/ref-changelog.md:1135 msgid "" -"The strategy encapsulates the federated learning approach/algorithm, for " -"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different " -"strategy this time:" -msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 -msgid "Server-side parameter **evaluation**" -msgstr "服务器端参数**评估**" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +msgstr "" +"**使 gRPC 保持连接时间可由用户配置,并缩短默认保持连接时间** " +"([#1069](https://github.com/adap/flower/pull/1069))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +#: ../../source/ref-changelog.md:1137 msgid "" -"Flower can evaluate the aggregated model on the server-side or on the " -"client-side. Client-side and server-side evaluation are similar in some " -"ways, but different in others." -msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." +msgstr "" +"为提高 Flower 与更多云环境(如 Microsoft Azure)的兼容性,缩短了默认 gRPC 保持时间。用户可以根据具体要求配置 " +"keepalive 时间,自定义 gRPC 堆栈。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +#: ../../source/ref-changelog.md:1139 msgid "" -"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " -"simple: it works the same way that evaluation in centralized machine " -"learning does. If there is a server-side dataset that can be used for " -"evaluation purposes, then that's great. We can evaluate the newly " -"aggregated model after each round of training without having to send the " -"model to clients. We're also fortunate in the sense that our entire " -"evaluation dataset is available at all times." 
-msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" +msgstr "" +"**使用 Opacus 和 PyTorch 的新差分隐私示例** " +"([#805](https://github.com/adap/flower/pull/805))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +#: ../../source/ref-changelog.md:1141 msgid "" -"**Federated Evaluation** (or *client-side evaluation*) is more complex, " -"but also more powerful: it doesn't require a centralized dataset and " -"allows us to evaluate models over a larger set of data, which often " -"yields more realistic evaluation results. In fact, many scenarios require" -" us to use **Federated Evaluation** if we want to get representative " -"evaluation results at all. But this power comes at a cost: once we start " -"to evaluate on the client side, we should be aware that our evaluation " -"dataset can change over consecutive rounds of learning if those clients " -"are not always available. Moreover, the dataset held by each client can " -"also change over consecutive rounds. This can lead to evaluation results " -"that are not stable, so even if we would not change the model, we'd see " -"our evaluation results fluctuate over consecutive rounds." -msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." 
+msgstr "一个新的代码示例(\"opacus\")演示了使用 Opacus、PyTorch 和 Flower 进行差分隐私的联邦学习。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +#: ../../source/ref-changelog.md:1143 msgid "" -"We've seen how federated evaluation works on the client side (i.e., by " -"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " -"how we can evaluate aggregated model parameters on the server-side:" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" msgstr "" -"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " -"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 -msgid "Sending/receiving arbitrary values to/from clients" -msgstr "向/从客户端发送/接收任意值" +"**新的Hugging Face Transformers代码示例** " +"([#863](https://github.com/adap/flower/pull/863))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +#: ../../source/ref-changelog.md:1145 msgid "" -"In some situations, we want to configure client-side execution (training," -" evaluation) from the server-side. One example for that is the server " -"asking the clients to train for a certain number of local epochs. Flower " -"provides a way to send configuration values from the server to the " -"clients using a dictionary. Let's look at an example where the clients " -"receive values from the server through the ``config`` parameter in " -"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " -"method receives the configuration dictionary through the ``config`` " -"parameter and can then read values from this dictionary. 
In this example,"
-" it reads ``server_round`` and ``local_epochs`` and uses those values to "
-"improve the logging and configure the number of local training epochs:"
-msgstr ""
-"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower "
-"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` "
-"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` "
-"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 "
-"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:"
+"A new code example (`quickstart_huggingface`) demonstrates usage of "
+"Hugging Face Transformers with Flower."
+msgstr "新的代码示例(`quickstart_huggingface`)演示了如何将 Hugging Face Transformers 与 Flower 结合使用。"

-#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546
+#: ../../source/ref-changelog.md:1147
 msgid ""
-"So how can we send this config dictionary from server to clients? The "
-"built-in Flower Strategies provide way to do this, and it works similarly"
-" to the way server-side evaluation works. 
We provide a function to the " -"strategy, and the strategy calls this function for every round of " -"federated learning:" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" msgstr "" -"那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " -"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 -msgid "" -"Next, we'll just pass this function to the FedAvg strategy before " -"starting the simulation:" -msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" +"**新的 MLCube 代码示例** ([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +#: ../../source/ref-changelog.md:1149 msgid "" -"As we can see, the client logs now include the current round of federated" -" learning (which they read from the ``config`` dictionary). We can also " -"configure local training to run for one epoch during the first and second" -" round of federated learning, and then for two epochs during the third " -"round." -msgstr "" -"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " -"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." +msgstr "新代码示例(\"quickstart_mlcube\")演示了 MLCube 与 Flower 的用法。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +#: ../../source/ref-changelog.md:1151 msgid "" -"Clients can also return arbitrary values to the server. To do so, they " -"return a dictionary from ``fit`` and/or ``evaluate``. 
We have seen and " -"used this concept throughout this notebook without mentioning it " -"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" -" key/value pair as the third return value in ``evaluate``." +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" msgstr "" -"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " -"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" -" ``evaluate`` 中的第三个返回值。" - -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 -msgid "Scaling federated learning" -msgstr "扩大联邦学习的规模" +"** 支持 SSL 的服务器和客户端** ([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +#: ../../source/ref-changelog.md:1153 msgid "" -"As a last step in this notebook, let's see how we can use Flower to " -"experiment with a large number of clients." -msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对大量客户端进行实验。" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." 
+msgstr "SSL 可实现客户端与服务器之间的安全加密连接。该版本开源了 Flower 安全 gRPC 实现,使所有 Flower 用户都能访问加密通信通道。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 -#, python-format +#: ../../source/ref-changelog.md:1155 msgid "" -"We now have 1000 partitions, each holding 45 training and 5 validation " -"examples. Given that the number of training examples on each client is " -"quite small, we should probably train the model a bit longer, so we " -"configure the clients to perform 3 local training epochs. We should also " -"adjust the fraction of clients selected for training during each round " -"(we don't want all 1000 clients participating in every round), so we " -"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " -"available clients (so 50 clients) will be selected for training each " -"round:" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" msgstr "" -"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " -"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " -"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " -"``fraction_fit`` 调整为 ``0.05``,这意味着每轮训练只选中 5%的可用客户端(即 50 个客户端):" +"**更新**`FedAdam`**和**`FedYogi`**战略** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +#: ../../source/ref-changelog.md:1157 msgid "" -"In this notebook, we've seen how we can gradually enhance our system by " -"customizing the strategy, initializing parameters on the server side, " -"choosing a different strategy, and evaluating models on the server-side. " -"That's quite a bit of flexibility with so little code, right?" -msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" 
+"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." +msgstr "FedAdam \"和 \"FedAdam \"与最新版本的 \"自适应联邦优化 \"论文相匹配。" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +#: ../../source/ref-changelog.md:1159 msgid "" -"In the later sections, we've seen how we can communicate arbitrary values" -" between server and clients to fully customize client-side execution. " -"With that capability, we built a large-scale Federated Learning " -"simulation using the Flower Virtual Client Engine and ran an experiment " -"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" msgstr "" -"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " -"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " -"个客户端!" +"**初始化** `start_simulation` **使用客户端 ID 列表** " +"([#860](https://github.com/adap/flower/pull/860))" -#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +#: ../../source/ref-changelog.md:1161 msgid "" -"The `Flower Federated Learning Tutorial - Part 3 " -"`__ shows how to build a fully custom ``Strategy`` from " -"scratch." +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." msgstr "" -"`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" +"现在可以使用客户端 ID 列表(`clients_ids`,类型:`List[str]`)调用 " +"`start_simulation`。每当需要初始化客户端时,这些 ID 就会被传递到 `client_fn` 中,这样就能更轻松地加载无法通过 " +"`int` 标识符访问的数据分区。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 -msgid "What is Federated Learning?" -msgstr "什么是联邦学习?" 
+#: ../../source/ref-changelog.md:1165 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" +msgstr "" +"更新 PyTorch 代码示例中的 \"num_examples \"计算 " +"([#909](https://github.com/adap/flower/pull/909))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +#: ../../source/ref-changelog.md:1166 msgid "" -"In this tutorial, you will learn what federated learning is, build your " -"first system in Flower, and gradually extend it. If you work through all " -"parts of the tutorial, you will be able to build advanced federated " -"learning systems that approach the current state of the art in the field." +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" msgstr "" -"在本教程中,你将了解什么是联邦学习,用 Flower " -"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" +"通过 `flwr.__version__` 公开 Flower 版本 " +"([#952](https://github.com/adap/flower/pull/952))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +#: ../../source/ref-changelog.md:1167 msgid "" -"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " -"federated learning. Only a basic understanding of data science and Python" -" programming is assumed." -msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +msgstr "" +"`app.py`中的 `start_server`现在会返回一个 `History` " +"对象,其中包含训练中的指标([#974](https://github.com/adap/flower/pull/974))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +#: ../../source/ref-changelog.md:1168 msgid "" -"`Star Flower on GitHub `__ ⭐️ and join " -"the open-source Flower community on Slack to connect, ask questions, and " -"get help: `Join Slack `__ 🌼 We'd love to " -"hear from you in the ``#introductions`` channel! 
And if anything is " -"unclear, head over to the ``#questions`` channel." +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" msgstr "" -"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " -"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" -" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" +"使 `max_workers`(由 " +"`ThreadPoolExecutor`使用)可配置([#978](https://github.com/adap/flower/pull/978))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 -msgid "Classic machine learning" -msgstr "经典机器学习" +#: ../../source/ref-changelog.md:1169 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" +msgstr "在所有代码示例中,将服务器启动后的休眠时间延长至三秒([#1086](https://github.com/adap/flower/pull/1086))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +#: ../../source/ref-changelog.md:1170 msgid "" -"Before we begin to discuss federated learning, let us quickly recap how " -"most machine learning works today." -msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" +msgstr "在文档中添加了新的常见问题部分 ([#948](https://github.com/adap/flower/pull/948))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +#: ../../source/ref-changelog.md:1171 msgid "" -"In machine learning, we have a model, and we have data. The model could " -"be a neural network (as depicted here), or something else, like classical" -" linear regression." -msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" +msgstr "还有更多底层更改、库更新、文档更改和工具改进!" 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|93b02017c78049bbbd5ae456dcb2c91b|" +#: ../../source/ref-changelog.md:1175 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" msgstr "" +"**从发布版中删除**`flwr_example`**和**`flwr_experimental`** " +"([#869](https://github.com/adap/flower/pull/869))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 -msgid "Model and data" -msgstr "模型和数据" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +#: ../../source/ref-changelog.md:1177 msgid "" -"We train the model using the data to perform a useful task. A task could " -"be to detect objects in images, transcribe an audio recording, or play a " -"game like Go." -msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|01471150fd5144c080a176b43e92a3ff|" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." msgstr "" +"自 Flower 0.12.0 起,软件包 `flwr_example` 和 `flwr_experimental` 已被弃用,它们不再包含在 " +"Flower 的发布版本中。相关的额外包(`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`)现在已不再使用,并将在即将发布的版本中移除。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 -msgid "Train model using data" -msgstr "使用数据训练模型" +#: ../../source/ref-changelog.md:1179 +msgid "v0.17.0 (2021-09-24)" +msgstr "v0.17.0 (2021-09-24)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +#: ../../source/ref-changelog.md:1183 msgid "" -"Now, in practice, the training data we work with doesn't originate on the" -" machine we train the model on. 
It gets created somewhere else." -msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" +msgstr "" +"**实验性虚拟客户端引擎** ([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +#: ../../source/ref-changelog.md:1185 msgid "" -"It originates on a smartphone by the user interacting with an app, a car " -"collecting sensor data, a laptop receiving input via the keyboard, or a " -"smart speaker listening to someone trying to sing a song." -msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|9bc21c7dbd17444a8f070c60786e3484|" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." msgstr "" +"Flower 的目标之一是实现大规模研究。这一版本首次(试验性地)展示了代号为 \"虚拟客户端引擎 " +"\"的重要新功能。虚拟客户端可以在单台机器或计算集群上对大量客户端进行模拟。测试新功能的最简单方法是查看名为 " +"\"quickstart_simulation \"和 \"simulation_pytorch \"的两个新代码示例。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 -msgid "Data on a phone" -msgstr "手机上的数据" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +#: ../../source/ref-changelog.md:1187 msgid "" -"What's also important to mention, this \"somewhere else\" is usually not " -"just one place, it's many places. 
It could be several devices all running" -" the same app. But it could also be several organizations, all generating" -" data for the same task." +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." msgstr "" -"值得一提的是,这个 \"其他地方 " -"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" +"该功能仍处于试验阶段,因此无法保证 API " +"的稳定性。此外,它还没有完全准备好进入黄金时间,并有一些已知的注意事项。不过,我们鼓励好奇的用户尝试使用并分享他们的想法。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|3047bbce54b34099ae559963d0420d79|" +#: ../../source/ref-changelog.md:1189 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" msgstr "" +"**新的内置策略**([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 -msgid "Data is on many devices" -msgstr "数据存在于多种设备中" +#: ../../source/ref-changelog.md:1191 +msgid "" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "FedYogi - 在服务器端使用 Yogi 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +#: ../../source/ref-changelog.md:1192 msgid "" -"So to use machine learning, or any kind of data analysis, the approach " -"that has been used in the past was to collect all data on a central " -"server. This server can be somewhere in a data center, or somewhere in " -"the cloud." -msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" +"FedAdam - Federated learning strategy using Adam on server-side. 
" +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "FedAdam - 在服务器端使用 Adam 的联邦学习策略。基于 https://arxiv.org/abs/2003.00295 实现" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|e9f8ce948593444fb838d2f354c7ec5d|" +#: ../../source/ref-changelog.md:1194 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" msgstr "" +"**新的 PyTorch Lightning 代码示例** " +"([#617](https://github.com/adap/flower/pull/617))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 -msgid "Central data collection" -msgstr "集中数据收集" +#: ../../source/ref-changelog.md:1196 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "**新的变分自动编码器代码示例** ([#752](https://github.com/adap/flower/pull/752))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +#: ../../source/ref-changelog.md:1198 msgid "" -"Once all the data is collected in one place, we can finally use machine " -"learning algorithms to train our model on the data. This is the machine " -"learning approach that we've basically always relied on." 
-msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" +msgstr "**新的 scikit-learn 代码示例** ([#748](https://github.com/adap/flower/pull/748))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|c24c1478b30e4f74839208628a842d1e|" -msgstr "" +#: ../../source/ref-changelog.md:1200 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" +msgstr "**新的实验性 TensorBoard 策略**([#789](https://github.com/adap/flower/pull/789))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 -msgid "Central model training" -msgstr "集中模型训练" +#: ../../source/ref-changelog.md:1204 +msgid "" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "改进的高级 TensorFlow 代码示例([#769](https://github.com/adap/flower/pull/769)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 -msgid "Challenges of classical machine learning" -msgstr "经典机器学习面临的挑战" +#: ../../source/ref-changelog.md:1205 +msgid "" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" +msgstr "" +"当 `min_available_clients` 配置错误时发出警告 " +"([#830](https://github.com/adap/flower/pull/830))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +#: ../../source/ref-changelog.md:1206 msgid "" -"The classic machine learning approach we've just seen can be used in some" -" cases. Great examples include categorizing holiday photos, or analyzing " -"web traffic. Cases, where all the data is naturally available on a " -"centralized server." 
-msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" +msgstr "改进了 gRPC 服务器文档([#841](https://github.com/adap/flower/pull/841))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|1b3613d7a58847b59e1d3180802dbc09|" -msgstr "" +#: ../../source/ref-changelog.md:1207 +msgid "" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" +msgstr "改进了 `NumPyClient` 中的错误信息 ([#851](https://github.com/adap/flower/pull/851))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 -msgid "Centralized possible" -msgstr "可集中管理" +#: ../../source/ref-changelog.md:1208 +msgid "" +"Improved PyTorch quickstart code example " +"([#852](https://github.com/adap/flower/pull/852))" +msgstr "改进的 PyTorch 快速启动代码示例 ([#852](https://github.com/adap/flower/pull/852))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +#: ../../source/ref-changelog.md:1212 msgid "" -"But the approach can not be used in many other cases. Cases, where the " -"data is not available on a centralized server, or cases where the data " -"available on one server is not enough to train a good model." -msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" +"**Disabled final distributed evaluation** " +"([#800](https://github.com/adap/flower/pull/800))" +msgstr "**禁用最终分布式评价** ([#800](https://github.com/adap/flower/pull/800))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|9980b5213db547d0b8024a50992b9e3f|" +#: ../../source/ref-changelog.md:1214 +msgid "" +"Prior behaviour was to perform a final round of distributed evaluation on" +" all connected clients, which is often not required (e.g., when using " +"server-side evaluation). The prior behaviour can be enabled by passing " +"`force_final_distributed_eval=True` to `start_server`." 
msgstr "" +"之前的行为是在所有连接的客户端上执行最后一轮分布式评估,而这通常是不需要的(例如,在使用服务器端评估时)。可以通过向 `start_server`" +" 传递 `force_final_distributed_eval=True` 来启用之前的行为。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 -msgid "Centralized impossible" -msgstr "无法集中" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +#: ../../source/ref-changelog.md:1216 msgid "" -"There are many reasons why the classic centralized machine learning " -"approach does not work for a large number of highly important real-world " -"use cases. Those reasons include:" -msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" +"**Renamed q-FedAvg strategy** " +"([#802](https://github.com/adap/flower/pull/802))" +msgstr "**更名为 q-FedAvg 策略** ([#802](https://github.com/adap/flower/pull/802))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +#: ../../source/ref-changelog.md:1218 msgid "" -"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " -"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " -"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " -"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " -"protect sensitive data from being moved. In fact, those regulations " -"sometimes even prevent single organizations from combining their own " -"users' data for artificial intelligence training because those users live" -" in different parts of the world, and their data is governed by different" -" data protection regulations." +"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect " +"the notation given in the original paper (q-FFL is the optimization " +"objective, q-FedAvg is the proposed solver). Note the original (now " +"deprecated) `QffedAvg` class is still available for compatibility reasons" +" (it will be removed in a future release)." 
msgstr "" -"**法规**: " -"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" +"名为 `QffedAvg` 的策略已更名为 `QFedAvg`,以更好地反映原始论文中给出的符号(q-FFL 是优化目标,q-FedAvg " +"是建议的求解器)。请注意,出于兼容性原因,原始(现已废弃)的 `QffedAvg` 类仍然可用(它将在未来的版本中移除)。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +#: ../../source/ref-changelog.md:1220 msgid "" -"**User preference**: In addition to regulation, there are use cases where" -" users just expect that no data leaves their device, ever. If you type " -"your passwords and credit card info into the digital keyboard of your " -"phone, you don't expect those passwords to end up on the server of the " -"company that developed that keyboard, do you? In fact, that use case was " -"the reason federated learning was invented in the first place." +"**Deprecated and renamed code example** `simulation_pytorch` **to** " +"`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" msgstr "" -"**用户偏好**: " -"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" +"**删除并重命名代码示例**`simulation_pytorch`**为**`simulation_pytorch_legacy` " +"([#791](https://github.com/adap/flower/pull/791))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +#: ../../source/ref-changelog.md:1222 msgid "" -"**Data volume**: Some sensors, like cameras, produce such a high data " -"volume that it is neither feasible nor economic to collect all the data " -"(due to, for example, bandwidth or communication efficiency). Think about" -" a national rail service with hundreds of train stations across the " -"country. 
If each of these train stations is outfitted with a number of " -"security cameras, the volume of raw on-device data they produce requires " -"incredibly powerful and exceedingly expensive infrastructure to process " -"and store. And most of the data isn't even useful." +"This example has been replaced by a new example. The new example is based" +" on the experimental virtual client engine, which will become the new " +"default way of doing most types of large-scale simulations in Flower. The" +" existing example was kept for reference purposes, but it might be " +"removed in the future." msgstr "" -"**数据量**: " -"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" +"该示例已被新示例取代。新示例基于试验性虚拟客户端引擎,它将成为在 Flower " +"中进行大多数类型大规模模拟的新的默认方式。现有示例将作为参考保留,但将来可能会删除。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 -msgid "Examples where centralized machine learning does not work include:" -msgstr "集中式机器学习不起作用的例子包括:" +#: ../../source/ref-changelog.md:1224 +msgid "v0.16.0 (2021-05-11)" +msgstr "v0.16.0 (2021-05-11)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +#: ../../source/ref-changelog.md:1228 msgid "" -"Sensitive healthcare records from multiple hospitals to train cancer " -"detection models" -msgstr "用多家医院的敏感医疗记录训练癌症检测模型" +"**New built-in strategies** " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "**新的内置策略** ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 -msgid "" -"Financial information from different organizations to detect financial " -"fraud" -msgstr "不同组织的财务信息,以侦查财务欺诈行为" +#: ../../source/ref-changelog.md:1230 +msgid "(abstract) FedOpt" +msgstr "(摘要) FedOpt" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 -msgid "Location data from your electric car to make better range prediction" -msgstr "通过电动汽车的定位数据更好地预测续航里程" +#: 
../../source/ref-changelog.md:1233 +msgid "" +"**Custom metrics for server and strategies** " +"([#717](https://github.com/adap/flower/pull/717))" +msgstr "**服务器和策略的自定义指标** ([#717](https://github.com/adap/flower/pull/717))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 -msgid "End-to-end encrypted messages to train better auto-complete models" -msgstr "端到端加密信息可训练出更好的自动完成模型" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +#: ../../source/ref-changelog.md:1235 msgid "" -"The popularity of privacy-enhancing systems like the `Brave " -"`__ browser or the `Signal `__ " -"messenger shows that users care about privacy. In fact, they choose the " -"privacy-enhancing version over other alternatives, if such an alternative" -" exists. But what can we do to apply machine learning and data science to" -" these cases to utilize private data? After all, these are all areas that" -" would benefit significantly from recent advances in AI." +"The Flower server is now fully task-agnostic, all remaining instances of " +"task-specific metrics (such as `accuracy`) have been replaced by custom " +"metrics dictionaries. Flower 0.15 introduced the capability to pass a " +"dictionary containing custom metrics from client to server. As of this " +"release, custom metrics replace task-specific metrics on the server." msgstr "" -"像 `Brave `__浏览器或 `Signal " -"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 -msgid "Federated learning" -msgstr "联邦学习" +"Flower 服务器现在完全与任务无关,所有剩余的任务特定度量(如 \"准确度\")都已被自定义度量字典取代。Flower 0.15 " +"引入了从客户端向服务器传递包含自定义指标的字典的功能。从本版本开始,自定义指标将取代服务器上的特定任务指标。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +#: ../../source/ref-changelog.md:1237 msgid "" -"Federated learning simply reverses this approach. 
It enables machine " -"learning on distributed data by moving the training to the data, instead " -"of moving the data to the training. Here's the single-sentence " -"explanation:" -msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" +"Custom metric dictionaries are now used in two user-facing APIs: they are" +" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and " +"they enable evaluation functions passed to built-in strategies (via " +"`eval_fn`) to return more than two evaluation metrics. Strategies can " +"even return *aggregated* metrics dictionaries for the server to keep " +"track of." +msgstr "" +"自定义度量字典现在可在两个面向用户的 API 中使用:它们可从策略方法 `aggregate_fit`/`aggregate_evaluate` " +"返回,还可使传递给内置策略(通过 `eval_fn`)的评估函数返回两个以上的评估度量。策略甚至可以返回 *aggregated* " +"指标字典,以便服务器跟踪。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 -msgid "Central machine learning: move the data to the computation" -msgstr "集中式机器学习:将数据转移到计算中心" +#: ../../source/ref-changelog.md:1239 +msgid "" +"Strategy implementations should migrate their `aggregate_fit` and " +"`aggregate_evaluate` methods to the new return type (e.g., by simply " +"returning an empty `{}`), server-side evaluation functions should migrate" +" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`." +msgstr "" +"Strategy 实现应将其 `aggregate_fit` 和 `aggregate_evaluate` " +"方法迁移到新的返回类型(例如,只需返回空的 `{}`),服务器端评估函数应从 `return loss, accuracy` 迁移到 " +"`return loss, {\"accuracy\": accuracy}`。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 -msgid "Federated (machine) learning: move the computation to the data" -msgstr "联邦式(机器)学习:将计算转移到数据上" +#: ../../source/ref-changelog.md:1241 +msgid "" +"Flower 0.15-style return types are deprecated (but still supported), " +"compatibility will be removed in a future release." 
+msgstr "Flower 0.15 风格的返回类型已被弃用(但仍受支持),兼容性将在未来的版本中移除。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +#: ../../source/ref-changelog.md:1243 msgid "" -"By doing so, it enables us to use machine learning (and other data " -"science approaches) in areas where it wasn't possible before. We can now " -"train excellent medical AI models by enabling different hospitals to work" -" together. We can solve financial fraud by training AI models on the data" -" of different financial institutions. We can build novel privacy-" -"enhancing applications (such as secure messaging) that have better built-" -"in AI than their non-privacy-enhancing alternatives. And those are just a" -" few of the examples that come to mind. As we deploy federated learning, " -"we discover more and more areas that can suddenly be reinvented because " -"they now have access to vast amounts of previously inaccessible data." -msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" +msgstr "** 过时功能的迁移警告** ([#690](https://github.com/adap/flower/pull/690))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +#: ../../source/ref-changelog.md:1245 msgid "" -"So how does federated learning work, exactly? Let's start with an " -"intuitive explanation." -msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." 
+msgstr "" +"Flower 早期版本通常会迁移到新的应用程序接口,同时保持与旧版应用程序接口的兼容。如果检测到使用了过时的 " +"API,本版本将引入详细的警告信息。新的警告信息通常会详细说明如何迁移到更新的 API,从而简化从一个版本到另一个版本的过渡。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 -msgid "Federated learning in five steps" -msgstr "联邦学习的五个步骤" +#: ../../source/ref-changelog.md:1247 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" +msgstr "" +"改进了文档和文档说明 ([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 -msgid "Step 0: Initialize global model" -msgstr "步骤 0:初始化全局模型" +#: ../../source/ref-changelog.md:1249 +msgid "MXNet example and documentation" +msgstr "MXNet 示例和文档" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +#: ../../source/ref-changelog.md:1251 msgid "" -"We start by initializing the model on the server. This is exactly the " -"same in classic centralized learning: we initialize the model parameters," -" either randomly or from a previously saved checkpoint." 
-msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" msgstr "" +"PyTorch 示例中的 FedBN 实现: 从集中到联邦 " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 -msgid "Initialize global model" -msgstr "初始化全局模型" +#: ../../source/ref-changelog.md:1255 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" +msgstr "**序列化无关服务器** ([#721](https://github.com/adap/flower/pull/721))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +#: ../../source/ref-changelog.md:1257 msgid "" -"Step 1: Send model to a number of connected organizations/devices (client" -" nodes)" -msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." 
+msgstr "" +"Flower 服务器现在完全不依赖序列化。之前使用的 `Weights` 类(以反序列化的 NumPy ndarrays 表示参数)已被 " +"`Parameters` 类取代(例如在 `Strategy`中)。参数 " +"\"对象与序列化完全无关,它以字节数组的形式表示参数,\"tensor_type \"属性表示如何解释这些字节数组(例如,用于序列化/反序列化)。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +#: ../../source/ref-changelog.md:1259 msgid "" -"Next, we send the parameters of the global model to the connected client " -"nodes (think: edge devices like smartphones or servers belonging to " -"organizations). This is to ensure that each participating node starts " -"their local training using the same model parameters. We often use only a" -" few of the connected nodes instead of all nodes. The reason for this is " -"that selecting more and more client nodes has diminishing returns." -msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" +"Built-in strategies implement this approach by handling serialization and" +" deserialization to/from `Weights` internally. Custom/3rd-party Strategy " +"implementations should update to the slightly changed Strategy method " +"definitions. Strategy authors can consult PR " +"[#721](https://github.com/adap/flower/pull/721) to see how strategies can" +" easily migrate to the new format." 
+msgstr "" +"内置策略通过在内部处理序列化和反序列化到/从`Weights`来实现这种方法。自定义/第三方策略实现应更新为稍有改动的策略方法定义。策略作者可查阅" +" PR [#721](https://github.com/adap/flower/pull/721) 以了解如何将策略轻松迁移到新格式。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|032eb6fed6924ac387b9f13854919196|" +#: ../../source/ref-changelog.md:1261 +msgid "" +"Deprecated `flwr.server.Server.evaluate`, use " +"`flwr.server.Server.evaluate_round` instead " +"([#717](https://github.com/adap/flower/pull/717))" msgstr "" +"已弃用 `flwr.server.Server.evaluate`,改用 " +"`flwr.server.Server.evaluate_round`([#717](https://github.com/adap/flower/pull/717)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 -msgid "Send global model" -msgstr "发送全局模型" +#: ../../source/ref-changelog.md:1263 +msgid "v0.15.0 (2021-03-12)" +msgstr "v0.15.0 (2021-03-12)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +#: ../../source/ref-changelog.md:1267 msgid "" -"Step 2: Train model locally on the data of each organization/device " -"(client node)" -msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" +"**Server-side parameter initialization** " +"([#658](https://github.com/adap/flower/pull/658))" +msgstr "**服务器端参数初始化** ([#658](https://github.com/adap/flower/pull/658))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +#: ../../source/ref-changelog.md:1269 msgid "" -"Now that all (selected) client nodes have the latest version of the " -"global model parameters, they start the local training. They use their " -"own local dataset to train their own local model. They don't train the " -"model until full convergence, but they only train for a little while. " -"This could be as little as one epoch on the local data, or even just a " -"few steps (mini-batches)." +"Model parameters can now be initialized on the server-side. Server-side " +"parameter initialization works via a new `Strategy` method called " +"`initialize_parameters`." 
msgstr "" -"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" -"(mini-batches)。" +"现在可以在服务器端初始化模型参数。服务器端参数初始化通过名为 \"initialize_parameters \"的新 \"Strategy " +"\"方法进行。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#: ../../source/ref-changelog.md:1271 +msgid "" +"Built-in strategies support a new constructor argument called " +"`initial_parameters` to set the initial parameters. Built-in strategies " +"will provide these initial parameters to the server on startup and then " +"delete them to free the memory afterwards." msgstr "" +"内置策略支持名为 \"initial_parameters " +"\"的新构造函数参数,用于设置初始参数。内置策略会在启动时向服务器提供这些初始参数,然后删除它们以释放内存。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 -msgid "Train on local data" -msgstr "根据本地数据进行训练" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 -msgid "Step 3: Return model updates back to the server" -msgstr "步骤 3:将模型参数更新返回服务器" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +#: ../../source/ref-changelog.md:1290 msgid "" -"After local training, each client node has a slightly different version " -"of the model parameters they originally received. The parameters are all " -"different because each client node has different examples in its local " -"dataset. The client nodes then send those model updates back to the " -"server. The model updates they send can either be the full model " -"parameters or just the gradients that were accumulated during local " -"training." -msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" +"If no initial parameters are provided to the strategy, the server will " +"continue to use the current behaviour (namely, it will ask one of the " +"connected clients for its parameters and use these as the initial global " +"parameters)." 
+msgstr "如果没有向策略提供初始参数,服务器将继续使用当前行为(即向其中一个已连接的客户端询问参数,并将这些参数用作初始全局参数)。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|7efbe3d29d8349b89594e8947e910525|" +#: ../../source/ref-changelog.md:1294 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" msgstr "" +"停用 `flwr.server.strategy.DefaultStrategy`(迁移到等价的 " +"`flwr.server.strategy.FedAvg`)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 -msgid "Send model updates" -msgstr "发送模型参数更新" - -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 -msgid "Step 4: Aggregate model updates into a new global model" -msgstr "步骤 4:将模型更新聚合到新的全局模型中" +#: ../../source/ref-changelog.md:1296 +msgid "v0.14.0 (2021-02-18)" +msgstr "v0.14.0 (2021-02-18)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +#: ../../source/ref-changelog.md:1300 msgid "" -"The server receives model updates from the selected client nodes. If it " -"selected 100 client nodes, it now has 100 slightly different versions of " -"the original global model, each trained on the local data of one client. " -"But didn't we want to have one model that contains the learnings from the" -" data of all 100 client nodes?" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" msgstr "" -"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " -"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" 
+"**通用** `Client.fit` **和** `Client.evaluate` **返回值** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +#: ../../source/ref-changelog.md:1302 msgid "" -"In order to get one single model, we have to combine all the model " -"updates we received from the client nodes. This process is called " -"*aggregation*, and there are many different ways to do it. The most basic" -" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " -"`__), often abbreviated as *FedAvg*. " -"*FedAvg* takes the 100 model updates and, as the name suggests, averages " -"them. To be more precise, it takes the *weighted average* of the model " -"updates, weighted by the number of examples each client used for " -"training. The weighting is important to make sure that each data example " -"has the same \"influence\" on the resulting global model. If one client " -"has 10 examples, and another client has 100 examples, then - without " -"weighting - each of the 10 examples would influence the global model ten " -"times as much as each of the 100 examples." +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" msgstr "" -"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " -"*Federated Averaging* (`McMahan等人,2016 " -"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " -"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" -" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" -" 100 个示例的 10 倍。" +"客户端现在可以返回一个额外的字典,将 `str` 键映射为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。这意味着我们可以从 `fit`/`evaluate` " +"返回几乎任意的值,并在服务器端使用它们!" 
-#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|329fb3c04c744eda83bb51fa444c2266|" +#: ../../source/ref-changelog.md:1304 +msgid "" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." msgstr "" +"这一改进还使 `fit` 和 `evaluate` 之间的返回类型更加一致:`evaluate` 现在应返回一个元组`(float, int, " +"dict)`,代表损失、示例数和一个包含特定问题任意值(如准确度)的字典。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 -msgid "Aggregate model updates" -msgstr "聚合模型参数更新" +#: ../../source/ref-changelog.md:1306 +msgid "" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." +msgstr "" +"如果你想知道:此功能与现有项目兼容,额外的字典返回值是可选的。不过,新代码应迁移到新的返回类型,以便与即将发布的 Flower " +"版本兼容(`fit`: `List[np.ndarray], int, Dict[str, Scalar]`,`evaluate`: " +"`float, int, Dict[str, Scalar]`)。详见下面的示例。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 -msgid "Step 5: Repeat steps 1 to 4 until the model converges" -msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" +#: ../../source/ref-changelog.md:1308 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" +msgstr "*代码示例:* 注意 `FlwrClient.fit` 和 `FlwrClient.evaluate` 中的附加字典返回值:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +#: ../../source/ref-changelog.md:1323 msgid "" -"Steps 1 to 4 are what we call a single round of federated learning. 
The " -"global model parameters get sent to the participating client nodes (step " -"1), the client nodes train on their local data (step 2), they send their " -"updated models to the server (step 3), and the server then aggregates the" -" model updates to get a new version of the global model (step 4)." +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" msgstr "" -"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " -"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" +"**在**`Client.fit` " +"**和**`Client.evaluate`中泛化**`config`参数([#595](https://github.com/adap/flower/pull/595))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +#: ../../source/ref-changelog.md:1325 msgid "" -"During a single round, each client node that participates in that " -"iteration only trains for a little while. This means that after the " -"aggregation step (step 4), we have a model that has been trained on all " -"the data of all participating client nodes, but only for a little while. " -"We then have to repeat this training process over and over again to " -"eventually arrive at a fully trained model that performs well across the " -"data of all client nodes." +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." msgstr "" -"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 " -"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。" +"`config`参数曾是 \"字典[str, str]\"类型,这意味着字典值应是字符串。新版本将其扩展为以下类型的值: " +"bool`、`bytes`、`float`、`int`、`str`。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +#: ../../source/ref-changelog.md:1327 msgid "" -"Congratulations, you now understand the basics of federated learning. 
" -"There's a lot more to discuss, of course, but that was federated learning" -" in a nutshell. In later parts of this tutorial, we will go into more " -"detail. Interesting questions include: How can we select the best client " -"nodes that should participate in the next round? What's the best way to " -"aggregate model updates? How can we handle failing client nodes " -"(stragglers)?" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" msgstr "" -"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括" -" 我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?" +"这意味着现在可以使用 `config` 字典向 `fit`/`evaluate` 传递几乎任意的值。耶,服务器端不再需要 " +"`str(epochs)`,客户端不再需要 `int(config[\"epochs\"])`!" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +#: ../../source/ref-changelog.md:1329 msgid "" -"Just like we can train a model on the decentralized data of different " -"client nodes, we can also evaluate the model on that data to receive " -"valuable metrics. This is called federated evaluation, sometimes " -"abbreviated as FE. In fact, federated evaluation is an integral part of " -"most federated learning systems." 
-msgstr "" -"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为" -" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "*代码示例:* 注意 `config` 字典现在在 `Client.fit` 和 `Client.evaluate` 中都包含非 `str` 值:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 -msgid "Federated analytics" -msgstr "联邦分析" +#: ../../source/ref-changelog.md:1346 +msgid "v0.13.0 (2021-01-08)" +msgstr "v0.13.0 (2021-01-08)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +#: ../../source/ref-changelog.md:1350 msgid "" -"In many cases, machine learning isn't necessary to derive value from " -"data. Data analysis can yield valuable insights, but again, there's often" -" not enough data to get a clear answer. What's the average age at which " -"people develop a certain type of health condition? Federated analytics " -"enables such queries over multiple client nodes. It is usually used in " -"conjunction with other privacy-enhancing technologies like secure " -"aggregation to prevent the server from seeing the results submitted by " -"individual client nodes." 
-msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "新示例: PyTorch 从集中到联邦 ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +#: ../../source/ref-changelog.md:1351 +msgid "Improved documentation" +msgstr "改进文档" + +#: ../../source/ref-changelog.md:1352 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgstr "新文档主题 ([#551](https://github.com/adap/flower/pull/551))" + +#: ../../source/ref-changelog.md:1353 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "新的 API 参考 ([#554](https://github.com/adap/flower/pull/554))" + +#: ../../source/ref-changelog.md:1354 msgid "" -"Differential privacy (DP) is often mentioned in the context of Federated " -"Learning. It is a privacy-preserving method used when analyzing and " -"sharing statistical data, ensuring the privacy of individual " -"participants. DP achieves this by adding statistical noise to the model " -"updates, ensuring any individual participants’ information cannot be " -"distinguished or re-identified. This technique can be considered an " -"optimization that provides a quantifiable privacy protection measure." 
-msgstr "" -"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " -"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "更新了示例文档 ([#549](https://github.com/adap/flower/pull/549))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 -msgid "Flower" -msgstr "Flower" +#: ../../source/ref-changelog.md:1355 +msgid "" +"Removed obsolete documentation " +"([#548](https://github.com/adap/flower/pull/548))" +msgstr "删除了过时的文档 ([#548](https://github.com/adap/flower/pull/548))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +#: ../../source/ref-changelog.md:1357 +msgid "Bugfix:" +msgstr "错误修正:" + +#: ../../source/ref-changelog.md:1359 msgid "" -"Federated learning, federated evaluation, and federated analytics require" -" infrastructure to move machine learning models back and forth, train and" -" evaluate them on local data, and then aggregate the updated models. " -"Flower provides the infrastructure to do exactly that in an easy, " -"scalable, and secure way. In short, Flower presents a unified approach to" -" federated learning, analytics, and evaluation. It allows the user to " -"federate any workload, any ML framework, and any programming language." +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." 
msgstr "" -"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " -"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " -"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" +"Server.fit \"完成后不会断开客户端连接,现在断开客户端连接是在 \"flwr.server.start_server " +"\"中处理的([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))。" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|c00bf2750bc24d229737a0fe1395f0fc|" -msgstr "" +#: ../../source/ref-changelog.md:1361 +msgid "v0.12.0 (2020-12-07)" +msgstr "v0.12.0 (2020-12-07)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 -msgid "" -"Flower federated learning server and client nodes (car, scooter, personal" -" computer, roomba, and phone)" -msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" +#: ../../source/ref-changelog.md:1363 ../../source/ref-changelog.md:1379 +msgid "Important changes:" +msgstr "重要变更:" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +#: ../../source/ref-changelog.md:1365 msgid "" -"Congratulations, you just learned the basics of federated learning and " -"how it relates to the classic (centralized) machine learning!" -msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" +msgstr "添加了嵌入式设备示例 ([#507](https://github.com/adap/flower/pull/507))" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +#: ../../source/ref-changelog.md:1366 msgid "" -"In the next part of this tutorial, we are going to build a first " -"federated learning system with Flower." 
-msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" +msgstr "" +"添加了一个新的 NumPyClient(除现有的 KerasClient " +"之外)([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508)" -#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +#: ../../source/ref-changelog.md:1367 msgid "" -"The `Flower Federated Learning Tutorial - Part 1 " -"`__ shows how to build a simple federated learning system " -"with PyTorch and Flower." +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" msgstr "" -"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " -"构建一个简单的联邦学习系统。" +"弃用 `flwr_example` 软件包,并开始将示例迁移到顶层的 `examples` 目录 " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" -#~ msgid "Before the release" -#~ msgstr "发布前" +#: ../../source/ref-changelog.md:1369 +msgid "v0.11.0 (2020-11-30)" +msgstr "v0.11.0 (2020-11-30)" -#~ msgid "" -#~ "Update the changelog (``changelog.md``) with" -#~ " all relevant changes that happened " -#~ "after the last release. 
If the " -#~ "last release was tagged ``v1.2.0``, you" -#~ " can use the following URL to " -#~ "see all commits that got merged " -#~ "into ``main`` since then:" -#~ msgstr "" -#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " -#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" +#: ../../source/ref-changelog.md:1371 +msgid "Incompatible changes:" +msgstr "不兼容的更改:" -#~ msgid "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" -#~ msgstr "" -#~ "`GitHub: Compare v1.2.0...main " -#~ "`_" +#: ../../source/ref-changelog.md:1373 +msgid "" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. To " +"migrate rename the following `Strategy` methods accordingly:" +msgstr "" +"重命名了策略方法([#486](https://github.com/adap/flower/pull/486)),以统一 Flower公共 " +"API 的命名。其他公共方法/函数(例如 `Client` 中的每个方法,以及 `Strategy.evaluate`)不使用 `on_` " +"前缀,这就是我们从 Strategy 中的四个方法中移除它的原因。迁移时,请相应地重命名以下 `Strategy` 方法:" -#~ msgid "" -#~ "Thank the authors who contributed since" -#~ " the last release. This can be " -#~ "done by running the ``./dev/add-" -#~ "shortlog.sh`` convenience script (it can " -#~ "be ran multiple times and will " -#~ "update the names in the list if" -#~ " new contributors were added in the" -#~ " meantime)." -#~ msgstr "" -#~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " -#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" +#: ../../source/ref-changelog.md:1374 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "`on_configure_evaluate` => `configure_evaluate`" -#~ msgid "" -#~ "Update the ``changelog.md`` section header " -#~ "``Unreleased`` to contain the version " -#~ "number and date for the release " -#~ "you are building. Create a pull " -#~ "request with the change." 
-#~ msgstr "" -#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " -#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" +#: ../../source/ref-changelog.md:1375 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "`on_aggregate_evaluate` => `aggregate_evaluate`" -#~ msgid "" -#~ "Second, create a virtual environment " -#~ "(and activate it). If you chose to" -#~ " use :code:`pyenv` (with the :code" -#~ ":`pyenv-virtualenv` plugin) and already " -#~ "have it installed , you can use" -#~ " the following convenience script (by " -#~ "default it will use :code:`Python " -#~ "3.8.17`, but you can change it by" -#~ " providing a specific :code:``)::" -#~ msgstr "" -#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" -#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " -#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" +#: ../../source/ref-changelog.md:1376 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "`on_configure_fit` => `configure_fit`" -#~ msgid "flwr (Python API reference)" -#~ msgstr "flwr(Python API 参考)" +#: ../../source/ref-changelog.md:1377 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "`on_aggregate_fit` => `aggregate_fit`" -#~ msgid "..." -#~ msgstr "..." +#: ../../source/ref-changelog.md:1381 +msgid "" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." +msgstr "" +"已废弃的 `DefaultStrategy` ([#479](https://github.com/adap/flower/pull/479)) " +"。迁移时请使用 `FedAvg`。" -#~ msgid "Starting a client with an insecure server connection:" -#~ msgstr "使用不安全的服务器连接启动客户端:" +#: ../../source/ref-changelog.md:1382 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." 
+msgstr "简化示例和baselines([#484](https://github.com/adap/flower/pull/484))。" -#~ msgid "server.strategy.FedAvg" -#~ msgstr "server.strategy.FedAvg" +#: ../../source/ref-changelog.md:1383 +msgid "" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." +msgstr "" +"删除了策略界面中目前未使用的 " +"\"on_conclude_round\"([#483](https://github.com/adap/flower/pull/483))。" -#~ msgid "server.strategy.FedAvgM" -#~ msgstr "server.strategy.FedAvgM" +#: ../../source/ref-changelog.md:1384 +msgid "" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." +msgstr "" +"将最小 Python 版本设为 3.6.1,而不是 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." -#~ msgid "Configurable FedAvg with Momentum strategy implementation." -#~ msgstr "可配置的 FedAvg 动量策略实施。" +#: ../../source/ref-changelog.md:1385 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." +msgstr "" +"改进了 `Strategy` " +"docstrings([#470](https://github.com/adap/flower/pull/470))。" -#~ msgid "Fraction of clients used during training. Defaults to 0.1." -#~ msgstr "训练期间使用客户的比例。默认为 0.1。" +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" +msgstr "项目实例" -#~ msgid "Fraction of clients used during validation. Defaults to 0.1." -#~ msgstr "验证过程中使用的客户端比例。默认为 0.1。" +#: ../../source/ref-example-projects.rst:4 +msgid "" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." 
+msgstr "" +"Flower 附带了许多使用示例。这些示例演示了如何使用 Flower 联邦不同类型的现有机器学习形式,通常是利用流行的机器学习框架,如 " +"`PyTorch `_ 或 `TensorFlow " +"`_。" -#~ msgid "server.strategy.FedMedian" -#~ msgstr "server.strategy.FedMedian" +#: ../../source/ref-example-projects.rst:9 +#, fuzzy +msgid "The following examples are available as standalone projects." +msgstr "以下示例可作为独立项目使用。" -#~ msgid "server.strategy.QFedAvg" -#~ msgstr "server.strategy.QFedAvg" +#: ../../source/ref-example-projects.rst:12 +#, fuzzy +msgid "Quickstart TensorFlow/Keras" +msgstr "快速入门 TensorFlow" -#~ msgid "server.strategy.FedOpt" -#~ msgstr "server.strategy.FedOpt" +#: ../../source/ref-example-projects.rst:14 +msgid "" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" +msgstr "TensorFlow/Keras 快速入门示例展示了使用 MobileNetV2 进行的 CIFAR-10 图像分类:" -#~ msgid "Configurable FedAdagrad strategy implementation." -#~ msgstr "可配置的 FedAdagrad 策略实施。" +#: ../../source/ref-example-projects.rst:17 +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" +msgstr "" +"`TensorFlow快速入门 (代码) `_" -#~ msgid "Federated Optim strategy interface." -#~ msgstr "Federated Optim 策略界面。" +#: ../../source/ref-example-projects.rst:19 +#, fuzzy +msgid ":doc:`Quickstart TensorFlow (Tutorial) `" +msgstr "" +"`TensorFlow快速入门 (教程) `_" -#~ msgid "server.strategy.FedProx" -#~ msgstr "server.strategy.FedProx" +#: ../../source/ref-example-projects.rst:20 +msgid "" +"`Quickstart TensorFlow (Blog Post) `_" +msgstr "" +"`TensorFlow快速入门 (博客) `_" -#~ msgid "Configurable FedProx strategy implementation." 
-#~ msgstr "可配置的 FedProx 策略实施。" +#: ../../source/ref-example-projects.rst:24 +#: ../../source/tutorial-quickstart-pytorch.rst:4 +msgid "Quickstart PyTorch" +msgstr "PyTorch快速入门" -#~ msgid "server.strategy.FedAdagrad" -#~ msgstr "server.strategy.FedAdagrad" +#: ../../source/ref-example-projects.rst:26 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" +msgstr "PyTorch 快速入门范例展示了使用简单卷积神经网络进行 CIFAR-10 图像分类的情况:" -#~ msgid "Paper: https://arxiv.org/abs/2003.00295" -#~ msgstr "论文: https://arxiv.org/abs/2003.00295" +#: ../../source/ref-example-projects.rst:29 +msgid "" +"`Quickstart PyTorch (Code) " +"`_" +msgstr "" +"`PyTorch快速入门 (代码) `_" -#~ msgid "Federated learning strategy using Adagrad on server-side." -#~ msgstr "在服务器端使用 Adagrad 的联邦学习策略。" +#: ../../source/ref-example-projects.rst:31 +#, fuzzy +msgid ":doc:`Quickstart PyTorch (Tutorial) `" +msgstr "" +"`PyTorch快速入门 (教程) `_" -#~ msgid "server.strategy.FedAdam" -#~ msgstr "server.strategy.FedAdam" +#: ../../source/ref-example-projects.rst:34 +msgid "PyTorch: From Centralized To Federated" +msgstr "PyTorch: 从集中式到联邦式" -#~ msgid "server.strategy.FedYogi" -#~ msgstr "server.strategy.FedYogi" +#: ../../source/ref-example-projects.rst:36 +msgid "" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "本例展示了如何使用 Flower 联邦化一个普通的 PyTorch 项目:" -#~ msgid "Adaptive Federated Optimization using Yogi." -#~ msgstr "使用 Yogi 的自适应联合优化。" +#: ../../source/ref-example-projects.rst:38 +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" +msgstr "" +"PyTorch: 从集中式到联邦式(代码) `_" -#~ msgid "Federated learning strategy using Yogi on server-side." 
-#~ msgstr "在服务器端使用 Yogi 的联邦学习策略。" +#: ../../source/ref-example-projects.rst:40 +#, fuzzy +msgid "" +":doc:`PyTorch: From Centralized To Federated (Tutorial) `" +msgstr "" +"PyTorch: 从集中式到联邦式(教程) `_" -#~ msgid "Paper: https://arxiv.org/abs/1803.01498" -#~ msgstr "论文:https://arxiv.org/abs/1803.01498" +#: ../../source/ref-example-projects.rst:44 +msgid "Federated Learning on Raspberry Pi and Nvidia Jetson" +msgstr "树莓派和 Nvidia Jetson 上的联邦学习" -#~ msgid "server.strategy.Krum" -#~ msgstr "server.strategy.Krum" +#: ../../source/ref-example-projects.rst:46 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "本示例展示了如何利用 Flower 建立一个跨 Raspberry Pi 和 Nvidia Jetson 运行的联邦学习系统:" -#~ msgid "Configurable Krum strategy implementation." -#~ msgstr "可配置的 Krum 策略实施。" +#: ../../source/ref-example-projects.rst:49 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" +msgstr "" +"Raspberry Pi 和 Nvidia Jetson 上的联邦学习(代码) " +"`_" -#~ msgid "server.strategy.Bulyan" -#~ msgstr "server.strategy.Bulyan" +#: ../../source/ref-example-projects.rst:51 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" +msgstr "" +"Raspberry Pi和 Nvidia Jetson 上的联邦学习(博客) " +"`_" -#~ msgid "Bulyan strategy implementation." -#~ msgstr "Bulyan策略的实施。" +#: ../../source/ref-faq.rst:4 +msgid "" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "本页收集了有关 \"Flower 联邦学习 \"常见问题的答案。" -#~ msgid "server.strategy.FedXgbNnAvg" -#~ msgstr "server.strategy.FedXgbNnAvg" +#: ../../source/ref-faq.rst +#, fuzzy +msgid ":fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab?" +msgstr ":fa:`eye,mr-1` Flower 可以在 Juptyter Notebooks / Google Colab 上运行吗?" -#~ msgid "Federated XGBoost [Ma et al., 2023] strategy." -#~ msgstr "Federated XGBoost [Ma 等人,2023] 策略。" +#: ../../source/ref-faq.rst:9 +msgid "" +"Yes, it can! 
Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "是的,它可以!Flower 甚至还进行了一些底层优化,使其在 Colab 上运行得更好。下面是一个快速启动示例:" -#~ msgid "server.strategy.DPFedAvgAdaptive" -#~ msgstr "server.strategy.DPFedAvgAdaptive" +#: ../../source/ref-faq.rst:11 +msgid "" +"`Flower simulation PyTorch " +"`_" +msgstr "" +"`Flower 模拟 PyTorch " +"`_" -#~ msgid "" -#~ "**Fix the incorrect return types of " -#~ "Strategy** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" -#~ msgstr "" -#~ "**修复策略的错误返回类型** " -#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#: ../../source/ref-faq.rst:12 +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" +msgstr "" +"`Flower模拟TensorFlow/Keras " +"`_" -#~ msgid "" -#~ "The types of the return values in" -#~ " the docstrings in two methods " -#~ "(`aggregate_fit` and `aggregate_evaluate`) now " -#~ "match the hint types in the code." -#~ msgstr "" -#~ "两个方法(\"aggregate_fit \"和 " -#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" +msgstr ":fa:`eye,mr-1` 如何在 Raspberry Pi 上运行联邦学习?" -#~ msgid "" -#~ "**Update Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" -#~ msgstr "" -#~ "** 更新 Flower Examples** " -#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," -#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#: ../../source/ref-faq.rst:16 +msgid "" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." +msgstr "" +"请点击此处查看有关嵌入式设备联邦学习的 " +"\"博文\"`_和相应的" +" \"GitHub 代码示例\"`_。" -#~ msgid "" -#~ "That's it for the client. 
We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." -#~ msgstr "" -#~ "对于客户端就需要做这么多。我们仅需要实现 " -#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" -#~ " :code:`\"0.0.0.0:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" -#~ " server_address 。" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr ":fa:`eye,mr-1` Flower 是否支持安卓设备上的联邦学习?" -#~ msgid "" -#~ "That's it for the client. We only" -#~ " have to implement :code:`Client` or " -#~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()`. The string " -#~ ":code:`\"[::]:8080\"` tells the client which" -#~ " server to connect to. In our " -#~ "case we can run the server and " -#~ "the client on the same machine, " -#~ "therefore we use :code:`\"[::]:8080\"`. If " -#~ "we run a truly federated workload " -#~ "with the server and clients running " -#~ "on different machines, all that needs" -#~ " to change is the :code:`server_address`" -#~ " we point the client at." 
-#~ msgstr "" -#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " -#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " -#~ "即可。字符串 :code:`\"[::]:8080\"` " -#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " -#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" -#~ " server_address 。" +#: ../../source/ref-faq.rst:20 +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" +msgstr "" +"是的,确实如此。请查看我们的 \"博客文章 `_\" 或查看代码示例:" -#~ msgid "" -#~ "Let's now load the CIFAR-10 training " -#~ "and test set, partition them into " -#~ "ten smaller datasets (each split into" -#~ " training and validation set), and " -#~ "wrap the resulting partitions by " -#~ "creating a PyTorch ``DataLoader`` for " -#~ "each of them:" -#~ msgstr "" -#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " -#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " -#~ "``DataLoader`` 来包装由此产生的分割集:" +#: ../../source/ref-faq.rst:22 +msgid "" +"`Android Kotlin example `_" +msgstr "`Android Kotlin 示例 `_" -#~ msgid "|e1dd4b4129b040bea23a894266227080|" -#~ msgstr "|e1dd4b4129b040bea23a894266227080|" +#: ../../source/ref-faq.rst:23 +msgid "`Android Java example `_" +msgstr "Android Java 示例 `_" -#~ msgid "|c0d4cc6a442948dca8da40d2440068d9|" -#~ msgstr "|c0d4cc6a442948dca8da40d2440068d9|" +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr ":fa:`eye,mr-1` 我可以将联邦学习与区块链结合起来吗?" -#~ msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" -#~ msgstr "|174e1e4fa1f149a19bfbc8bc1126f46a|" +#: ../../source/ref-faq.rst:27 +msgid "" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" -#~ msgid "|4e021a3dc08249d2a89daa3ab03c2714|" -#~ msgstr "|4e021a3dc08249d2a89daa3ab03c2714|" +#: ../../source/ref-faq.rst:30 +msgid "`FLock: A Decentralised AI Training Platform `_." 
+msgstr "" -#~ msgid "|e74a1d5ce7eb49688651f2167a59065b|" -#~ msgstr "|e74a1d5ce7eb49688651f2167a59065b|" +#: ../../source/ref-faq.rst:30 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" -#~ msgid "|eb29ec4c7aef4e93976795ed72df647e|" -#~ msgstr "|eb29ec4c7aef4e93976795ed72df647e|" +#: ../../source/ref-faq.rst:31 +#, fuzzy +msgid "Local blockchain with federated learning simulation." +msgstr "扩大联邦学习的规模" -#~ msgid "|c2f699d8ac484f5081721a6f1511f70d|" -#~ msgstr "|c2f699d8ac484f5081721a6f1511f70d|" +#: ../../source/ref-faq.rst:32 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." +msgstr "" +"`Flower meets Nevermined GitHub Repository `_." -#~ msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" -#~ msgstr "|cf42accdacbf4e5eb4fa0503108ba7a7|" +#: ../../source/ref-faq.rst:33 +msgid "" +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" +"`Flower meets Nevermined YouTube 视频 " +"`_." -#~ msgid "|5ec8356bc2564fa09178b1ceed5beccc|" -#~ msgstr "|5ec8356bc2564fa09178b1ceed5beccc|" +#: ../../source/ref-faq.rst:34 +#, fuzzy +msgid "" +"`Flower meets KOSMoS `_." +msgstr "" +"`Flower meets KOSMoS `_." -#~ msgid "|7c9329e97bd0430bad335ab605a897a7|" -#~ msgstr "|7c9329e97bd0430bad335ab605a897a7|" +#: ../../source/ref-faq.rst:35 +msgid "" +"`Flower meets Talan blog post `_ ." +msgstr "" +"`Flower meets Talan博文 `_ 。" -#~ msgid "|88002bbce1094ba1a83c9151df18f707|" -#~ msgstr "|88002bbce1094ba1a83c9151df18f707|" +#: ../../source/ref-faq.rst:36 +msgid "" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" +"`Flower meets Talan GitHub Repository " +"`_ ." 
-#~ msgid "|391766aee87c482c834c93f7c22225e2|" -#~ msgstr "|391766aee87c482c834c93f7c22225e2|" +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "遥测功能" -#~ msgid "|93b9a15bd27f4e91b40f642c253dfaac|" -#~ msgstr "|93b9a15bd27f4e91b40f642c253dfaac|" +#: ../../source/ref-telemetry.md:3 +msgid "" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." +msgstr "" +"Flower 开源项目收集**匿名**使用指标,以便在充分知情的情况下做出改进 Flower 的决定。这样做能让 Flower 团队了解 " +"Flower 的使用情况以及用户可能面临的挑战。" -#~ msgid "|a23d9638f96342ef9d25209951e2d564|" -#~ msgstr "|a23d9638f96342ef9d25209951e2d564|" +#: ../../source/ref-telemetry.md:5 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." 
+msgstr "**Flower 是一个用于协作式人工智能和数据科学的友好框架。** Flower 遵循这一声明,让不想分享匿名使用指标的用户可以轻松禁用遥测技术。" -#~ msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" -#~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "原则" -#~ msgid "" -#~ "Change ``!pip install -q 'flwr[simulation]'" -#~ " torch torchvision matplotlib`` to ``!pip" -#~ " install -q 'flwr-1.6.0-py3-none-" -#~ "any.whl[simulation]' torch torchvision matplotlib``" -#~ msgstr "" -#~ "将``!pip install -q 'flwr[simulation]' torch" -#~ " torchvision matplotlib``更改为``!pip install -q " -#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " -#~ "torch torchvision matplotlib``" +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "我们遵循严格的匿名使用指标收集原则:" -#~ msgid "" -#~ "All that's left to do it to " -#~ "define a function that loads both " -#~ "model and data, creates a " -#~ ":code:`CifarClient`, and starts this client." -#~ " You load your data and model " -#~ "by using :code:`cifar.py`. Start " -#~ ":code:`CifarClient` with the function " -#~ ":code:`fl.client.start_numpy_client()` by pointing " -#~ "it at the same IP address we " -#~ "used in :code:`server.py`:" -#~ msgstr "" -#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" -#~ " :code:`cifar.py` 加载数据和模型。使用函数 " -#~ ":code:`fl.client.start_numpy_client()` 启动 " -#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " -#~ "IP 地址:" +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." +msgstr "**可选:** 您始终可以禁用遥测功能;请继续阅读\"[如何退出](#how-to-opt-out)\"。" -#~ msgid "" -#~ "The :code:`VirtualClientEngine` schedules, launches" -#~ " and manages `virtual` clients. These " -#~ "clients are identical to `non-virtual`" -#~ " clients (i.e. 
the ones you launch" -#~ " via the command `flwr.client.start_numpy_client" -#~ " `_)" -#~ " in the sense that they can be" +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." +msgstr "" +"**匿名:** 报告的使用指标是匿名的,不包含任何个人身份信息 (PII)。请参阅\"[收集的指标](#collected-metrics) " +"\"了解报告的指标。" + +#: ../../source/ref-telemetry.md:13 +msgid "" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" +msgstr "" +"**透明:** 您可以轻松查看正在报告的匿名指标;请参阅\"[如何查看正在报告的指标](#how-to-inspect-what-is-" +"being-reported)\"部分" + +#: ../../source/ref-telemetry.md:14 +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." +msgstr "**欢迎反馈:** 如果您有反馈意见,可以随时联系我们;详情请参见\"[如何联系我们](#how-to-contact-us) \"部分。" + +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "如何退出" + +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" +msgstr "" +"Flower 启动时,会检查环境变量 `FLWR_TELEMETRY_ENABLED` 是否存在。通过设置 " +"`FLWR_TELEMETRY_ENABLED=0` 可以轻松禁用遥测功能。假设你启动的是 Flower " +"服务器或客户端,只需在命令前添加以下内容即可:" + +#: ../../source/ref-telemetry.md:24 +msgid "" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." 
+msgstr "" +"或者,你也可以在 `.bashrc`(或任何适用于你的环境的配置文件)中导出 `FLWR_TELEMETRY_ENABLED=0` 来永久禁用 " +"Flower telemetry。" + +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "收集的指标" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "Flower 遥测技术收集以下指标:" + +#: ../../source/ref-telemetry.md:30 +msgid "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." +msgstr "**了解目前使用的 Flower 版本。这有助于我们决定是否应该投入精力为旧版本的 Flower 发布补丁版本,还是利用带宽来构建新功能。" + +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "**操作系统**使我们能够回答以下问题: *我们应该为 Linux、macOS 还是 Windows 创建更多指南?*" + +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "**例如,了解 Python 版本有助于我们决定是否应该投入精力支持旧版本的 Python,还是停止支持这些版本并开始利用新的 Python 功能。" + +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." +msgstr "**硬件属性** 了解 Flower 的硬件使用环境,有助于决定我们是否应在支持低资源环境等方面投入更多精力。" + +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." 
+msgstr "** 执行模式** 了解 Flower 的启动执行模式,能让我们了解某些功能的使用率,并据此更好地确定优先级。" + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." +msgstr "" +"**每次 Flower 工作负载启动时,Flower 遥测都会随机分配一个内存集群 ID。这样,我们就能了解哪些设备类型不仅启动了 Flower " +"工作负载,而且还成功完成了它们。" + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." +msgstr "" +"**Source.** Flower 遥测会在第一次生成遥测事件时,尝试在 `~/.flwr/source` 中存储一个随机源 ID。源 ID " +"对于识别问题是否反复出现或问题是否由多个集群同时运行触发(这在模拟中经常发生)非常重要。例如,如果设备同时运行多个工作负载并导致问题,那么为了重现问题,必须同时启动多个工作负载。" + +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.ai`. All events " +"related to that source ID will then be permanently deleted." +msgstr "" +"您可以随时删除源 ID。如果您希望删除特定源 ID 下记录的所有事件,可以向 `telemetry@flower.ai` 发送删除请求,并提及该源" +" ID。届时,与该源 ID 相关的所有事件都将被永久删除。" + +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). 
We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." +msgstr "" +"我们不会收集任何个人身份信息。如果您认为所收集的任何指标可能以任何方式被滥用,请[与我们联系](#how-to-contact-" +"us)。我们将更新本页面,以反映对所收集指标的任何更改,并在更新日志中公布更改内容。" + +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." +msgstr "如果您认为其他指标有助于我们更好地指导决策,请告诉我们!我们将仔细审查这些指标;如果我们确信它们不会损害用户隐私,我们可能会添加这些指标。" + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "如何检查报告中的内容" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" +"我们希望能让您轻松查看所报告的匿名使用指标。通过设置环境变量 `FLWR_TELEMETRY_LOGGING=1` " +"可以查看所有报告的遥测信息。日志记录默认为禁用。您可以不使用 `FLWR_TELEMETRY_ENABLED` " +"而单独使用日志记录,这样就可以在不发送任何指标的情况下检查遥测功能。" + +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "在不发送任何匿名使用指标的情况下检查 Flower 遥测,可使用这两个环境变量:" + +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "如何联系我们" + +#: ../../source/ref-telemetry.md:66 +msgid "" +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.ai/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.ai`)." 
+msgstr "" +"我们希望听到您的意见。如果您对如何改进我们处理匿名使用指标的方式有任何反馈或想法,请通过 [Slack](https://flower.ai" +"/join-slack/) (频道 `#telemetry`)或电子邮件 (`telemetry@flower.ai`)与我们联系。" + +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." +msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 Android 应用程序。" + +#: ../../source/tutorial-quickstart-android.rst:4 +msgid "Quickstart Android" +msgstr "快速入门 Android" + +#: ../../source/tutorial-quickstart-android.rst:9 +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "让我们在 Android 上使用 TFLite 和 Flower 构建一个联邦学习系统!" + +#: ../../source/tutorial-quickstart-android.rst:11 +msgid "" +"Please refer to the `full code example " +"`_ to learn " +"more." +msgstr "" +"请参阅`完整代码示例 " +"`_了解更多信息。" + +#: ../../source/tutorial-quickstart-fastai.rst:4 +msgid "Quickstart fastai" +msgstr "快速入门 fastai" + +#: ../../source/tutorial-quickstart-fastai.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." 
+msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-fastai.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:11 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:18 +msgid "" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:31 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:32 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "并激活虚拟环境:" + +#: ../../source/tutorial-quickstart-fastai.rst:41 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:54 +#: ../../source/tutorial-quickstart-huggingface.rst:61 +#: ../../source/tutorial-quickstart-mlx.rst:60 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:55 +#: ../../source/tutorial-quickstart-pytorch.rst:62 +#: ../../source/tutorial-quickstart-tensorflow.rst:62 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:98 +#: ../../source/tutorial-quickstart-huggingface.rst:112 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:105 +#: ../../source/tutorial-quickstart-pytorch.rst:103 +#: ../../source/tutorial-quickstart-tensorflow.rst:103 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:108 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." 
+msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +#, fuzzy +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." +msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" + +#: ../../source/tutorial-quickstart-huggingface.rst:4 +msgid "Quickstart 🤗 Transformers" +msgstr "🤗 Transformers快速入门" + +#: ../../source/tutorial-quickstart-huggingface.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-huggingface.rst:12 +msgid "" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:17 +#: ../../source/tutorial-quickstart-mlx.rst:17 +#: ../../source/tutorial-quickstart-pytorch.rst:18 +#: ../../source/tutorial-quickstart-tensorflow.rst:18 +#, fuzzy +msgid "" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" +msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" + +#: ../../source/tutorial-quickstart-huggingface.rst:25 +msgid "" +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:33 +#: ../../source/tutorial-quickstart-mlx.rst:32 +#: ../../source/tutorial-quickstart-pytorch.rst:34 +#: ../../source/tutorial-quickstart-tensorflow.rst:34 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:47 +#: ../../source/tutorial-quickstart-mlx.rst:46 +#: ../../source/tutorial-quickstart-pytorch.rst:48 +#: ../../source/tutorial-quickstart-tensorflow.rst:48 +msgid "" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:54 +#: ../../source/tutorial-quickstart-pytorch.rst:55 +#: ../../source/tutorial-quickstart-tensorflow.rst:55 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:102 +msgid "You can also run the project with GPU as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:109 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:120 +#: ../../source/tutorial-quickstart-mlx.rst:110 +#: ../../source/tutorial-quickstart-pytorch.rst:111 +msgid "" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." 
+msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:115 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +#, fuzzy +msgid "The Data" +msgstr "加载数据" + +#: ../../source/tutorial-quickstart-huggingface.rst:126 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:171 +#: ../../source/tutorial-quickstart-mlx.rst:155 +#: ../../source/tutorial-quickstart-pytorch.rst:150 +#: ../../source/tutorial-quickstart-tensorflow.rst:139 +#, fuzzy +msgid "The Model" +msgstr "训练模型" + +#: ../../source/tutorial-quickstart-huggingface.rst:173 +#, fuzzy +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" 
+msgstr "" +"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " +"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" + +#: ../../source/tutorial-quickstart-huggingface.rst:185 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:188 +msgid "" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:228 +#: ../../source/tutorial-quickstart-mlx.rst:199 +#: ../../source/tutorial-quickstart-pytorch.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:168 +#, fuzzy +msgid "The ClientApp" +msgstr "客户端" + +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. 
The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:241 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:257 +#: ../../source/tutorial-quickstart-pytorch.rst:250 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:283 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." 
+msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:316 +#: ../../source/tutorial-quickstart-mlx.rst:361 +#: ../../source/tutorial-quickstart-pytorch.rst:307 +#: ../../source/tutorial-quickstart-tensorflow.rst:232 +#, fuzzy +msgid "The ServerApp" +msgstr "服务器" + +#: ../../source/tutorial-quickstart-huggingface.rst:318 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:356 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:361 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." +msgstr "阅读本联邦学习快速入门教程,了解如何使用 Flower 创建 iOS 应用程序,并在 MNIST 上训练神经网络。" + +#: ../../source/tutorial-quickstart-ios.rst:4 +msgid "Quickstart iOS" +msgstr "快速入门 iOS" + +#: ../../source/tutorial-quickstart-ios.rst:9 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." 
+msgstr "在本教程中,我们将学习如何在 iOS 设备上使用 Flower 和 CoreML 在 MNIST 上训练神经网络。" + +#: ../../source/tutorial-quickstart-ios.rst:12 +#, fuzzy +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `. For the Flower client " +"implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" +"首先,为了运行 Flower Python 服务器,建议创建一个虚拟环境,并在 `virtualenv " +"`_ 中运行一切。对于在 iOS 中实现 " +"Flower 客户端,建议使用 Xcode 作为我们的集成开发环境。" + +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "我们的示例包括一个 Python *服务器*和两个 iPhone *客户端*,它们都具有相同的模型。" + +#: ../../source/tutorial-quickstart-ios.rst:20 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "*客户端*负责根据其本地数据集为模型生成独立的模型参数。然后,这些参数更新会被发送到*服务器*,由*服务器*汇总后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的参数更新周期称为一*轮*。" + +#: ../../source/tutorial-quickstart-ios.rst:26 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. 
You" +" can do this by using pip:" +msgstr "现在我们已经有了一个大致的概念,让我们开始设置 Flower 服务器环境吧。首先,我们需要安装 Flower。你可以使用 pip 来安装:" + +#: ../../source/tutorial-quickstart-ios.rst:33 +msgid "Or Poetry:" +msgstr "或者Poetry:" + +#: ../../source/tutorial-quickstart-ios.rst:40 +#: ../../source/tutorial-quickstart-scikitlearn.rst:43 +#: ../../source/tutorial-quickstart-xgboost.rst:65 +msgid "Flower Client" +msgstr "Flower 客户端" + +#: ../../source/tutorial-quickstart-ios.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" +msgstr "" +"现在我们已经安装了所有依赖项,让我们使用 CoreML 作为本地训练框架和 MNIST " +"作为数据集,运行一个简单的分布式训练。为了简单起见,我们将使用 CoreML 的完整 Flower 客户端,该客户端已在 Swift SDK " +"中实现并存储。客户端实现如下:" + +#: ../../source/tutorial-quickstart-ios.rst:80 +#, fuzzy +msgid "" +"Let's create a new application project in Xcode and add ``flwr`` as a " +"dependency in your project. For our application, we will store the logic " +"of our app in ``FLiOSModel.swift`` and the UI elements in " +"``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` in this" +" quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." 
+msgstr "" +"让我们在 Xcode 中创建一个新的应用程序项目,并在项目中添加 :code:`flwr` 作为依赖关系。对于我们的应用程序,我们将在 " +":code:`FLiOSModel.swift` 中存储应用程序的逻辑,在 :code:`ContentView.swift` 中存储 UI " +"元素。在本快速入门中,我们将更多地关注 :code:`FLiOSModel.swift`。请参阅 `完整代码示例 " +"`_ 以了解更多有关应用程序的信息。" + +#: ../../source/tutorial-quickstart-ios.rst:86 +#, fuzzy +msgid "Import Flower and CoreML related packages in ``FLiOSModel.swift``:" +msgstr "在 :code:`FLiOSModel.swift` 中导入 Flower 和 CoreML 相关软件包:" + +#: ../../source/tutorial-quickstart-ios.rst:94 +#, fuzzy +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +"``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into" +" ``MLBatchProvider`` object. The preprocessing is done inside " +"``DataLoader.swift``." +msgstr "" +"然后通过拖放将 mlmodel 添加到项目中,在部署到 iOS 设备时,mlmodel 将被捆绑到应用程序中。我们需要传递 url 以访问 " +"mlmodel 并运行 CoreML 机器学习进程,可通过调用函数 :code:`Bundle.main.url` 获取。对于 MNIST " +"数据集,我们需要将其预处理为 :code:`MLBatchProvider` 对象。预处理在 :code:`DataLoader.swift` " +"中完成。" + +#: ../../source/tutorial-quickstart-ios.rst:112 +#, fuzzy +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"information beforehand, through looking at the model specification, which" +" are written as proto files. The implementation can be seen in " +"``MLModelInspect``." +msgstr "" +"由于 CoreML 不允许在训练前查看模型参数,而在训练过程中或训练后访问模型参数只能通过指定层名来完成,因此我们需要事先通过查看模型规范(写成 " +"proto 文件)来了解这些信息。具体实现可参见 :code:`MLModelInspect`。" + +#: ../../source/tutorial-quickstart-ios.rst:118 +#, fuzzy +msgid "" +"After we have all of the necessary information, let's create our Flower " +"client." 
+msgstr "获得所有必要信息后,让我们创建 Flower 客户端。" + +#: ../../source/tutorial-quickstart-ios.rst:133 +#, fuzzy +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function ``startFlwrGRPC``." +msgstr "然后启动 Flower gRPC 客户端,并通过将 Flower 客户端传递给函数 :code:`startFlwrGRPC` 来开始与服务器通信。" + +#: ../../source/tutorial-quickstart-ios.rst:141 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` or call " +"the provided ``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute" +" ``hostname`` and ``port`` tells the client which server to connect to. " +"This can be done by entering the hostname and port in the application " +"before clicking the start button to start the federated learning process." +msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或调用提供的 :code:`MLFlwrClient` 并调用 " +":code:`startFlwrGRPC()`。属性 :code:`hostname` 和 :code:`port` " +"会告诉客户端要连接到哪个服务器。这可以通过在应用程序中输入主机名和端口来实现,然后再点击开始按钮启动联邦学习进程。" + +#: ../../source/tutorial-quickstart-ios.rst:148 +#: ../../source/tutorial-quickstart-scikitlearn.rst:179 +#: ../../source/tutorial-quickstart-xgboost.rst:358 +msgid "Flower Server" +msgstr "Flower 服务器" + +#: ../../source/tutorial-quickstart-ios.rst:150 +#, fuzzy +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. In a file named " +"``server.py``, import Flower and start the server:" +msgstr "" +"对于简单的工作负载,我们可以启动 Flower 服务器,并将所有配置选项保留为默认值。在名为 :code:`server.py` 的文件中,导入 " +"Flower 并启动服务器:" + +#: ../../source/tutorial-quickstart-ios.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:254 +msgid "Train the model, federated!" +msgstr "联邦训练模型!" + +#: ../../source/tutorial-quickstart-ios.rst:163 +#: ../../source/tutorial-quickstart-xgboost.rst:590 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. 
FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "客户端和服务器都已准备就绪,我们现在可以运行一切,看看联邦学习的实际效果。FL 系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" + +#: ../../source/tutorial-quickstart-ios.rst:171 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" +"服务器运行后,我们就可以在不同的终端启动客户端。通过 Xcode 构建并运行客户端,一个通过 Xcode 模拟器,另一个通过部署到 " +"iPhone。要了解更多有关如何将应用程序部署到 iPhone 或模拟器的信息,请访问 `此处 " +"`_。" + +#: ../../source/tutorial-quickstart-ios.rst:177 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in ``examples/ios``." +msgstr "" +"恭喜您! 您已经成功地在 ios 设备中构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可在 " +":code:`examples/ios` 中找到。" + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Jax 在 scikit-learn 数据集上训练线性回归模型。" + +#: ../../source/tutorial-quickstart-jax.rst:4 +msgid "Quickstart JAX" +msgstr "快速入门 JAX" + +#: ../../source/tutorial-quickstart-jax.rst:9 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. 
Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" +"本教程将向您展示如何使用 Flower 构建现有 JAX 的联邦学习版本。我们将使用 JAX 在 scikit-learn " +"数据集上训练线性回归模型。我们将采用与 `PyTorch - 从集中式到联邦式 " +"`_ 教程中类似的示例结构。首先,我们根据 `JAX 的线性回归 " +"`_" +" 教程构建集中式训练方法。然后,我们在集中式训练代码的基础上以联邦方式运行训练。" + +#: ../../source/tutorial-quickstart-jax.rst:20 +#, fuzzy +msgid "" +"Before we start building our JAX example, we need install the packages " +"``jax``, ``jaxlib``, ``scikit-learn``, and ``flwr``:" +msgstr "" +"在开始构建 JAX 示例之前,我们需要安装软件包 :code:`jax`、:code:`jaxlib`、:code:`scikit-learn` " +"和 :code:`flwr`:" + +#: ../../source/tutorial-quickstart-jax.rst:28 +msgid "Linear Regression with JAX" +msgstr "使用 JAX 进行线性回归" + +#: ../../source/tutorial-quickstart-jax.rst:30 +#, fuzzy +msgid "" +"We begin with a brief description of the centralized training code based " +"on a ``Linear Regression`` model. If you want a more in-depth explanation" +" of what's going on then have a look at the official `JAX documentation " +"`_." +msgstr "" +"首先,我们将简要介绍基于 :code:`Linear Regression` 模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 " +"`JAX 文档 `_。" + +#: ../../source/tutorial-quickstart-jax.rst:34 +#, fuzzy +msgid "" +"Let's create a new file called ``jax_training.py`` with all the " +"components required for a traditional (centralized) linear regression " +"training. First, the JAX packages ``jax`` and ``jaxlib`` need to be " +"imported. In addition, we need to import ``sklearn`` since we use " +"``make_regression`` for the dataset and ``train_test_split`` to split the" +" dataset into a training and test set. You can see that we do not yet " +"import the ``flwr`` package for federated learning. This will be done " +"later." 
+msgstr "" +"让我们创建一个名为 :code:`jax_training.py` 的新文件,其中包含传统(集中式)线性回归训练所需的所有组件。首先,需要导入 " +"JAX 包 :code:`jax` 和 :code:`jaxlib`。此外,我们还需要导入 :code:`sklearn`,因为我们使用 " +":code:`make_regression` 创建数据集,并使用 :code:`train_test_split` " +"将数据集拆分成训练集和测试集。您可以看到,我们还没有导入用于联邦学习的 :code:`flwr` 软件包,这将在稍后完成。" + +#: ../../source/tutorial-quickstart-jax.rst:51 +#, fuzzy +msgid "The ``load_data()`` function loads the mentioned training and test sets." +msgstr ":code:`load_data()` 函数会加载上述训练集和测试集。" + +#: ../../source/tutorial-quickstart-jax.rst:63 +#, fuzzy +msgid "" +"The model architecture (a very simple ``Linear Regression`` model) is " +"defined in ``load_model()``." +msgstr "模型结构(一个非常简单的 :code:`Linear Regression` 线性回归模型)在 :code:`load_model()` 中定义。" + +#: ../../source/tutorial-quickstart-jax.rst:73 +#, fuzzy +msgid "" +"We now need to define the training (function ``train()``), which loops " +"over the training set and measures the loss (function ``loss_fn()``) for " +"each batch of training examples. The loss function is separate since JAX " +"takes derivatives with a ``grad()`` function (defined in the ``main()`` " +"function and called in ``train()``)." +msgstr "" +"现在,我们需要定义训练函数( :code:`train()`)。它循环遍历训练集,并计算每批训练数据的损失值(函数 " +":code:`loss_fn()`)。由于 JAX 使用 :code:`grad()` 函数提取导数(在 :code:`main()` " +"函数中定义,并在 :code:`train()` 中调用),因此损失函数是独立的。" + +#: ../../source/tutorial-quickstart-jax.rst:95 +#, fuzzy +msgid "" +"The evaluation of the model is defined in the function ``evaluation()``. " +"The function takes all test examples and measures the loss of the linear " +"regression model." +msgstr "模型的评估在函数 :code:`evaluation()` 中定义。该函数获取所有测试数据,并计算线性回归模型的损失值。" + +#: ../../source/tutorial-quickstart-jax.rst:107 +#, fuzzy +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the ``jax.grad()`` function is defined in " +"``main()`` and passed to ``train()``." 
+msgstr "" +"在定义了数据加载、模型架构、训练和评估之后,我们就可以把这些放在一起,使用 JAX " +"训练我们的模型了。如前所述,:code:`jax.grad()` 函数在 :code:`main()` 中定义,并传递给 " +":code:`train()`。" + +#: ../../source/tutorial-quickstart-jax.rst:126 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "现在您可以运行(集中式)JAX 线性回归工作了:" + +#: ../../source/tutorial-quickstart-jax.rst:132 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. " +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." +msgstr "到目前为止,如果你以前使用过 JAX,就会对这一切感到很熟悉。下一步,让我们利用已构建的代码创建一个简单的联邦学习系统(一个服务器和两个客户端)。" + +#: ../../source/tutorial-quickstart-jax.rst:137 +msgid "JAX meets Flower" +msgstr "JAX 结合 Flower" + +#: ../../source/tutorial-quickstart-jax.rst:139 +#, fuzzy +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +"``jax_training.py`` for the *clients* that are connected to the *server*." +" The *server* sends model parameters to the clients. The *clients* run " +"the training and update the parameters. The updated parameters are sent " +"back to the *server*, which averages all received parameter updates. This" +" describes one round of the federated learning process, and we repeat " +"this for multiple rounds." +msgstr "" +"把现有工作联邦化的概念始终是相同的,也很容易理解。我们要启动一个*服务器*,然后对连接到*服务器*的*客户端*运行 " +":code:`jax_training.py`中的代码。*服务器*向客户端发送模型参数,*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后服务器对所有收到的参数进行平均聚合。以上的描述构成了一轮联邦学习,我们将重复进行多轮学习。" + +#: ../../source/tutorial-quickstart-jax.rst:167 +#, fuzzy +msgid "" +"Finally, we will define our *client* logic in ``client.py`` and build " +"upon the previously defined JAX training in ``jax_training.py``. 
Our " +"*client* needs to import ``flwr``, but also ``jax`` and ``jaxlib`` to " +"update the parameters on our JAX model:" +msgstr "" +"最后,我们将在 :code:`client.py` 中定义我们的 *client* 逻辑,并以之前在 " +":code:`jax_training.py` 中定义的 JAX 训练为基础。我们的 *client* 需要导入 " +":code:`flwr`,还需要导入 :code:`jax` 和 :code:`jaxlib` 以更新 JAX 模型的参数:" + +#: ../../source/tutorial-quickstart-jax.rst:182 +#, fuzzy +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either ``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our " +"implementation will be based on ``flwr.client.NumPyClient`` and we'll " +"call it ``FlowerClient``. ``NumPyClient`` is slightly easier to implement" +" than ``Client`` if you use a framework with good NumPy interoperability " +"(like JAX) because it avoids some of the boilerplate that would otherwise" +" be necessary. ``FlowerClient`` needs to implement four methods, two " +"methods for getting/setting model parameters, one method for training the" +" model, and one method for testing the model:" +msgstr "" +"实现一个 Flower *client*基本上意味着去实现一个 :code:`flwr.client.Client` 或 " +":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +":code:`flwr.client.NumPyClient`,并将其命名为 :code:`FlowerClient`。如果使用具有良好 " +"NumPy 互操作性的框架(如 JAX),:code:`NumPyClient` 比 " +":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " +"需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" + +#: ../../source/tutorial-quickstart-jax.rst:194 +#, fuzzy +msgid "``set_parameters (optional)``" +msgstr ":code:`set_parameters (可选)`" + +#: ../../source/tutorial-quickstart-jax.rst:193 +#, fuzzy +msgid "transform parameters to NumPy ``ndarray``'s" +msgstr "将参数转换为 NumPy :code:`ndarray`格式" + +#: ../../source/tutorial-quickstart-jax.rst:203 +msgid "get the updated local model parameters and return them to the server" +msgstr "获取更新后的本地模型参数并返回服务器" + +#: ../../source/tutorial-quickstart-jax.rst:208 +msgid "return the local loss to the server" +msgstr "向服务器返回本地损失值" + +#: 
../../source/tutorial-quickstart-jax.rst:210 +#, fuzzy +msgid "" +"The challenging part is to transform the JAX model parameters from " +"``DeviceArray`` to ``NumPy ndarray`` to make them compatible with " +"`NumPyClient`." +msgstr "" +"具有挑战性的部分是将 JAX 模型参数从 :code:`DeviceArray` 转换为 :code:`NumPy ndarray`,使其与 " +"`NumPyClient` 兼容。" + +#: ../../source/tutorial-quickstart-jax.rst:213 +#, fuzzy +msgid "" +"The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the " +"functions ``train()`` and ``evaluate()`` previously defined in " +"``jax_training.py``. So what we really do here is we tell Flower through " +"our ``NumPyClient`` subclass which of our already defined functions to " +"call for training and evaluation. We included type annotations to give " +"you a better understanding of the data types that get passed around." +msgstr "" +"这两个 :code:`NumPyClient` 方法 :code:`fit` 和 :code:`evaluate` 使用了之前在 " +":code:`jax_training.py` 中定义的函数 :code:`train()` 和 " +":code:`evaluate()`。因此,我们在这里要做的就是通过 :code:`NumPyClient` 子类告知 Flower " +"在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" + +#: ../../source/tutorial-quickstart-jax.rst:286 +msgid "Having defined the federation process, we can run it." +msgstr "定义了联邦进程后,我们就可以运行它了。" + +#: ../../source/tutorial-quickstart-jax.rst:315 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "确保服务器仍在运行,然后在每个客户端窗口就能看到你的 JAX 项目在两个客户端上运行联邦学习了。祝贺!" + +#: ../../source/tutorial-quickstart-jax.rst:321 +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" +"此示例的源代码经过长期改进,可在此处找到: `Quickstart JAX " +"`_。我们的示例有些过于简单,因为两个客户端都加载了相同的数据集。" + +#: ../../source/tutorial-quickstart-jax.rst:325 +msgid "" +"You're now prepared to explore this topic further. 
How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" +msgstr "现在,您已准备好进行更深一步探索了。例如使用更复杂的模型或使用不同的数据集会如何?增加更多客户端会如何?" + +#: ../../source/tutorial-quickstart-mlx.rst:4 +#, fuzzy +msgid "Quickstart MLX" +msgstr "快速入门 JAX" + +#: ../../source/tutorial-quickstart-mlx.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train simple MLP" +" on MNIST using Flower and MLX. It is recommended to create a virtual " +"environment and run everything within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-mlx.rst:10 +msgid "" +"Let's use `flwr new` to create a complete Flower+MLX project. It will " +"generate all the files needed to run, by default with the Simulation " +"Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:25 +msgid "" +"Then, run the command below. You will be prompted to select of the " +"available templates (choose ``MLX``), give a name to your project, and " +"type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:53 +msgid "To run the project do:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:102 +msgid "" +"You can also override the parameters defined in " +"``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:116 +msgid "" +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. 
You can choose `other " +"partitioners `_ available in Flower Datasets:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:157 +msgid "" +"We define the model as in the `centralized MLX example " +"`_, it's a " +"simple MLP:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:180 +msgid "" +"We also define some utility functions to test our model and to iterate " +"over batches." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:201 +msgid "" +"The main changes we have to make to use `MLX` with `Flower` will be found" +" in the ``get_params()`` and ``set_params()`` functions. Indeed, MLX " +"doesn't provide an easy way to convert the model parameters into a list " +"of ``np.array`` objects (the format we need for the serialization of the " +"messages to work)." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:206 +msgid "The way MLX stores its parameters is as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:219 +msgid "" +"Therefore, to get our list of ``np.array`` objects, we need to extract " +"each array and convert them into a NumPy array:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:228 +msgid "" +"For the ``set_params()`` function, we perform the reverse operation. We " +"receive a list of NumPy arrays and want to convert them into MLX " +"parameters. Therefore, we iterate through pairs of parameters and assign " +"them to the `weight` and `bias` keys of each layer dict:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:243 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:259 +msgid "" +"Here, after updating the parameters, we perform the training as in the " +"centralized case, and return the new parameters." 
+msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:262 +msgid "And for the ``evaluate()`` method of the client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:272 +msgid "" +"We also begin by updating the parameters with the ones sent by the " +"server, and then we compute the loss and accuracy using the functions " +"defined above. In the constructor of the ``FlowerClient`` we instantiate " +"the `MLP` model as well as other components such as the optimizer." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:277 +#, fuzzy +msgid "Putting everything together we have:" +msgstr "把所有东西放在一起" + +#: ../../source/tutorial-quickstart-mlx.rst:331 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that " +"``context`` enables you to get access to hyperparemeters defined in " +"``pyproject.toml`` to configure the run. In this tutorial we access, " +"among other hyperparameters, the ``local-epochs`` setting to control the " +"number of epochs a ``ClientApp`` will perform when running the ``fit()`` " +"method." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:363 +msgid "" +"To construct a ``ServerApp``, we define a ``server_fn()`` callback with " +"an identical signature to that of ``client_fn()``, but the return type is" +" `ServerAppComponents `_ as " +"opposed to `Client `_. In this example we use the " +"``FedAvg`` strategy." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:386 +#: ../../source/tutorial-quickstart-pytorch.rst:344 +#: ../../source/tutorial-quickstart-tensorflow.rst:266 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system." +msgstr "" + +#: ../../source/tutorial-quickstart-mlx.rst:390 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in ``examples" +"/quickstart-mlx`` in the Flower GitHub repository." 
+msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 Pandas 执行联邦分析。" + +#: ../../source/tutorial-quickstart-pandas.rst:4 +msgid "Quickstart Pandas" +msgstr "快速入门Pandas" + +#: ../../source/tutorial-quickstart-pandas.rst:9 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "让我们使用 Pandas 和 Flower 建立一个联邦分析系统!" + +#: ../../source/tutorial-quickstart-pandas.rst:11 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" +"请参阅 `完整代码示例 `_\" 了解更多信息。" + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch 在 MNIST 上训练 CNN 模型。" + +#: ../../source/tutorial-quickstart-pytorch.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-pytorch.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+PyTorch project. It will" +" generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:26 +msgid "" +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``PyTorch``), give a name to your project, " +"and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:117 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create dataloaders with the data" +" that correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:152 +msgid "" +"We defined a simple Convolutional Neural Network (CNN), but feel free to " +"replace it with a more sophisticated model if you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:177 +msgid "" +"In addition to defining the model architecture, we also include two " +"utility functions to perform both training (i.e. ``train()``) and " +"evaluation (i.e. ``test()``) using the above model. These functions " +"should look fairly familiar if you have some prior experience with " +"PyTorch. Note these functions do not have anything specific to Flower. " +"That being said, the training function will normally be called, as we'll " +"see later, from a Flower client passing its own data. In summary, your " +"clients can use standard training/testing functions to perform local " +"training or evaluation:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:226 +msgid "" +"The main changes we have to make to use `PyTorch` with `Flower` will be " +"found in the ``get_weights()`` and ``set_weights()`` functions. In " +"``get_weights()`` PyTorch model parameters are extracted and represented " +"as a list of NumPy arrays. 
The ``set_weights()`` function that's the " +"oposite: given a list of NumPy arrays it applies them to an existing " +"PyTorch model. Doing this in fairly easy in PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:282 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"`local-epochs` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additioinal hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:309 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will server as " +"the global model to federated. Note that the value of ``fraction_fit`` is" +" read from the run config. You can find the default value defined in the " +"``pyproject.toml``." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:348 +#, fuzzy +msgid "" +"Check the `source code `_ of the extended version of this tutorial in " +"``examples/quickstart-pytorch`` in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-pytorch.rst:354 +#: ../../source/tutorial-quickstart-tensorflow.rst:278 +#, fuzzy +msgid "Video tutorial" +msgstr "教程" + +#: ../../source/tutorial-quickstart-pytorch.rst:358 +msgid "" +"The video shown below shows how to setup a PyTorch + Flower project using" +" our previously recommended APIs. 
A new video tutorial will be released " +"that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:4 +msgid "Quickstart PyTorch Lightning" +msgstr "快速入门 PyTorch Lightning" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:6 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:19 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:42 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:93 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:115 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." 
+msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 scikit-learn 训练线性回归模型。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:4 +msgid "Quickstart scikit-learn" +msgstr "scikit-learn快速入门" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:9 +#, fuzzy +msgid "" +"In this tutorial, we will learn how to train a ``Logistic Regression`` " +"model on MNIST using Flower and scikit-learn." +msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 scikit-learn 在 MNIST 上训练一个 :code:`Logistic " +"Regression` 模型。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +#, fuzzy +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this :doc:`virtualenv `." +msgstr "" +"建议创建一个虚拟环境,并在此 `virtualenv `_ 中运行所有内容。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:15 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "我们的例子包括一个*服务器*和两个*客户端*,它们都有相同的模型。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:17 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。然后,这些参数更新将被发送到*服务器*,由*服务器*汇总后生成一个更新的全局模型。最后,*服务器*将这一改进版模型发回给每个*客户端*。一个完整的参数更新周期称为一*轮*。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running:" +msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:30 +#, fuzzy +msgid "Since we want to use scikit-learn, let's go ahead and install it:" +msgstr "既然我们要使用 scikit-learn,那就继续安装吧:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:36 +msgid "Or simply install all dependencies using Poetry:" +msgstr "或者直接使用 Poetry 安装所有依赖项:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +#, fuzzy +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within ``utils.py``. The " +"``utils.py`` contains different functions defining all the machine " +"learning basics:" +msgstr "" +"现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。不过,在设置客户端和服务器之前,我们将在 " +":code:`utils.py` 中定义联邦学习设置所需的所有功能。:code:`utils.py`包含定义所有机器学习基础知识的不同函数:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +#, fuzzy +msgid "``get_model_parameters()``" +msgstr ":code:`get_model_parameters()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +#, fuzzy +msgid "Returns the parameters of a ``sklearn`` LogisticRegression model" +msgstr "返回 :code:`sklearn` LogisticRegression 模型的参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +#, fuzzy +msgid "``set_model_params()``" +msgstr ":code:`set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +#, fuzzy +msgid "Sets the parameters of a ``sklearn`` LogisticRegression model" +msgstr "设置:code:`sklearn`的LogisticRegression模型的参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +#, fuzzy +msgid "``set_initial_params()``" +msgstr ":code:`set_initial_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "初始化 Flower 
服务器将要求的模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +#, fuzzy +msgid "" +"Please check out ``utils.py`` `here " +"`_ for more details. The pre-defined functions are used in" +" the ``client.py`` and imported. The ``client.py`` also requires to " +"import several packages such as Flower and scikit-learn:" +msgstr "" +"更多详情请查看 :code:`utils.py`` 这里 " +"`_。在 :code:`client.py` 中使用并导入了预定义函数。:code:`client.py` " +"还需要导入几个软件包,如 Flower 和 scikit-learn:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:75 +#, fuzzy +msgid "" +"Prior to local training, we need to load the MNIST dataset, a popular " +"image classification dataset of handwritten digits for machine learning, " +"and partition the dataset for FL. This can be conveniently achieved using" +" `Flower Datasets `_. The " +"``FederatedDataset.load_partition()`` method loads the partitioned " +"training set for each partition ID defined in the ``--partition-id`` " +"argument." +msgstr "" +"在本地训练之前,我们需要加载 MNIST 数据集(一个用于机器学习的流行手写数字图像分类数据集),并对数据集进行 FL 分区。使用 " +"\"Flower Datasets " +"`_\"可以方便地实现这一点。:code:`FederatedDataset.load_partition()`" +" 方法为 :code:`--partition-id` 参数中定义的每个分区 ID 加载分区训练集。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:106 +#, fuzzy +msgid "" +"Next, the logistic regression model is defined and initialized with " +"``utils.set_initial_params()``." +msgstr "接下来,使用 :code:`utils.set_initial_params()` 对逻辑回归模型进行定义和初始化。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:119 +#, fuzzy +msgid "" +"The Flower server interacts with clients through an interface called " +"``Client``. When the server selects a particular client for training, it " +"sends training instructions over the network. The client receives those " +"instructions and calls one of the ``Client`` methods to run your code " +"(i.e., to fit the logistic regression we defined earlier)." 
+msgstr "" +"Flower 服务器通过一个名为 :code:`Client` " +"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" +" 方法之一来运行您的代码(即拟合我们之前定义的逻辑回归)。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:124 +#, fuzzy +msgid "" +"Flower provides a convenience class called ``NumPyClient`` which makes it" +" easier to implement the ``Client`` interface when your workload uses " +"scikit-learn. Implementing ``NumPyClient`` usually means defining the " +"following methods (``set_parameters`` is optional though):" +msgstr "" +"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当你的工作负载使用 scikit-learn " +"时,它可以让你更容易地实现 :code:`Client` 接口。实现 :code:`NumPyClient` " +"通常意味着定义以下方法(:code:`set_parameters` 是可选的):" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:130 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "以 NumPy ndarrays 列表形式返回模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +#, fuzzy +msgid "``set_parameters`` (optional)" +msgstr ":code:`set_parameters` (可选)" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:132 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "用从服务器接收到的参数更新本地模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:133 +#, fuzzy +msgid "is directly imported with ``utils.set_model_params()``" +msgstr "直接导入 :code:`utils.set_model_params()`" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:135 +msgid "set the local model weights" +msgstr "设置本地模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:136 +msgid "train the local model" +msgstr "训练本地模型" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:137 +#, fuzzy +msgid "return the updated local model weights" +msgstr "返回更新后的本地模型参数" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:139 +msgid "test the local model" +msgstr "测试本地模型" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:141 +msgid "The methods can be implemented in the following way:" +msgstr "这些方法可以通过以下方式实现:" + +#: 
../../source/tutorial-quickstart-scikitlearn.rst:163 +#, fuzzy +msgid "" +"We can now create an instance of our class ``MnistClient`` and add one " +"line to actually run this client:" +msgstr "现在我们可以创建一个 :code:`MnistClient` 类的实例,并添加一行来实际运行该客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:170 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` or " +"``NumPyClient`` and call ``fl.client.start_client()``. If you implement a" +" client of type ``NumPyClient`` you'll need to first call its " +"``to_client()`` method. The string ``\"0.0.0.0:8080\"`` tells the client " +"which server to connect to. In our case we can run the server and the " +"client on the same machine, therefore we use ``\"0.0.0.0:8080\"``. If we " +"run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" pass to the client." +msgstr "" +"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " +":code:`fl.client.start_client()` 或 " +":code:`fl.client.start_numpy_client()`。字符串 " +":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +" :code:`server_address`。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:181 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." 
+msgstr "" +"下面的 Flower 服务器更先进一些,会返回一个用于服务器端评估的评估函数。首先,我们再次导入所有需要的库,如 Flower 和 scikit-" +"learn。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:185 +#, fuzzy +msgid "``server.py``, import Flower and start the server:" +msgstr ":code:`server.py`, 导入 Flower 并启动服务器:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +#, fuzzy +msgid "" +"The number of federated learning rounds is set in ``fit_round()`` and the" +" evaluation is defined in ``get_evaluate_fn()``. The evaluation function " +"is called after each federated learning round and gives you information " +"about loss and accuracy. Note that we also make use of Flower Datasets " +"here to load the test split of the MNIST dataset for server-side " +"evaluation." +msgstr "" +"联邦学习轮数在 :code:`fit_round()` 中设置,评估在 :code:`get_evaluate_fn()` " +"中定义。每轮联邦学习后都会调用评估函数,并提供有关损失值和准确率的信息。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:228 +#, fuzzy +msgid "" +"The ``main`` contains the server-side parameter initialization " +"``utils.set_initial_params()`` as well as the aggregation strategy " +"``fl.server.strategy:FedAvg()``. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" ``fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))``." +msgstr "" +":code:`main`包含服务器端参数初始化:code:`utils.set_initial_params()`以及聚合策略 " +":code:`fl.server.strategy:FedAvg()`。该策略是默认的联邦平均(或 " +"FedAvg)策略,有两个客户端,在每轮联邦学习后进行评估。可以使用 " +":code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))` 命令启动服务器。" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:256 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. 
We, therefore, have to start the server " +"first:" +msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:264 +#: ../../source/tutorial-quickstart-xgboost.rst:598 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Open a new terminal and start the first client:" +msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +#: ../../source/tutorial-quickstart-xgboost.rst:605 +msgid "Open another terminal and start the second client:" +msgstr "打开另一台终端,启动第二个客户端:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:277 +#: ../../source/tutorial-quickstart-xgboost.rst:611 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "每个客户端都有自己的数据集。现在你应该看到第一个终端(启动服务器的终端)的训练效果了:" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:311 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in ``examples/sklearn-logreg-" +"mnist``." +msgstr "" +"恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +"`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#, fuzzy +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a CNN model on CIFAR-10." +msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:4 +msgid "Quickstart TensorFlow" +msgstr "快速入门 TensorFlow" + +#: ../../source/tutorial-quickstart-tensorflow.rst:6 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. 
First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:11 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:26 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +msgid "" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:141 +msgid "" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:170 +msgid "" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. 
Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:203 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:234 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:270 +#, fuzzy +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-tensorflow.rst:282 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." 
+msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 XGBoost 上训练分类模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:4 +msgid "Quickstart XGBoost" +msgstr "XGBoost快速入门" + +#: ../../source/tutorial-quickstart-xgboost.rst:13 +msgid "Federated XGBoost" +msgstr "联邦化 XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:15 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." +msgstr "" +"EXtreme Gradient " +"Boosting(**XGBoost**)是梯度提升决策树(**GBDT**)的一种稳健而高效的实现方法,能最大限度地提高提升树方法的计算边界。它主要用于提高机器学习模型的性能和计算速度。在" +" XGBoost 中,决策树是并发构建的,与 GBDT 采用的顺序方法不同。" + +#: ../../source/tutorial-quickstart-xgboost.rst:21 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "对于训练示例少于 10k 的中型数据集上的表格数据,XGBoost 的结果往往超过深度学习技术。" + +#: ../../source/tutorial-quickstart-xgboost.rst:25 +msgid "Why federated XGBoost?" +msgstr "为什么选择联邦 XGBoost?" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "事实上,随着对数据隐私和分散学习的需求不断增长,越来越多的专业应用(如生存分析和金融欺诈检测)需要实施联邦 XGBoost 系统。" + +#: ../../source/tutorial-quickstart-xgboost.rst:31 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. 
Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" +"联邦学习可确保原始数据保留在本地设备上,因此对于数据安全和隐私至关重要的敏感领域来说,这是一种极具吸引力的方法。鉴于 XGBoost " +"的稳健性和高效性,将其与联邦学习相结合为应对这些特定挑战提供了一种前景广阔的解决方案。" + +#: ../../source/tutorial-quickstart-xgboost.rst:36 +#, fuzzy +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and ``xgboost`` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" +"在本教程中,我们将学习如何使用 Flower 和 :code:`xgboost` 软件包在 HIGGS 数据集上训练联邦 XGBoost " +"模型。我们将使用一个包含两个 * 客户端* 和一个 * 服务器* 的简单示例 (`完整代码 xgboost-quickstart " +"`_)来演示联邦 XGBoost 如何工作,然后我们将深入到一个更复杂的示例 (`完整代码 xgboost-" +"comprehensive `_),以运行各种实验。" + +#: ../../source/tutorial-quickstart-xgboost.rst:46 +msgid "Environment Setup" +msgstr "环境设定" + +#: ../../source/tutorial-quickstart-xgboost.rst:48 +#, fuzzy +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-xgboost.rst:51 +msgid "" +"We first need to install Flower and Flower Datasets. You can do this by " +"running :" +msgstr "我们首先需要安装 Flower 和 Flower Datasets。您可以通过运行 :" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +#, fuzzy +msgid "" +"Since we want to use ``xgboost`` package to build up XGBoost trees, let's" +" go ahead and install ``xgboost``:" +msgstr "既然我们要使用 :code:`xgboost` 软件包来构建 XGBoost 树,那就继续安装 :code:`xgboost`:" + +#: ../../source/tutorial-quickstart-xgboost.rst:67 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. 
Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." +msgstr "*客户端*负责根据其本地数据集为模型生成单独的模型参数更新。现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。" + +#: ../../source/tutorial-quickstart-xgboost.rst:71 +#, fuzzy +msgid "" +"In a file called ``client.py``, import xgboost, Flower, Flower Datasets " +"and other related functions:" +msgstr "在名为 :code:`client.py` 的文件中,导入 xgboost、Flower、Flower Datasets 和其他相关函数:" + +#: ../../source/tutorial-quickstart-xgboost.rst:99 +msgid "Dataset partition and hyper-parameter selection" +msgstr "数据集划分和超参数选择" + +#: ../../source/tutorial-quickstart-xgboost.rst:101 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "在本地训练之前,我们需要从 Flower Datasets 加载 HIGGS 数据集,并对 FL 进行数据分区:" + +#: ../../source/tutorial-quickstart-xgboost.rst:115 +#, fuzzy +msgid "" +"In this example, we split the dataset into 30 partitions with uniform " +"distribution (``IidPartitioner(num_partitions=30)``). Then, we load the " +"partition for the given client based on ``partition_id``:" +msgstr "" +"在此示例中,我们将数据集分割成 30 个均匀分布的分区(:code:`IidPartitioner(num_partitions=30)`)。然后,我们根据" +" :code:`partition_id` 为给定客户端加载分区:" + +#: ../../source/tutorial-quickstart-xgboost.rst:135 +#, fuzzy +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for ``xgboost`` package." +msgstr "然后,我们在给定的分区(客户端的本地数据)上进行训练/测试分割,并为 :code:`xgboost` 软件包转换数据格式。" + +#: ../../source/tutorial-quickstart-xgboost.rst:149 +#, fuzzy +msgid "" +"The functions of ``train_test_split`` and " +"``transform_dataset_to_dmatrix`` are defined as below:" +msgstr ":code:`train_test_split` 和 :code:`transform_dataset_to_dmatrix` 的函数定义如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:174 +msgid "Finally, we define the hyper-parameters used for XGBoost training."
+msgstr "最后,我们定义了用于 XGBoost 训练的超参数。" + +#: ../../source/tutorial-quickstart-xgboost.rst:190 +#, fuzzy +msgid "" +"The ``num_local_round`` represents the number of iterations for local " +"tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting ``tree_method`` to ``gpu_hist``. We use AUC as evaluation " +"metric." +msgstr "" +"代码:`num_local_round`表示本地树的迭代次数。我们默认使用 CPU 进行训练。可以通过将 :code:`tree_method` " +"设置为 :code:`gpu_hist`,将其转换为 GPU。我们使用 AUC 作为评估指标。" + +#: ../../source/tutorial-quickstart-xgboost.rst:195 +msgid "Flower client definition for XGBoost" +msgstr "用于 XGBoost 的 Flower 客户端定义" + +#: ../../source/tutorial-quickstart-xgboost.rst:197 +#, fuzzy +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define ``XgbClient`` class inherited from " +"``fl.client.Client``." +msgstr "" +"加载数据集后,我们定义 Flower 客户端。我们按照一般规则定义从 :code:`fl.client.Client` 继承而来的 " +":code:`XgbClient` 类。" + +#: ../../source/tutorial-quickstart-xgboost.rst:219 +msgid "" +"All required parameters defined above are passed to ``XgbClient``'s " +"constructor." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:221 +#, fuzzy +msgid "" +"Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods " +"insides ``XgbClient`` class as follows." +msgstr "" +"然后,我们在 :code:`XgbClient` 类中重写 :code:`get_parameters`、:code:`fit` 和 " +":code:`evaluate` 方法如下。" + +#: ../../source/tutorial-quickstart-xgboost.rst:236 +#, fuzzy +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use ``get_parameters`` " +"and ``set_parameters`` to initialise model parameters for XGBoost. As a " +"result, let's return an empty tensor in ``get_parameters`` when it is " +"called by the server at the first round." 
+msgstr "" +"与神经网络训练不同,XGBoost 树不是从指定的随机参数开始的。在这种情况下,我们不使用 :code:`get_parameters` 和 " +":code:`set_parameters` 来初始化 XGBoost 的模型参数。因此,当服务器在第一轮调用 " +":code:`get_parameters` 时,让我们在 :code:`get_parameters` 中返回一个空张量。" + +#: ../../source/tutorial-quickstart-xgboost.rst:278 +#, fuzzy +msgid "" +"In ``fit``, at the first round, we call ``xgb.train()`` to build up the " +"first set of trees. From the second round, we load the global model sent " +"from server to new build Booster object, and then update model weights on" +" local training data with function ``local_boost`` as follows:" +msgstr "" +"在 :code:`fit`中,第一轮我们调用 :code:`xgb.train()`来建立第一组树,返回的 Booster 对象和 config " +"分别存储在 :code:`self.bst` 和 :code:`self.config` 中。从第二轮开始,我们将服务器发送的全局模型加载到 " +":code:`self.bst`,然后使用函数 :code:`local_boost`更新本地训练数据的模型权重,如下所示:" + +#: ../../source/tutorial-quickstart-xgboost.rst:298 +#, fuzzy +msgid "" +"Given ``num_local_round``, we update trees by calling " +"``bst_input.update`` method. After training, the last " +"``N=num_local_round`` trees will be extracted to send to the server." +msgstr "" +"给定 :code:`num_local_round`,我们通过调用 " +":code:`self.bst.update`方法更新树。训练结束后,我们将提取最后一个 :code:`N=num_local_round` " +"树并发送给服务器。" + +#: ../../source/tutorial-quickstart-xgboost.rst:330 +#, fuzzy +msgid "" +"In ``evaluate``, after loading the global model, we call ``bst.eval_set``" +" function to conduct evaluation on valid set. The AUC value will be " +"returned." +msgstr "在 :code:`evaluate`中,我们调用 :code:`self.bst.eval_set`函数对有效集合进行评估。将返回 AUC 值。" + +#: ../../source/tutorial-quickstart-xgboost.rst:333 +#, fuzzy +msgid "" +"Now, we can create an instance of our class ``XgbClient`` and add one " +"line to actually run this client:" +msgstr "现在,我们可以创建一个 :code:`XgbClient` 类的实例,并添加一行来实际运行该客户端:" + +#: ../../source/tutorial-quickstart-xgboost.rst:350 +#, fuzzy +msgid "" +"That's it for the client. We only have to implement ``Client`` and call " +"``fl.client.start_client()``. 
The string ``\"[::]:8080\"`` tells the " +"client which server to connect to. In our case we can run the server and " +"the client on the same machine, therefore we use ``\"[::]:8080\"``. If we" +" run a truly federated workload with the server and clients running on " +"different machines, all that needs to change is the ``server_address`` we" +" point the client at." +msgstr "" +"这就是客户端。我们只需实现 :code:`客户端`并调用 :code:`fl.client.start_client()`。字符串 " +":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +":code:`\"[::]:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " +":code:`server_address`。" + +#: ../../source/tutorial-quickstart-xgboost.rst:360 +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." +msgstr "" +"然后,这些更新会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将这个改进版的模型发回给每个*客户端*,以完成一轮完整的" +" FL。" + +#: ../../source/tutorial-quickstart-xgboost.rst:364 +#, fuzzy +msgid "" +"In a file named ``server.py``, import Flower and FedXgbBagging from " +"``flwr.server.strategy``." +msgstr "" +"在名为 :code:`server.py` 的文件中,从 :code:`flwr.server.strategy` 导入 Flower 和 " +"FedXgbBagging。" + +#: ../../source/tutorial-quickstart-xgboost.rst:367 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "我们首先定义了 XGBoost bagging聚合策略。" + +#: ../../source/tutorial-quickstart-xgboost.rst:401 +#, fuzzy +msgid "" +"We use two clients for this example. An ``evaluate_metrics_aggregation`` " +"function is defined to collect and wighted average the AUC values from " +"clients. The ``config_func`` function is to return the current FL round " +"number to client's ``fit()`` and ``evaluate()`` methods." 
+msgstr "" +"本示例使用两个客户端。我们定义了一个 :code:`evaluate_metrics_aggregation` 函数,用于收集客户机的 AUC " +"值并求取平均值。" + +#: ../../source/tutorial-quickstart-xgboost.rst:406 +msgid "Then, we start the server:" +msgstr "然后,我们启动服务器:" + +#: ../../source/tutorial-quickstart-xgboost.rst:418 +msgid "Tree-based bagging aggregation" +msgstr "基于树的bagging聚合" + +#: ../../source/tutorial-quickstart-xgboost.rst:420 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "您一定很好奇bagging聚合是如何工作的。让我们来详细了解一下。" + +#: ../../source/tutorial-quickstart-xgboost.rst:422 +#, fuzzy +msgid "" +"In file ``flwr.server.strategy.fedxgb_bagging.py``, we define " +"``FedXgbBagging`` inherited from ``flwr.server.strategy.FedAvg``. Then, " +"we override the ``aggregate_fit``, ``aggregate_evaluate`` and " +"``evaluate`` methods as follows:" +msgstr "" +"在文件 :code:`flwr.server.strategy.fedxgb_bagging.py`中,我们定义了从 " +":code:`flwr.server.strategy.FedAvg`继承的 :code:`FedXgbBagging`。然后,我们覆盖 " +":code:`aggregate_fit`、:code:`aggregate_evaluate` 和 :code:`evaluate` 方法如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:519 +#, fuzzy +msgid "" +"In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost " +"trees by calling ``aggregate()`` function:" +msgstr "" +"在 :code:`aggregate_fit` 中,我们通过调用 :code:`aggregate()` 函数,按顺序聚合客户端的 XGBoost" +" 树:" + +#: ../../source/tutorial-quickstart-xgboost.rst:579 +#, fuzzy +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +"``_get_tree_nums``. Then, the fetched information will be aggregated. " +"After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." 
+msgstr "" +"在该函数中,我们首先通过调用 :code:`_get_tree_nums` " +"获取当前模型和上一个模型的树数和并行树数。然后,对获取的信息进行聚合。然后,聚合树(包含模型参数)生成新的树模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:584 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." +msgstr "在遍历所有客户端的模型后,会生成一个新的全局模型,然后进行序列化,并发回给每个客户端。" + +#: ../../source/tutorial-quickstart-xgboost.rst:588 +msgid "Launch Federated XGBoost!" +msgstr "启动联邦 XGBoost!" + +#: ../../source/tutorial-quickstart-xgboost.rst:664 +#, fuzzy +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in ``metrics_distributed``." +" One can see that the average AUC increases over FL rounds." +msgstr "" +"恭喜您!您已成功构建并运行了第一个联邦 XGBoost 系统。可以在 :code:`metrics_distributed` 中查看 AUC " +"值。我们可以看到,平均 AUC 随 FL 轮数的增加而增加。" + +#: ../../source/tutorial-quickstart-xgboost.rst:668 +#, fuzzy +msgid "" +"The full `source code `_ for this example can be found in ``examples" +"/xgboost-quickstart``." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" + +#: ../../source/tutorial-quickstart-xgboost.rst:673 +msgid "Comprehensive Federated XGBoost" +msgstr "综合的联邦 XGBoost" + +#: ../../source/tutorial-quickstart-xgboost.rst:675 +#, fuzzy +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including aggregation strategies, data partitioning and " +"centralised/distributed evaluation. We also support :doc:`Flower " +"simulation ` making it easy to simulate large " +"client cohorts in a resource-aware manner. Let's take a look!" 
+msgstr "" +"既然您已经知道联合 XGBoost 如何与 Flower 协同工作,那么现在就该通过自定义实验设置来运行一些更综合的实验了。在 xgboost-" +"comprehensive 示例 (`完整代码 " +"`_)中,我们提供了更多选项来定义各种实验设置,包括数据分区和集中/分布式评估。让我们一起来看看!" + +#: ../../source/tutorial-quickstart-xgboost.rst:685 +#, fuzzy +msgid "Cyclic training" +msgstr "集中式训练" + +#: ../../source/tutorial-quickstart-xgboost.rst:687 +#, fuzzy +msgid "" +"In addition to bagging aggregation, we offer a cyclic training scheme, " +"which performs FL in a client-by-client fashion. Instead of aggregating " +"multiple clients, there is only one single client participating in the " +"training per round in the cyclic training scenario. The trained local " +"XGBoost trees will be passed to the next client as an initialised model " +"for next round's boosting." +msgstr "" +"除了袋式聚合,我们还提供了一种循环训练方案,它以逐个客户端的方式执行 " +"FL。在循环训练方案中,每轮只有一个客户端参与训练,而不是多个客户端聚合在一起。训练好的本地 XGBoost " +"树将传递给下一个客户端,作为下一轮提升的初始化模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:693 +#, fuzzy +msgid "To do this, we first customise a ``ClientManager`` in ``server_utils.py``:" +msgstr "为此,我们首先要在 :code:`server_utils.py` 中自定义一个 :code:`ClientManager`:" + +#: ../../source/tutorial-quickstart-xgboost.rst:733 +#, fuzzy +msgid "" +"The customised ``ClientManager`` samples all available clients in each FL" +" round based on the order of connection to the server. Then, we define a " +"new strategy ``FedXgbCyclic`` in " +"``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially " +"select only one client in given round and pass the received model to next" +" client." +msgstr "" +"定制的 :code:`ClientManager` 会根据连接服务器的顺序,在每轮 FL 中对所有可用客户端进行采样。然后,我们在 " +":code:`flwr.server.strategy.fedxgb_cyclic.py`\"中定义了一个新策略 " +":code:`FedXgbCyclic`,以便在给定回合中按顺序只选择一个客户端,并将接收到的模型传递给下一个客户端。" + +#: ../../source/tutorial-quickstart-xgboost.rst:775 +#, fuzzy +msgid "" +"Unlike the original ``FedAvg``, we don't perform aggregation here. 
" +"Instead, we just make a copy of the received client model as global model" +" by overriding ``aggregate_fit``." +msgstr "" +"与最初的 :code:`FedAvg` 不同,我们在这里不执行聚合。相反,我们只是通过覆盖 :code:`aggregate_fit` " +"将接收到的客户端模型复制为全局模型。" + +#: ../../source/tutorial-quickstart-xgboost.rst:778 +#, fuzzy +msgid "" +"Also, the customised ``configure_fit`` and ``configure_evaluate`` methods" +" ensure the clients to be sequentially selected given FL round:" +msgstr "" +"此外,定制的 :code:`configure_fit` 和 :code:`configure_evaluate` 方法可确保在 FL " +"轮中按顺序选择客户:" + +#: ../../source/tutorial-quickstart-xgboost.rst:840 +msgid "Customised data partitioning" +msgstr "定制数据分区" + +#: ../../source/tutorial-quickstart-xgboost.rst:842 +#, fuzzy +msgid "" +"In ``dataset.py``, we have a function ``instantiate_partitioner`` to " +"instantiate the data partitioner based on the given ``num_partitions`` " +"and ``partitioner_type``. Currently, we provide four supported " +"partitioner type to simulate the uniformity/non-uniformity in data " +"quantity (uniform, linear, square, exponential)." +msgstr "" +"在 :code:`dataset.py` 中,我们有一个函数 :code:`instantiate_partitioner` 来根据给定的 " +":code:`num_partitions` 和 :code:`partitioner_type` " +"来实例化数据分区器。目前,我们提供四种支持的分区器类型(均匀、线性、正方形、指数)来模拟数据量的均匀性/非均匀性。" + +#: ../../source/tutorial-quickstart-xgboost.rst:873 +msgid "Customised centralised/distributed evaluation" +msgstr "定制的集中/分布式评估" + +#: ../../source/tutorial-quickstart-xgboost.rst:875 +#, fuzzy +msgid "" +"To facilitate centralised evaluation, we define a function in " +"``server_utils.py``:" +msgstr "为便于集中评估,我们在 :code:`server.py` 中定义了一个函数:" + +#: ../../source/tutorial-quickstart-xgboost.rst:907 +#, fuzzy +msgid "" +"This function returns a evaluation function which instantiates a " +"``Booster`` object and loads the global model weights to it. The " +"evaluation is conducted by calling ``eval_set()`` method, and the tested " +"AUC value is reported." 
+msgstr "" +"此函数返回一个评估函数,该函数实例化一个 :code:`Booster` 对象,并向其加载全局模型参数。评估通过调用 " +":code:`eval_set()` 方法进行,并报告测试的 AUC 值。" + +#: ../../source/tutorial-quickstart-xgboost.rst:911 +#, fuzzy +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the ``evaluate()`` method insides the " +"``XgbClient`` class in ``client_utils.py``." +msgstr "" +"至于客户端上的分布式评估,与快速启动示例相同,通过覆盖 :code:`client.py` 中 :code:`XgbClient` 类内部的 " +":code:`evaluate()` 方法。" + +#: ../../source/tutorial-quickstart-xgboost.rst:916 +#, fuzzy +msgid "Flower simulation" +msgstr "运行模拟" + +#: ../../source/tutorial-quickstart-xgboost.rst:918 +#, fuzzy +msgid "" +"We also provide an example code (``sim.py``) to use the simulation " +"capabilities of Flower to simulate federated XGBoost training on either a" +" single machine or a cluster of machines." +msgstr "我们还提供了一个示例代码(:code:`sim.py`),用于使用 Flower 的模拟功能在单台机器或机器集群上模拟联合 XGBoost 训练。" + +#: ../../source/tutorial-quickstart-xgboost.rst:954 +#, fuzzy +msgid "" +"After importing all required packages, we define a ``main()`` function to" +" perform the simulation process:" +msgstr "导入所有需要的软件包后,我们定义了一个 :code:`main()` 函数来执行模拟程序:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1010 +#, fuzzy +msgid "" +"We first load the dataset and perform data partitioning, and the pre-" +"processed data is stored in a ``list``. After the simulation begins, the " +"clients won't need to pre-process their partitions again." 
+msgstr "我们首先加载数据集并执行数据分区,预处理后的数据存储在 :code:`list` 中。模拟开始后,客户端就不需要再预处理分区了。" + +#: ../../source/tutorial-quickstart-xgboost.rst:1014 +#, fuzzy +msgid "Then, we define the strategies and other hyper-parameters:" +msgstr "然后,我们定义策略和其他超参数:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1065 +#, fuzzy +msgid "" +"After that, we start the simulation by calling " +"``fl.simulation.start_simulation``:" +msgstr "然后,我们调用 :code:`fl.simulation.start_simulation` 开始模拟:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1085 +#, fuzzy +msgid "" +"One of key parameters for ``start_simulation`` is ``client_fn`` which " +"returns a function to construct a client. We define it as follows:" +msgstr "" +":code:`start_simulation` 的一个关键参数是 " +":code:`client_fn`,它返回一个用于构建客户端的函数。我们将其定义如下:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1126 +msgid "Arguments parser" +msgstr "参数解析器" + +#: ../../source/tutorial-quickstart-xgboost.rst:1128 +#, fuzzy +msgid "" +"In ``utils.py``, we define the arguments parsers for clients, server and " +"simulation, allowing users to specify different experimental settings. " +"Let's first see the sever side:" +msgstr "在 :code:`utils.py` 中,我们定义了客户端和服务器端的参数解析器,允许用户指定不同的实验设置。让我们先看看服务器端:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1175 +#, fuzzy +msgid "" +"This allows user to specify training strategies / the number of total " +"clients / FL rounds / participating clients / clients for evaluation, and" +" evaluation fashion. Note that with ``--centralised-eval``, the sever " +"will do centralised evaluation and all functionalities for client " +"evaluation will be disabled." 
+msgstr "" +"这允许用户指定总客户数/FL 轮数/参与客户数/评估客户数以及评估方式。请注意,如果使用 :code:`--centralised-" +"eval`,服务器将进行集中评估,客户端评估的所有功能将被禁用。" + +#: ../../source/tutorial-quickstart-xgboost.rst:1180 +msgid "Then, the argument parser on client side:" +msgstr "然后是客户端的参数解析器:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1234 +#, fuzzy +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have an option to conduct evaluation on centralised test set" +" by setting ``--centralised-eval``, as well as an option to perform " +"scaled learning rate based on the number of clients by setting " +"``--scaled-lr``." +msgstr "这定义了客户端数据分区的各种选项。此外,通过设置 :code:`-centralised-eval`,客户端还可以选择在集中测试集上进行评估。" + +#: ../../source/tutorial-quickstart-xgboost.rst:1239 +#, fuzzy +msgid "We also have an argument parser for simulation:" +msgstr "我们还有一个用于模拟的参数解析器:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1317 +#, fuzzy +msgid "This integrates all arguments for both client and server sides." +msgstr "这整合了客户端和服务器端的所有参数。" + +#: ../../source/tutorial-quickstart-xgboost.rst:1320 +msgid "Example commands" +msgstr "命令示例" + +#: ../../source/tutorial-quickstart-xgboost.rst:1322 +#, fuzzy +msgid "" +"To run a centralised evaluated experiment with bagging strategy on 5 " +"clients with exponential distribution for 50 rounds, we first start the " +"server as below:" +msgstr "为了在 5 个客户端上进行 50 轮指数分布的集中评估实验,我们首先启动服务器,如下所示:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1329 +msgid "Then, on each client terminal, we start the clients:" +msgstr "然后,我们在每个客户终端上启动客户机:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1335 +#, fuzzy +msgid "To run the same experiment with Flower simulation:" +msgstr "运行与 Flower 模拟相同的实验:" + +#: ../../source/tutorial-quickstart-xgboost.rst:1341 +#, fuzzy +msgid "" +"The full `code `_ for this comprehensive example can be found in" +" ``examples/xgboost-comprehensive``." 
+msgstr "" +"此综合示例的全部`源代码 `_ 可在 :code:`examples/xgboost-comprehensive` 中找到。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "从零开始制定策略" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +#, fuzzy +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and the Flower framework (`part 1 " +"`__) and we learned how strategies can be used to customize " +"the execution on both the server and the clients (`part 2 " +"`__)." +msgstr "" +"欢迎来到 Flower 联邦学习教程的第三部分。在本教程的前几部分,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " +"`__),并学习了如何使用策略来定制服务器和客户端的执行(`part 2 " +"`__)。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg using " +"the Flower framework, Flower Datasets, and PyTorch." +msgstr "" +"在本笔记中,我们将通过创建 FedAvg 的自定义版本(再次使用 `Flower `__ 和 " +"`PyTorch `__),继续定制我们之前构建的联邦学习系统。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +#, fuzzy +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Flower Discuss and the Flower Slack to connect, " +"ask questions, and get help: - `Join Flower Discuss " +"`__ We'd love to hear from you in the " +"``Introduction`` topic! If anything is unclear, post in ``Flower Help - " +"Beginners``. - `Join Flower Slack `__ We'd " +"love to hear from you in the ``#introductions`` channel! If anything is " +"unclear, head over to the ``#questions`` channel." 
+msgstr "" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼 " +"我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:18 +#, fuzzy +msgid "Let's build a new ``Strategy`` from scratch! 🌼" +msgstr "让我们从头开始构建一个新的``Strategy``!" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:30 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:30 +msgid "Preparation" +msgstr "准备工作" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:32 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:33 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:32 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." +msgstr "在开始实际代码之前,让我们先确保我们已经准备好了所需的一切。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:44 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:45 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:44 +msgid "Installing dependencies" +msgstr "安装依赖项" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:46 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:47 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:46 +msgid "First, we install the necessary packages:" +msgstr "首先,我们安装必要的软件包:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:66 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:67 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:66 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:66 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr 
"现在我们已经安装了所有依赖项,可以导入本教程所需的所有内容:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:106 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:106 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:106 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " +"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " +"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " +"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:119 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:119 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:119 +msgid "Data loading" +msgstr "数据加载" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:121 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." 
+msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " +"``DataLoader`` 中。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:163 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:163 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "Model training/evaluation" +msgstr "模型培训/评估" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:165 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:165 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:171 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "让我们继续使用常见的模型定义(包括 `set_parameters` 和 `get_parameters`)、训练和测试函数:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:256 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:262 +msgid "Flower client" +msgstr "Flower 客户端" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:264 +#, fuzzy +msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``partition_id`` to the client and use it log additional details. We then" +" create an instance of ``ClientApp`` and pass it the ``client_fn``." 
+msgstr "" +"为了实现 Flower 客户端,我们(再次)创建了 ``flwr.client.NumPyClient`` 的子类,并实现了 " +"``get_parameters``、``fit`` 和 ``evaluate``三个方法。在这里,我们还将 ``cid`` " +"传递给客户端,并使用它记录其他详细信息:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:311 +msgid "Let's test what we have so far before we continue:" +msgstr "在继续之前,让我们先测试一下我们目前掌握的情况:" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:357 +msgid "Build a Strategy from scratch" +msgstr "从零开始构建策略" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:359 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" +"让我们重写 ``configure_fit`` 方法,使其向一部分客户的优化器传递更高的学习率(可能还有其他超参数)。我们将保持 " +"``FedAvg`` 中的客户端采样,然后更改配置字典(``FitIns`` 属性之一)。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:523 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "剩下的唯一工作就是在启动实验时使用新创建的自定义策略 ``FedCustom`` :" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:559 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:998 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:841 +msgid "Recap" +msgstr "回顾" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:561 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." 
+" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." +msgstr "" +"在本笔记中,我们了解了如何实施自定义策略。自定义策略可以对客户端节点配置、结果聚合等进行细粒度控制。要定义自定义策略,只需覆盖(抽象)基类 " +"``Strategy`` " +"的抽象方法即可。为使自定义策略更加强大,您可以将自定义函数传递给新类的构造函数(`__init__``),然后在需要时调用这些函数。" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:575 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1014 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:813 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:859 +#, fuzzy +msgid "" +"Before you continue, make sure to join the Flower community on Flower " +"Discuss (`Join Flower Discuss `__) and on " +"Slack (`Join Slack `__)." +msgstr "" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:577 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1016 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:815 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:861 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "如果您需要帮助,我们有专门的 ``#questions`` 频道,但我们也很乐意在 ``#introductions`` 中了解您是谁!" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:579 +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." 
+msgstr "" +"Flower联邦学习教程 - 第4部分 `__ 介绍了``Client``,它是``NumPyClient``底层的灵活应用程序接口。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "自定义客户端" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." +msgstr "" +"欢迎来到 Flower 联邦学习教程的第四部分。在本教程的前几部分中,我们介绍了 PyTorch 和 Flower 的联邦学习(`part 1 " +"`__),了解了如何使用策略来定制服务器和客户端的执行(`part 2 " +"`__),并从头开始构建了我们自己的定制策略(`part 3 " +"`__)。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." +msgstr "" +"在本笔记中,我们将重温 ``NumPyClient`` 并引入一个用于构建客户端的新基类,简单命名为 " +"``Client``。在本教程的前几部分中,我们的客户端基于``NumPyClient``,这是一个便捷类,可以让我们轻松地与具有良好 NumPy" +" 互操作性的机器学习库协同工作。有了 ``Client``,我们获得了很多以前没有的灵活性,但我们也必须做一些以前不需要做的事情。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:19 +#, fuzzy +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``! 🌼" +msgstr "让我们深入了解一下从 ``NumPyClient`` 到 ``Client`` 的过程!" 
+ +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:31 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:30 +msgid "Step 0: Preparation" +msgstr "步骤 0:准备工作" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now define a loading function for the CIFAR-10 training and test " +"set, partition them into ``num_partitions`` smaller datasets (each split " +"into training and validation set), and wrap everything in their own " +"``DataLoader``." +msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成十个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 " +"``DataLoader`` 中。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:256 +msgid "Step 1: Revisiting NumPyClient" +msgstr "步骤 1:重温 NumPyClient" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:258 +#, fuzzy +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``." +msgstr "" +"到目前为止,我们通过子类化 ``flwr.client.NumPyClient`` " +"实现了我们的客户端。我们实现了三个方法:``get_parameters``, ``fit`, 和``evaluate``。最后,我们用一个名为 " +"``client_fn`` 的函数来创建该类的实例:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:299 +msgid "" +"Then, we define the function ``numpyclient_fn`` that is used by Flower to" +" create the ``FlowerNumpyClient`` instances on demand. Finally, we create" +" the ``ClientApp`` and pass the ``numpyclient_fn`` to it." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:328 +#, fuzzy +msgid "" +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. 
Next, we configure the number of federated learning " +"rounds using ``ServerConfig`` and create the ``ServerApp`` with this " +"config:" +msgstr "" +"我们以前见过这种情况,目前没有什么新东西。与之前的笔记相比,唯一*小*的不同是命名,我们把 ``FlowerClient`` 改成了 " +"``FlowerNumPyClient``,把 `client_fn` 改成了 ``numpyclient_fn``。让我们运行它看看输出结果:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:355 +msgid "" +"Finally, we specify the resources for each client and run the simulation " +"to see the output we get:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:389 +#, fuzzy +msgid "" +"This works as expected, ten clients are training for three rounds of " +"federated learning." +msgstr "结果不出所料,两个客户端正在进行三轮联邦学习训练。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:391 +#, fuzzy +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``run_simulation`` launches the ``ClientApp`` object which in turn calls " +"the function ``numpyclient_fn`` to create an instance of our " +"``FlowerNumPyClient`` (along with loading the model and the data)." +msgstr "" +"让我们再深入一点,讨论一下 Flower 是如何执行模拟的。每当一个客户端被选中进行工作时,`start_simulation`` 就会调用函数 " +"`numpyclient_fn` 来创建我们的 ``FlowerNumPyClient`` 实例(同时加载模型和数据)。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:393 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." 
+msgstr "" +"但令人惊讶的部分也许就在这里: Flower 实际上并不直接使用 ``FlowerNumPyClient`` " +"对象。相反,它封装了该对象,使其看起来像 ``flwr.client.Client`` 的子类,而不是 " +"``flwr.client.NumPyClient``。事实上,Flower 核心框架不知道如何处理 " +"``NumPyClient``,它只知道如何处理 ``Client``。``NumPyClient`` " +"只是建立在``Client``之上的便捷抽象类。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:395 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." +msgstr "与其在 ``NumPyClient`` 上构建,我们可以直接在 ``Client`` 上构建。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:407 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "步骤 2:从 ``NumPyClient`` 移至 ``Client``" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:409 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "让我们尝试使用 ``Client`` 代替 ``NumPyClient`` 做同样的事情。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:519 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "在详细讨论代码之前,让我们试着运行它!必须确保我们基于 ``Client`` 的新客户端能正常运行,对吗?" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:545 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "就是这样,我们现在开始使用 ``Client``。它看起来可能与我们使用 ``NumPyClient`` 所做的类似。那么有什么不同呢?" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:547 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. 
Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." +msgstr "" +"首先,它的代码更多。但为什么呢?区别在于 ``Client`` 希望我们处理参数的序列化和反序列化。Flower " +"要想通过网络发送参数,最终需要将这些参数转化为 ``字节``。把参数(例如 NumPy 的 ``ndarray`` " +"参数)变成原始字节叫做序列化。将原始字节转换成更有用的东西(如 NumPy ``ndarray`)称为反序列化。Flower " +"需要同时做这两件事:它需要在服务器端序列化参数并将其发送到客户端,客户端需要反序列化参数以便将其用于本地训练,然后再次序列化更新后的参数并将其发送回服务器,服务器(最后)再次反序列化参数以便将其与从其他客户端接收到的更新汇总在一起。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:550 +msgid "" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." +msgstr "" +"Client 与 NumPyClient 之间的唯一**真正区别在于,NumPyClient " +"会为你处理序列化和反序列化。NumPyClient之所以能做到这一点,是因为它预计你会以NumPy " +"ndarray的形式返回参数,而且它知道如何处理这些参数。这使得与具有良好 NumPy 支持的大多数机器学习库一起工作变得轻而易举。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:552 +msgid "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). 
The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." +msgstr "" +"在 API 方面,有一个主要区别:Client 中的所有方法都只接受一个参数(例如,``Client.fit`` 中的 " +"``FitIns``),并只返回一个值(例如,``Client.fit`` 中的 " +"``FitRes``)。另一方面,``NumPyClient``中的方法有多个参数(例如,``NumPyClient.fit``中的``parameters``和``config``)和多个返回值(例如,``NumPyClient.fit``中的``parameters``、``num_example``和``metrics``)。在" +" ``Client`` 中的这些 ``*Ins`` 和 ``*Res`` 对象封装了你在 ``NumPyClient`` 中习惯使用的所有单个值。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:565 +msgid "Step 3: Custom serialization" +msgstr "步骤 3:自定义序列化" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:567 +msgid "" +"Here we will explore how to implement custom serialization with a simple " +"example." +msgstr "下面我们将通过一个简单的示例来探讨如何实现自定义序列化。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:569 +msgid "" +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." +msgstr "" +"首先,什么是序列化?序列化只是将对象转换为原始字节的过程,同样重要的是,反序列化是将原始字节转换回对象的过程。这对网络通信非常有用。事实上,如果没有序列化,你就无法通过互联网传输一个" +" Python 对象。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:571 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. 
This means that serialization is an essential part of Federated " +"Learning." +msgstr "通过在客户端和服务器之间来回发送 Python 对象,联合学习在很大程度上依赖于互联网通信进行训练。这意味着序列化是联邦学习的重要组成部分。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:573 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." +msgstr "" +"在下面的章节中,我们将编写一个基本示例,在发送包含参数的 ``ndarray`` 前,我们将首先把 ``ndarray`` " +"转换为稀疏矩阵,而不是发送序列化版本。这种技术可以用来节省带宽,因为在某些情况下,模型的参数是稀疏的(包含许多 0 " +"条目),将它们转换成稀疏矩阵可以大大提高它们的字节数。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:576 +msgid "Our custom serialization/deserialization functions" +msgstr "我们的定制序列化/反序列化功能" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:578 +msgid "" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." +msgstr "" +"这才是真正的序列化/反序列化,尤其是在用于序列化的 ``ndarray_too_sparse_bytes`` 和用于反序列化的 " +"``sparse_bytes_too_ndarray`` 中。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:580 +msgid "" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." +msgstr "请注意,为了转换数组,我们导入了 ``scipy.sparse`` 库。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:668 +msgid "Client-side" +msgstr "客户端" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:670 +msgid "" +"To be able to serialize our ``ndarray``\\ s into sparse parameters, we " +"will just have to call our custom functions in our " +"``flwr.client.Client``." 
+msgstr "为了能够将我们的 ``ndarray`` 序列化为稀疏参数,我们只需在 ``flwr.client.Client`` 中调用我们的自定义函数。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:672 +msgid "" +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." +msgstr "" +"事实上,在 `get_parameters` 中,我们需要使用上文定义的自定义 `ndarrays_too_sparse_parameters` " +"序列化从网络中获取的参数。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:674 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." +msgstr "" +"在 ``fit`` 中,我们首先需要使用自定义的 ``sparse_parameters_to_ndarrays`` " +"反序列化来自服务器的参数,然后使用 ``ndarrays_to_sparse_parameters`` 序列化本地结果。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:676 +msgid "" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." +msgstr "在 ``evaluate`` 中,我们只需要用自定义函数反序列化全局参数。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:781 +msgid "Server-side" +msgstr "服务器端" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:783 +msgid "" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." 
+msgstr "" +"在本例中,我们将只使用 ``FedAvg`` 作为策略。要改变这里的序列化和反序列化,我们只需重新实现 ``FedAvg`` 的 " +"``evaluate`` 和 ``aggregate_fit`` 函数。策略的其他函数将从超类 ``FedAvg`` 继承。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:785 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "正如你所看到的,``evaluate``中只修改了一行:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:791 +msgid "" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "而对于 ``aggregate_fit``,我们将首先反序列化收到的每个结果:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:800 +msgid "And then serialize the aggregated result:" +msgstr "然后将汇总结果序列化:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:959 +msgid "We can now run our custom serialization example!" +msgstr "现在我们可以运行自定义序列化示例!" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1000 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. ``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" +"在本部分教程中,我们已经了解了如何通过子类化 ``NumPyClient`` 或 ``Client`` 来构建客户端。NumPyClient " +"\"是一个便捷的抽象类,可以让我们更容易地与具有良好NumPy互操作性的机器学习库一起工作。``Client``是一个更灵活的抽象类,允许我们做一些在`NumPyClient``中做不到的事情。为此,它要求我们自己处理参数序列化和反序列化。" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1018 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. 
There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "这暂时是 Flower 教程的最后一部分,恭喜您!您现在已经具备了理解其余文档的能力。本教程还有许多内容没有涉及,我们推荐您参考以下资源:" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1020 +msgid "`Read Flower Docs `__" +msgstr "阅读Flower文档 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1021 +#, fuzzy +msgid "`Check out Flower Code Examples `__" +msgstr "查看 Flower 代码示例 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1022 +msgid "" +"`Use Flower Baselines for your research " +"`__" +msgstr "使用 \"Flower Baselines \"进行研究 `__" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:1023 +#, fuzzy +msgid "" +"`Watch Flower AI Summit 2024 videos `__" +msgstr "观看 2023 年Flower峰会视频 `__" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "开始使用Flower" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" +msgstr "欢迎阅读Flower联邦学习教程!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll build a federated learning system using the " +"Flower framework, Flower Datasets and PyTorch. In part 1, we use PyTorch " +"for the model training pipeline and data loading. In part 2, we federate " +"the PyTorch project using Flower." +msgstr "" +"在本笔记中,我们将使用 Flower 和 PyTorch 构建一个联邦学习系统。在第一部分中,我们使用 PyTorch " +"进行模型训练和数据加载。在第二部分中,我们将继续使用 Flower 联邦化基于 PyTorch 的框架。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:18 +#, fuzzy +msgid "Let's get started! 🌼" +msgstr "让我们开始吧!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:32 +msgid "" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." 
+msgstr "在开始编写实际代码之前,让我们先确保我们已经准备好了所需的一切。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:44 +#, fuzzy +msgid "Install dependencies" +msgstr "安装依赖项" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:46 +#, fuzzy +msgid "" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``), Flower Datasets (``flwr-datasets``) and Flower " +"(``flwr``):" +msgstr "接下来,我们为 PyTorch(`torch`` 和`torchvision``)和 Flower(`flwr`)安装必要的软件包:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:109 +#, fuzzy +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware accelerator:" +" GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" +"可以切换到已启用 GPU 加速的运行时(在 Google Colab 上: 运行时 > 更改运行时类型 > 硬件加速: GPU > " +"保存``)。但请注意,Google Colab 并非总能提供 GPU 加速。如果在以下部分中看到与 GPU 可用性相关的错误,请考虑通过设置 " +"``DEVICE = torch.device(\"cpu\")`` 切回基于 CPU 的执行。如果运行时已启用 GPU " +"加速,你应该会看到输出``Training on cuda``,否则会显示``Training on cpu``。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:122 +#, fuzzy +msgid "Load the data" +msgstr "加载数据" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:124 +#, fuzzy +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. 
CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes: 'airplane', " +"'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', and " +"'truck'." +msgstr "" +"联邦学习可应用于不同领域的多种不同类型任务。在本教程中,我们将通过在流行的 CIFAR-10 数据集上训练一个简单的卷积神经网络 (CNN) " +"来介绍联合学习。CIFAR-10 可用于训练图像分类器,以区分来自十个不同类别的图像:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:135 +#, fuzzy +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (the data " +"is naturally partitioned)." +msgstr "" +"我们通过将原始 CIFAR-10 数据集拆分成多个分区来模拟来自多个组织的多个数据集(也称为联邦学习中的 \"跨分区 " +"\"设置)。每个分区代表一个组织的数据。我们这样做纯粹是为了实验目的,在现实世界中不需要拆分数据,因为每个组织都已经有了自己的数据(所以数据是自然分区的)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:137 +#, fuzzy +msgid "" +"Each organization will act as a client in the federated learning system. " +"Having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server." +msgstr "每个组织都将充当联邦学习系统中的客户端。因此,有十个组织参与联邦学习,就意味着有十个客户端连接到联邦学习服务器:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:148 +#, fuzzy +msgid "" +"We use the Flower Datasets library (``flwr-datasets``) to partition " +"CIFAR-10 into ten partitions using ``FederatedDataset``. 
We will create a" +" small training and test set for each of the ten organizations and wrap " +"each of these into a PyTorch ``DataLoader``:" +msgstr "" +"现在,让我们从 ``flwr-datasets`` 中创建 Federated Dataset 抽象,以分割 " +"CIFAR-10。我们将为每个边缘设备创建小型训练集和测试集,并将它们分别封装到 PyTorch ``DataLoader`` 中:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:196 +#, fuzzy +msgid "" +"We now have a function that can return a training set and validation set " +"(``trainloader`` and ``valloader``) representing one dataset from one of " +"ten different organizations. Each ``trainloader``/``valloader`` pair " +"contains 4000 training examples and 1000 validation examples. There's " +"also a single ``testloader`` (we did not split the test set). Again, this" +" is only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." +msgstr "" +"现在,我们有一个包含十个训练集和十个验证集(`trainloaders`` 和`valloaders``)的列表,代表十个不同组织的数据。每对 " +"``trainloader``/``valloader`` 都包含 4500 个训练示例和 500 个验证数据。还有一个单独的 " +"``测试加载器``(我们没有拆分测试集)。同样,这只有在构建研究或教育系统时才有必要,实际的联邦学习系统的数据自然分布在多个分区中。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:199 +#, fuzzy +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloader`` from ``partition_id=0``) before we " +"move on:" +msgstr "在继续之前,让我们先看看第一个训练集中的第一批图像和标签(即 ``trainloaders[0]``):" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:241 +#, fuzzy +msgid "" +"The output above shows a random batch of images from the ``trainloader`` " +"from the first of ten partitions. It also prints the labels associated " +"with each image (i.e., one of the ten possible labels we've seen above). " +"If you run the cell again, you should see another batch of images." 
+msgstr "" +"上面的输出显示了来自十个 \"trainloader \"列表中第一个 \"trainloader " +"\"的随机图像。它还打印了与每幅图像相关的标签(即我们上面看到的十个可能标签之一)。如果您再次运行该单元,应该会看到另一批图像。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:253 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "步骤 1:使用 PyTorch 进行集中训练" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" +"接下来,我们将使用 PyTorch 来定义一个简单的卷积神经网络。本介绍假定您对 PyTorch 有基本的了解,因此不会详细介绍与 PyTorch" +" 相关的内容。如果你想更深入地了解 PyTorch,我们推荐你阅读 `DEEP LEARNING WITH PYTORCH: a 60 " +"minute blitz " +"`__。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +#, fuzzy +msgid "Define the model" +msgstr "定义模型" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:278 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" +"我们使用` PyTorch 教程 " +"`__ 中描述的简单 CNN:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:315 +msgid "Let's continue with the usual training and test functions:" +msgstr "让我们继续进行常规的训练和测试功能:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:375 +#, fuzzy +msgid "Train the model" +msgstr "训练模型" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:377 +#, fuzzy +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``partition_id=0``). 
This simulates the reality of most machine learning" +" projects today: each organization has their own data and trains models " +"only on this internal data:" +msgstr "现在我们拥有了所需的所有基本构件:数据集、模型、训练函数和测试函数。让我们把它们放在一起,在我们其中一个组织的数据集(``trainloaders[0]``)上训练模型。这模拟了当今大多数机器学习项目的实际情况:每个组织都有自己的数据,并且只在这些内部数据上训练模型:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:406 +#, fuzzy +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. The " +"intent was just to show a simple centralized training pipeline that sets " +"the stage for what comes next - federated learning!" +msgstr "" +"在我们的 CIFAR-10 分片上对简单 CNN 进行 5 个遍历的训练后,测试集的准确率应为 " +"41%,这并不理想,但同时对本教程而言也并不重要。我们只是想展示一个简单的集中式训练流程,为接下来的联邦学习做好铺垫!" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:418 +msgid "Step 2: Federated Learning with Flower" +msgstr "步骤 2:使用 Flower 联邦学习" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:420 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." 
+msgstr "" +"步骤 1 演示了一个简单的集中式训练流程。所有数据都在一个地方(即一个 \"trainloader \"和一个 " +"\"valloader\")。接下来,我们将模拟在多个组织中拥有多个数据集的情况,并使用联邦学习在这些组织中训练一个模型。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:432 +#, fuzzy +msgid "Update model parameters" +msgstr "更新模型参数" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:434 +#, fuzzy +msgid "" +"In federated learning, the server sends global model parameters to the " +"client, and the client updates the local model with parameters received " +"from the server. It then trains the model on the local data (which " +"changes the model parameters locally) and sends the updated/changed model" +" parameters back to the server (or, alternatively, it sends just the " +"gradients back to the server, not the full model parameters)." +msgstr "在联邦学习中,服务器将全局模型参数发送给客户端,客户端根据从服务器接收到的参数更新本地模型。然后,客户端根据本地数据对模型进行训练(在本地更改模型参数),并将更新/更改后的模型参数发回服务器(或者,客户端只将梯度参数发回服务器,而不是全部模型参数)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:436 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." +msgstr "" +"我们需要两个辅助函数,用从服务器接收到的参数更新本地模型,并从本地模型获取更新后的模型参数:`` " +"set_parameters```和`get_parameters``。下面两个函数就是为上面的 PyTorch 模型做这些工作的。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:438 +#, fuzzy +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." 
+" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which the Flower ``NumPyClient`` knows how to " +"serialize/deserialize):" +msgstr "" +"在这里,如何工作的细节并不重要(如果你想了解更多,请随时查阅 PyTorch 文档)。本质上,我们使用 ``state_dict`` 访问 " +"PyTorch 模型参数张量。然后,参数张量会被转换成/转换成 NumPy ndarray 列表(Flower 知道如何序列化/反序列化):" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:466 +#, fuzzy +msgid "Define the Flower ClientApp" +msgstr "Flower 客户端。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:468 +#, fuzzy +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create a ``ServerApp`` and a ``ClientApp`` to run the server-" +"side and client-side code, respectively." +msgstr "" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " +"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " +"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:470 +#, fuzzy +msgid "" +"The first step toward creating a ``ClientApp`` is to implement a " +"subclasses of ``flwr.client.Client`` or ``flwr.client.NumPyClient``. We " +"use ``NumPyClient`` in this tutorial because it is easier to implement " +"and requires us to write less boilerplate. 
To implement ``NumPyClient``, " +"we create a subclass that implements the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" +"说完这些,让我们进入有趣的部分。联邦学习系统由一个服务器和多个客户端组成。在 Flower 中,我们通过实现 " +"``flwr.client.Client`` 或 ``flwr.client.NumPyClient`` " +"的子类来创建客户端。在本教程中,我们使用``NumPyClient``,因为它更容易实现,需要我们编写的模板也更少。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:472 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "``get_parameters``: 返回当前本地模型参数" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:473 +#, fuzzy +msgid "" +"``fit``: Receive model parameters from the server, train the model on the" +" local data, and return the updated model parameters to the server" +msgstr "``fit``: 从服务器接收模型参数,在本地数据上训练模型参数,并将(更新的)模型参数返回服务器" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:474 +#, fuzzy +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model on the local data, and return the evaluation result to the server" +msgstr "``evaluate ``: 从服务器接收模型参数,在本地数据上评估模型参数,并将评估结果返回服务器" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:476 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" +"我们提到,我们的客户端将使用之前定义的 PyTorch 组件进行模型训练和评估。让我们来看看一个简单的 Flower " +"客户端实现,它将一切都整合在一起:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:513 +#, fuzzy +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. 
Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient`` (one on each of the " +"machines we'd start the client on). Flower calls ``FlowerClient.fit`` on " +"the respective instance when the server selects a particular client for " +"training (and ``FlowerClient.evaluate`` for evaluation)." +msgstr "" +"我们的类 ``FlowerClient`` 定义了本地训练/评估的执行方式,并允许 Flower 通过 ``fit`` 和 " +"``evaluate`` 调用本地训练/评估。每个 ``FlowerClient`` " +"实例都代表联邦学习系统中的*单个客户端*。联邦学习系统有多个客户端(否则就没有什么可联邦的),因此每个客户端都将由自己的 " +"``FlowerClient`` 实例来代表。例如,如果我们的工作负载中有三个客户端,那么我们就会有三个 ``FlowerClient`` " +"实例。当服务器选择特定客户端进行训练时,Flower 会调用相应实例上的 ``FlowerClient.fit`` (评估时调用 " +"``FlowerClient.evaluate``)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:516 +#, fuzzy +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients *on a single machine*. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." 
+msgstr "" +"在本笔记中,我们要模拟一个联邦学习系统,在一台机器上有 10 个客户端。这意味着服务器和所有 10 个客户端都将位于一台机器上,并共享 " +"CPU、GPU 和内存等资源。有 10 个客户端就意味着内存中有 10 个 ``FlowerClient`` " +"实例。在单台机器上这样做会很快耗尽可用的内存资源,即使这些客户端中只有一个子集参与了一轮联邦学习。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:518 +#, fuzzy +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function that creates a ``FlowerClient`` instance on demand. We typically" +" call this function ``client_fn``. Flower calls ``client_fn`` whenever it" +" needs an instance of one particular client to call ``fit`` or " +"``evaluate`` (those instances are usually discarded after use, so they " +"should not keep any local state). In federated learning experiments using" +" Flower, clients are identified by a partition ID, or ``partition-id``. " +"This ``partition-id`` is used to load different local data partitions for" +" different clients, as can be seen below. The value of ``partition-id`` " +"is retrieved from the ``node_config`` dictionary in the ``Context`` " +"object, which holds the information that persists throughout each " +"training round." 
+msgstr "" +"除了服务器和客户端在多台机器上运行的常规功能外,Flower 还提供了特殊的模拟功能,即只有在训练或评估实际需要时才创建 " +"``FlowerClient`` 实例。为了让 Flower 框架能在必要时创建客户端,我们需要实现一个名为 ``client_fn`` " +"的函数,它能按需创建一个 ``FlowerClient`` 实例。每当 Flower 需要一个特定的客户端实例来调用 ``fit`` 或 " +"``evaluate`` 时,它就会调用 " +"``client_fn``(这些实例在使用后通常会被丢弃,因此它们不应保留任何本地状态)。客户端由一个客户端 ID 或简短的 ``cid`` " +"标识。例如,可以使用 ``cid`` 为不同的客户端加载不同的本地数据分区,如下所示:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:522 +#, fuzzy +msgid "" +"With this, we have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. Last, but definitely not least, we" +" create an instance of ``ClientApp`` and pass it the ``client_fn``. " +"``ClientApp`` is the entrypoint that a running Flower client uses to call" +" your code (as defined in, for example, ``FlowerClient.fit``)." +msgstr "" +"现在我们有了定义客户端训练/评估的类 ``FlowerClient`` 和允许 Flower 在需要调用某个客户端的 ``fit` 或 " +"``evaluate` 时创建 ``FlowerClient`` 实例的 ``client_fn` 类。最后一步是使用 " +"``flwr.simulation.start_simulation`` 启动实际模拟。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:563 +#, fuzzy +msgid "Define the Flower ServerApp" +msgstr "Flower 服务器。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:565 +#, fuzzy +msgid "" +"On the server side, we need to configure a strategy which encapsulates " +"the federated learning approach/algorithm, for example, *Federated " +"Averaging* (FedAvg). Flower has a number of built-in strategies, but we " +"can also use our own strategy implementations to customize nearly all " +"aspects of the federated learning approach. 
For this example, we use the " +"built-in ``FedAvg`` implementation and customize it using a few basic " +"parameters:" +msgstr "" +"Flower 有许多内置策略,但我们也可以使用自己的策略实现来定制联邦学习方法的几乎所有方面。在本例中,我们使用内置的 ``FedAvg`` " +"实现,并使用一些基本参数对其进行定制。最后一步是实际调用 ``start_simulation``开始模拟:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:592 +msgid "" +"Similar to ``ClientApp``, we create a ``ServerApp`` using a utility " +"function ``server_fn``. In ``server_fn``, we pass an instance of " +"``ServerConfig`` for defining the number of federated learning rounds " +"(``num_rounds``) and we also pass the previously created ``strategy``. " +"The ``server_fn`` returns a ``ServerAppComponents`` object containing the" +" settings that define the ``ServerApp`` behaviour. ``ServerApp`` is the " +"entrypoint that Flower uses to call all your server-side code (for " +"example, the strategy)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:629 +#, fuzzy +msgid "Run the training" +msgstr "开始训练" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:631 +msgid "" +"In simulation, we often want to control the amount of resources each " +"client can use. In the next cell, we specify a ``backend_config`` " +"dictionary with the ``client_resources`` key (required) for defining the " +"amount of CPU and GPU resources each client can access." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:659 +msgid "" +"The last step is the actual call to ``run_simulation`` which - you " +"guessed it - runs the simulation. 
``run_simulation`` accepts a number of " +"arguments: - ``server_app`` and ``client_app``: the previously created " +"``ServerApp`` and ``ClientApp`` objects, respectively - " +"``num_supernodes``: the number of ``SuperNodes`` to simulate which equals" +" the number of clients for Flower simulation - ``backend_config``: the " +"resource allocation used in this simulation" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:686 +msgid "Behind the scenes" +msgstr "幕后" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:688 +msgid "So how does this work? How does Flower execute this simulation?" +msgstr "那么它是如何工作的呢?Flower 如何进行模拟?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:690 +#, fuzzy, python-format +msgid "" +"When we call ``run_simulation``, we tell Flower that there are 10 clients" +" (``num_supernodes=10``, where 1 ``SuperNode`` launches 1 ``ClientApp``)." +" Flower then goes ahead an asks the ``ServerApp`` to issue an " +"instructions to those nodes using the ``FedAvg`` strategy. ``FedAvg`` " +"knows that it should select 100% of the available clients " +"(``fraction_fit=1.0``), so it goes ahead and selects 10 random clients " +"(i.e., 100% of 10)." +msgstr "" +"当我们调用 ``start_simulation`` 时,我们会告诉 Flower 有 10 " +"个客户(`num_clients=10``)。然后,Flower 会要求 ``FedAvg`` 策略选择客户。``FedAvg`` 知道它应该选择" +" 100%的可用客户(``fraction_fit=1.0``),所以它会随机选择 10 个客户(即 10 的 100%)。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:692 +#, fuzzy +msgid "" +"Flower then asks the selected 10 clients to train the model. Each of the " +"10 ``ClientApp`` instances receives a message, which causes it to call " +"``client_fn`` to create an instance of ``FlowerClient``. It then calls " +"``.fit()`` on each the ``FlowerClient`` instances and returns the " +"resulting model parameter updates to the ``ServerApp``. 
When the " +"``ServerApp`` receives the model parameter updates from the clients, it " +"hands those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" +"然后,Flower 会要求选定的 10 " +"个客户端对模型进行训练。服务器收到客户端的模型参数更新后,会将这些更新交给策略(*FedAvg*)进行聚合。策略会聚合这些更新并返回新的全局模型,然后将其用于下一轮联邦学习。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:705 +msgid "Where's the accuracy?" +msgstr "准确度在哪里找?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:707 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" +"您可能已经注意到,除了 ``losses_distributed`` 以外,所有指标都是空的。{\"准确度\": " +"float(准确度)}``去哪儿了?" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:709 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." +msgstr "" +"Flower 可以自动汇总单个客户端返回的损失值,但无法对通用度量字典中的度量进行同样的处理(即带有 \"准确度 " +"\"键的度量字典)。度量值字典可以包含非常不同种类的度量值,甚至包含根本不是度量值的键/值对,因此框架不知道(也无法知道)如何自动处理这些度量值。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:711 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." 
+msgstr "" +"作为用户,我们需要告诉框架如何处理/聚合这些自定义指标,为此,我们将指标聚合函数传递给策略。然后,只要从客户端接收到拟合或评估指标,策略就会调用这些函数。两个可能的函数是" +" ``fit_metrics_aggregation_fn`` 和 ``evaluate_metrics_aggregation_fn``。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:713 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "让我们创建一个简单的加权平均函数来汇总从 ``evaluate`` 返回的 ``accuracy`` 指标:" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:781 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" +"我们现在有了一个完整的系统,可以执行联邦训练和联邦评估。它使用 ``weighted_average`` " +"函数汇总自定义评估指标,并在服务器端计算所有客户端的单一 ``accuracy`` 指标。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:783 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." +msgstr "" +"其他两类指标(`losses_centralized`` 和 " +"`metrics_centralized`)仍然是空的,因为它们只适用于集中评估。Flower 教程的第二部分将介绍集中式评估。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:795 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "结束语" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:797 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. 
The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" +"恭喜您,你刚刚训练了一个由 10 个客户端组成的卷积神经网络!这样,你就了解了使用 Flower " +"进行联邦学习的基础知识。你所看到的方法同样适用于其他机器学习框架(不只是 PyTorch)和任务(不只是 CIFAR-10 图像分类),例如使用 " +"Hugging Face Transformers 的 NLP 或使用 SpeechBrain 的语音。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:799 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "在下一个笔记中,我们将介绍一些更先进的概念。想定制你的策略吗?在服务器端初始化参数?或者在服务器端评估聚合模型?我们将在下一个教程中介绍所有这些内容以及更多。" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:817 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" +"`Flower 联邦学习教程 - 第 2 部分 `__ 更深入地介绍了策略以及可以使用策略构建的所有高级功能。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "使用联邦学习策略" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." 
+msgstr "" +"欢迎来到联邦学习教程的下一部分。在本教程的前几部分,我们介绍了使用 PyTorch 和 Flower 进行联邦学习(`第 1 部分 " +"`__)。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +#, fuzzy +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook again, using the Flower framework, " +"Flower Datasets, and PyTorch." +msgstr "" +"在本笔记中,我们将开始定制在入门笔记中构建的联邦学习系统(再次使用 `Flower `__ 和 " +"`PyTorch `__)。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:18 +#, fuzzy +msgid "Let's move beyond FedAvg with Flower strategies! 🌼" +msgstr "让我们超越 FedAvg,采用Flower策略!" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:121 +#, fuzzy +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_partitions`` which allows us to call ``load_datasets`` with " +"different numbers of partitions." +msgstr "" +"现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +"个较小的数据集(每个数据集又分为训练集和验证集),并将所有数据都封装在各自的 ``DataLoader`` 中。我们引入了一个新参数 " +"``num_clients``,它允许我们使用不同数量的客户端调用 ``load_datasets``。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:321 +msgid "Strategy customization" +msgstr "策略定制" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." 
+msgstr "到目前为止,如果您已经阅读过入门笔记本,那么一切都应该很熟悉了。接下来,我们将介绍一些新功能。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:335 +msgid "Server-side parameter **initialization**" +msgstr "服务器端参数 **初始化**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:337 +#, fuzzy +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy. We create an " +"instance of ``Net()`` and get the paramaters as follows:" +msgstr "" +"默认情况下,Flower 会通过向一个随机客户端询问初始参数来初始化全局模型。但在许多情况下,我们需要对参数初始化进行更多控制。因此,Flower" +" 允许您直接将初始参数传递给策略:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:358 +msgid "" +"Next, we create a ``server_fn`` that returns the components needed for " +"the server. Within ``server_fn``, we create a Strategy that uses the " +"initial parameters." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:393 +#, fuzzy +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. In " +"``server_fn``, we pass this new ``strategy`` and a ``ServerConfig`` for " +"defining the number of federated learning rounds (``num_rounds``)." 
+msgstr "" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " +"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " +"方法的任何调用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:395 +msgid "" +"Similar to the ``ClientApp``, we now create the ``ServerApp`` using the " +"``server_fn``:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:416 +msgid "" +"Last but not least, we specify the resources for each client and run the " +"simulation." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:448 +#, fuzzy +msgid "" +"If we look closely, we can see that the logs do not show any calls to the" +" ``FlowerClient.get_parameters`` method." +msgstr "" +"向 ``FedAvg`` 策略传递 ``initial_parameters`` 可以防止 Flower " +"向其中一个客户端询问初始参数。如果我们仔细观察,就会发现日志中没有显示对 ``FlowerClient.get_parameters`` " +"方法的任何调用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:460 +msgid "Starting with a customized strategy" +msgstr "从定制战略开始" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:462 +#, fuzzy +msgid "" +"We've seen the function ``run_simulation`` before. It accepts a number of" +" arguments, amongst them the ``server_app`` which wraps around the " +"strategy and number of training rounds, ``client_app`` which wraps around" +" the ``client_fn`` used to create ``FlowerClient`` instances, and the " +"number of clients to simulate which equals ``num_supernodes``." +msgstr "" +"我们以前见过函数 ``start_simulation``。它接受许多参数,其中包括用于创建 ``FlowerClient`` 实例的 " +"``client_fn``、要模拟的客户数量 ``num_clients``、回合数 ``num_rounds``和策略。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:464 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. 
Let's try to use a different " +"strategy this time:" +msgstr "该策略封装了联邦学习方法/算法,例如`FedAvg``或`FedAdagrad``。这次让我们尝试使用不同的策略:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:509 +msgid "Server-side parameter **evaluation**" +msgstr "服务器端参数**评估**" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:511 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "Flower 可以在服务器端或客户端评估聚合模型。客户端和服务器端评估在某些方面相似,但也有不同之处。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:513 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." +msgstr "**集中评估**(或*服务器端评估*)在概念上很简单:它的工作方式与集中式机器学习中的评估方式相同。如果有一个服务器端数据集可用于评估目的,那就太好了。我们可以在每一轮训练后对新聚合的模型进行评估,而无需将模型发送给客户端。我们也很幸运,因为我们的整个评估数据集随时可用。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:515 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. 
But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "**联邦评估**(或*客户端评估*)更为复杂,但也更为强大:它不需要集中的数据集,允许我们在更大的数据集上对模型进行评估,这通常会产生更真实的评估结果。事实上,如果我们想得到有代表性的评估结果,很多情况下都需要使用**联邦评估**。但是,这种能力是有代价的:一旦我们开始在客户端进行评估,我们就应该意识到,如果这些客户端并不总是可用,我们的评估数据集可能会在连续几轮学习中发生变化。此外,每个客户端所拥有的数据集也可能在连续几轮学习中发生变化。这可能会导致评估结果不稳定,因此即使我们不改变模型,也会看到评估结果在连续几轮中波动。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:518 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" +"我们已经了解了联邦评估如何在客户端工作(即通过在 ``FlowerClient`` 中实现 ``evaluate`` " +"方法)。现在让我们看看如何在服务器端评估聚合模型参数:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:549 +msgid "" +"We create a ``FedAvg`` strategy and pass ``evaluate_fn`` to it. Then, we " +"create a ``ServerApp`` that uses this strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:586 +#, fuzzy +msgid "Finally, we run the simulation." +msgstr "运行模拟" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "向/从客户端发送/接收任意值" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. 
One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" +"在某些情况下,我们希望从服务器端配置客户端的执行(训练、评估)。其中一个例子就是服务器要求客户端训练一定数量的本地遍历。Flower " +"提供了一种使用字典从服务器向客户端发送配置值的方法。让我们来看一个例子:客户端通过 ``fit`` 中的 ``config`` " +"参数从服务器接收配置值(``evaluate`` 中也有 ``config`` 参数)。``fit`` 方法通过 ``config`` " +"参数接收配置字典,然后从字典中读取值。在本例中,它读取了 ``server_round`` 和 " +"``local_epochs``,并使用这些值来改进日志记录和配置本地训练遍历的数量:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:674 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. 
We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" +"那么,如何将配置字典从服务器发送到客户端呢?内置的 \"Flower策略\"(Flower " +"Strategies)提供了这样的方法,其工作原理与服务器端评估的工作原理类似。我们为策略提供一个函数,策略会在每一轮联邦学习中调用这个函数:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:704 +#, fuzzy +msgid "" +"Next, we'll pass this function to the FedAvg strategy before starting the" +" simulation:" +msgstr "接下来,我们只需在开始模拟前将此函数传递给 FedAvg 策略即可:" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:749 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" +"我们可以看到,客户端日志现在包含了当前一轮的联邦学习(从 ``config`` " +"字典中读取)。我们还可以将本地训练配置为在第一轮和第二轮联邦学习期间运行一个遍历,然后在第三轮联邦学习期间运行两个遍历。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:751 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." 
+msgstr "" +"客户端还可以向服务器返回任意值。为此,它们会从 ``fit`` 和/或 ``evaluate`` " +"返回一个字典。我们在本笔记中看到并使用了这一概念,但并未明确提及:我们的 ``FlowerClient`` 返回一个包含自定义键/值对的字典,作为" +" ``evaluate`` 中的第三个返回值。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:763 +msgid "Scaling federated learning" +msgstr "扩大联邦学习的规模" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:765 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "作为本笔记的最后一步,让我们看看如何使用 Flower 对大量客户端进行实验。" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:785 +msgid "" +"Note that we can reuse the ``ClientApp`` for different ``num-partitions``" +" since the Context is defined by the ``num_supernodes`` argument in " +"``run_simulation()``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:787 +#, fuzzy, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. 
We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.025``, which means that only 2.5% of " +"available clients (so 25 clients) will be selected for training each " +"round:" +msgstr "" +"现在我们有 1000 个分区,每个分区有 45 个训练数据和 5 " +"个验证数据。鉴于每个客户端上的训练示例数量较少,我们可能需要对模型进行更长时间的训练,因此我们将客户端配置为执行 3 " +"个本地训练遍历。我们还应该调整每轮训练中被选中的客户端的比例(我们不希望每轮训练都有 1000 个客户端参与),因此我们将 " +"``fraction_fit`` 调整为 ``0.025``,这意味着每轮训练只选中 2.5%的可用客户端(即 25 个客户端):" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:843 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "在本笔记中,我们看到了如何通过自定义策略、在服务器端初始化参数、选择不同的策略以及在服务器端评估模型来逐步增强我们的系统。用这么少的代码就能实现这么大的灵活性,不是吗?" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:845 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. " +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" +"在后面的章节中,我们将看到如何在服务器和客户端之间传递任意值,以完全自定义客户端执行。有了这种能力,我们使用 Flower " +"虚拟客户端引擎构建了一个大规模的联邦学习模拟,并在 Jupyter Notebook 中进行了一次实验,在相同的工作负载中运行了 1000 " +"个客户端!" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:863 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` from " +"scratch." 
+msgstr "" +"`Flower 联邦学习教程 - 第 3 部分 `__ 展示了如何从头开始构建完全自定义的 \"策略\"。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "什么是联邦学习?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" +"在本教程中,你将了解什么是联邦学习,用 Flower " +"搭建第一个系统,并逐步对其进行扩展。如果你能完成本教程的所有部分,你就能构建高级的联邦学习系统,从而接近该领域当前的技术水平。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "🧑‍🏫 本教程从零开始,不要求熟悉联邦学习。仅假定对数据科学和 Python 编程有基本了解。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" +"`Star Flower on GitHub `__ ⭐️ 并加入 Slack " +"上的开源 Flower 社区,进行交流、提问并获得帮助: 加入 Slack `__ 🌼" +" 我们希望在 ``#introductions`` 频道听到您的声音!如果有任何不清楚的地方,请访问 ``#questions`` 频道。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" +msgstr "让我们开始吧!" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "经典机器学习" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." 
+msgstr "在开始讨论联邦学习之前,让我们先快速回顾一下目前大多数机器学习的工作原理。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|ac0a9766e26044d6aea222a829859b20|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "模型和数据" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|36cd6e248b1443ce8a82b5a025bba368|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "使用数据训练模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "实际上,我们使用的训练数据并不来自我们训练模型的机器。它是在其他地方创建的。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." 
+msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|bf4fb057f4774df39e1dcb5c71fd804a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "手机上的数据" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" +"值得一提的是,这个 \"其他地方 " +"\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|71bb9f3c74c04f959b9bc1f02b736c95|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "数据存在于多种设备中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|7605632e1b0f49599ffacf841491fcfb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "集中数据收集" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." 
+msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|91b1b5a7d3484eb7a2350c1923f18307|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "集中模型训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "经典机器学习面临的挑战" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." +msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|5405ed430e4746e28b083b146fb71731|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "可集中管理" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|a389e87dab394eb48a8949aa2397687b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "无法集中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. 
Those reasons include:" +msgstr "传统的集中式机器学习方法无法满足现实世界中大量极为重要的使用案例,原因有很多。这些原因包括:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" +"**法规**: " +"GDPR(欧洲)、CCPA(加利福尼亚)、PIPEDA(加拿大)、LGPD(巴西)、PDPL(阿根廷)、KVKK(土耳其)、POPI(南非)、FSS(俄罗斯)、CDPR(中国)、PDPB(印度)、PIPA(韩国)、APPI(日本)、PDP(印度尼西亚)、PDPA(新加坡)、APP(澳大利亚)等法规保护敏感数据不被移动。事实上,这些法规有时甚至会阻止单个组织将自己的用户数据用于人工智能培训,因为这些用户生活在世界不同地区,他们的数据受不同的数据保护法规管辖。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." 
+msgstr "" +"**用户偏好**: " +"除了法规之外,在一些使用案例中,用户只是希望数据永远不会离开他们的设备。如果你在手机的数字键盘上输入密码和信用卡信息,你不会希望这些密码最终出现在开发该键盘的公司的服务器上吧?事实上,这种用例正是联邦学习发明的初衷。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." +msgstr "" +"**数据量**: " +"有些传感器(如摄像头)产生的数据量很大,收集所有数据既不可行,也不经济(例如,由于带宽或通信效率的原因)。试想一下全国铁路服务,全国有数百个火车站。如果每个火车站都安装了许多安全摄像头,那么它们所产生的大量原始设备数据就需要功能强大且极其昂贵的基础设施来处理和存储。而大部分数据甚至都是无用的。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "集中式机器学习不起作用的例子包括:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "用多家医院的敏感医疗记录训练癌症检测模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "不同组织的财务信息,以侦查财务欺诈行为" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "通过电动汽车的定位数据更好地预测续航里程" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "端到端加密信息可训练出更好的自动完成模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" 
+"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alternative" +" exists. But what can we do to apply machine learning and data science to" +" these cases to utilize private data? After all, these are all areas that" +" would benefit significantly from recent advances in AI." +msgstr "" +"像 `Brave `__浏览器或 `Signal " +"`__信息管理器这样的隐私增强系统的流行表明,用户关心隐私。事实上,他们会选择隐私性更好的产品。但是,我们能做些什么来将机器学习和数据科学应用到这些情况中,以利用隐私数据呢?毕竟,这些领域都将从人工智能的最新进展中受益匪浅。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "联邦学习" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. Here's the single-sentence " +"explanation:" +msgstr "联邦学习简单地颠覆了这种方法。它通过将训练转移到数据上,而不是将数据转移到训练上,在分布式数据上实现机器学习。下面是一句话的解释:" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "集中式机器学习:将数据转移到计算中心" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "联邦式(机器)学习:将计算转移到数据上" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. 
We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "这样,我们就能在以前不可能的领域使用机器学习(和其他数据科学方法)。现在,我们可以通过让不同的医院协同工作来训练优秀的医疗人工智能模型。我们可以通过在不同金融机构的数据上训练人工智能模型来解决金融欺诈问题。我们可以构建新颖的隐私增强型应用(如安全信息),其内置的人工智能比非隐私增强型应用更好。以上只是我想到的几个例子。随着联邦学习的部署,我们会发现越来越多的领域可以突然重获新生,因为它们现在可以访问大量以前无法访问的数据。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "那么,联邦学习究竟是如何运作的呢?让我们从直观的解释开始。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "联邦学习的五个步骤" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "步骤 0:初始化全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." 
+msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|89c412136a5146ec8dc32c0973729f12|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "初始化全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "第 1 步:将模型发送到多个连接的组织/设备(客户节点)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|9503d3dc3a144e8aa295f8800cd8a766|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "发送全局模型" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "步骤 2:在本地对每个机构/设备(客户端节点)的数据进行模型训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. 
They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." +msgstr "" +"现在,所有(选定的)客户端节点都有了最新版本的全局模型参数,它们开始进行本地训练。它们使用自己的本地数据集来训练自己的本地模型。它们不会一直训练到模型完全收敛为止,而只是训练一小段时间。这可能只是本地数据上的一个遍历,甚至只是几个步骤" +"(mini-batches)。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|aadb59e29b9e445d8e239d9a8a7045cb|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "根据本地数据进行训练" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "步骤 3:将模型参数更新返回服务器" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|a7579ad7734347508e959d9e14f2f53d|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "发送模型参数更新" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "步骤 4:将模型更新聚合到新的全局模型中" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. 
If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" +"服务器从选定的客户端节点接收模型更新。如果服务器选择了 100 个客户端节点,那么它现在就拥有 100 " +"个略有不同的原始全局模型版本,每个版本都是根据一个客户端的本地数据训练出来的。难道我们不希望有一个包含所有 100 个客户节点数据的模型吗?" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." 
+msgstr "" +"为了得到一个单一的模型,我们必须将从客户端节点收到的所有模型更新合并起来。这个过程称为*聚合*,有许多不同的方法。最基本的方法称为 " +"*Federated Averaging* (`McMahan等人,2016 " +"`__),通常缩写为*FedAvg*。*FedAvg* 可以把100 " +"个模型更新进行平均。更准确地说,它取的是模型更新的*加权平均值*,根据每个客户端用于训练的数据数量进行加权。加权对于确保每个数据示例对生成的全局模型具有相同的" +" \"影响 \"非常重要。如果一个客户端有 10 个数据点,而另一个客户有 100 个数据点,那么在不加权的情况下,10 个示例对全局模型的影响是" +" 100 个示例的 10 倍。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|73d15dd1d4fc41678b2d54815503fbe8|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "聚合模型参数更新" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "步骤 5:重复步骤 1 至 4,直至模型收敛" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" +"步骤 1 至 4 就是我们所说的单轮联邦学习。全局模型参数被发送到参与的客户端节点(第 1 步),客户端节点对其本地数据进行训练(第 2 " +"步),然后将更新后的模型发送到服务器(第 3 步),服务器汇总模型更新,得到新版本的全局模型(第 4 步)。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." 
+msgstr ""
+"在一轮迭代中,每个参与迭代的客户节点只训练一小段时间。这意味着,在聚合步骤(步骤 "
+"4)之后,我们的模型已经在所有参与的客户节点的所有数据上训练过了,但只训练了一小会儿。然后,我们必须一次又一次地重复这一训练过程,最终得到一个经过全面训练的模型,该模型在所有客户节点的数据中都表现良好。"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289
+msgid ""
+"Congratulations, you now understand the basics of federated learning. "
+"There's a lot more to discuss, of course, but that was federated learning"
+" in a nutshell. In later parts of this tutorial, we will go into more "
+"detail. Interesting questions include: How can we select the best client "
+"nodes that should participate in the next round? What's the best way to "
+"aggregate model updates? How can we handle failing client nodes "
+"(stragglers)?"
+msgstr ""
+"恭喜您,现在您已经了解了联邦学习的基础知识。当然,要讨论的内容还有很多,但这只是联邦学习的一个缩影。在本教程的后半部分,我们将进行更详细的介绍。有趣的问题包括:"
+"我们如何选择最好的客户端节点参与下一轮学习?聚合模型更新的最佳方法是什么?如何处理失败的客户端节点(落伍者)?"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294
+msgid ""
+"Just like we can train a model on the decentralized data of different "
+"client nodes, we can also evaluate the model on that data to receive "
+"valuable metrics. This is called federated evaluation, sometimes "
+"abbreviated as FE. In fact, federated evaluation is an integral part of "
+"most federated learning systems."
+msgstr ""
+"就像我们可以在不同客户节点的分散数据上训练一个模型一样,我们也可以在这些数据上对模型进行评估,以获得有价值的指标。这就是所谓的联邦评估,有时简称为"
+" FE。事实上,联邦评估是大多数联邦学习系统不可或缺的一部分。"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297
+msgid "Federated analytics"
+msgstr "联邦分析"
+
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299
+msgid ""
+"In many cases, machine learning isn't necessary to derive value from "
+"data. Data analysis can yield valuable insights, but again, there's often"
+" not enough data to get a clear answer. What's the average age at which "
+"people develop a certain type of health condition? Federated analytics "
+"enables such queries over multiple client nodes. 
It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "在很多情况下,机器学习并不是从数据中获取价值的必要条件。数据分析可以产生有价值的见解,但同样,往往没有足够的数据来获得明确的答案。人们患某种健康疾病的平均年龄是多少?联邦分析可以通过多个客户端节点进行此类查询。它通常与安全聚合等其他隐私增强技术结合使用,以防止服务器看到单个客户端节点提交的结果。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" +"差分隐私(DP)经常在联邦学习中被提及。这是一种在分析和共享统计数据时使用的隐私保护方法,可确保单个参与者的隐私。DP " +"通过在模型更新中添加统计噪声来实现这一目的,确保任何个体参与者的信息都无法被区分或重新识别。这种技术可被视为一种优化,提供了一种可量化的隐私保护措施。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "Flower" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." 
+msgstr "" +"联邦学习、联邦评估和联邦分析需要基础框架来来回移动机器学习模型,在本地数据上对其进行训练和评估,然后汇总更新的模型。Flower " +"提供的基础架构正是以简单、可扩展和安全的方式实现这些目标的。简而言之,Flower " +"为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|55472eef61274ba1b739408607e109df|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "Flower联邦学习服务器和客户端节点(汽车、滑板车、个人电脑、roomba 和电话)" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "恭喜您,您刚刚了解了联邦学习的基础知识,以及它与传统(集中式)机器学习的关系!" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "在本教程的下一部分,我们将用 Flower 建立第一个联邦学习系统。" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" +"在继续之前,请务必加入 Slack 上的 Flower 社区:`Join Slack `__" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" +"`Flower 联邦学习教程 - 第 1 部分 `__ 展示了如何使用 PyTorch 和 Flower " +"构建一个简单的联邦学习系统。" + +#~ msgid "Before the release" +#~ msgstr "发布前" + +#~ msgid "" +#~ "Update the changelog (``changelog.md``) with" +#~ " all relevant changes that happened " +#~ "after the last release. 
If the " +#~ "last release was tagged ``v1.2.0``, you" +#~ " can use the following URL to " +#~ "see all commits that got merged " +#~ "into ``main`` since then:" +#~ msgstr "" +#~ "更新更新日志 (``changelog.md``),加入上次发布后发生的所有相关变更。如果上次发布的版本被标记为 " +#~ "``v1.2.0``,则可以使用以下 URL 查看此后合并到 ``main`` 的所有提交:" + +#~ msgid "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" +#~ msgstr "" +#~ "`GitHub: Compare v1.2.0...main " +#~ "`_" + +#~ msgid "" +#~ "Thank the authors who contributed since" +#~ " the last release. This can be " +#~ "done by running the ``./dev/add-" +#~ "shortlog.sh`` convenience script (it can " +#~ "be ran multiple times and will " +#~ "update the names in the list if" +#~ " new contributors were added in the" +#~ " meantime)." +#~ msgstr "" +#~ "感谢自上次发布以来做出贡献的作者。可以通过运行 ``./dev/add-shortlog.sh`` " +#~ "方便脚本来完成(可以多次运行,如果在此期间有新的贡献者加入,则会更新列表中的名字)。" + +#~ msgid "" +#~ "Update the ``changelog.md`` section header " +#~ "``Unreleased`` to contain the version " +#~ "number and date for the release " +#~ "you are building. Create a pull " +#~ "request with the change." +#~ msgstr "" +#~ "更新 ``changelog.md`` 部分的标题 ``Unreleased`` " +#~ "以包含你正在构建的版本的版本号和日期。创建一个包含更改的拉取请求。" + +#~ msgid "" +#~ "Second, create a virtual environment " +#~ "(and activate it). If you chose to" +#~ " use :code:`pyenv` (with the :code" +#~ ":`pyenv-virtualenv` plugin) and already " +#~ "have it installed , you can use" +#~ " the following convenience script (by " +#~ "default it will use :code:`Python " +#~ "3.8.17`, but you can change it by" +#~ " providing a specific :code:``)::" +#~ msgstr "" +#~ "其次,创建虚拟环境(并激活它)。如果您选择使用 :code:`pyenv`(使用 :code:`pyenv-" +#~ "virtualenv`插件),并且已经安装了该插件,则可以使用下面的便捷脚本(默认情况下使用 " +#~ ":code:`Python3.8.17`,但您可以通过提供特定的 :code:`<版本>`来更改)::" + +#~ msgid "flwr (Python API reference)" +#~ msgstr "flwr(Python API 参考)" + +#~ msgid "..." +#~ msgstr "..." 
+ +#~ msgid "Starting a client with an insecure server connection:" +#~ msgstr "使用不安全的服务器连接启动客户端:" + +#~ msgid "server.strategy.FedAvg" +#~ msgstr "server.strategy.FedAvg" + +#~ msgid "server.strategy.FedAvgM" +#~ msgstr "server.strategy.FedAvgM" + +#~ msgid "Configurable FedAvg with Momentum strategy implementation." +#~ msgstr "可配置的 FedAvg 动量策略实施。" + +#~ msgid "Fraction of clients used during training. Defaults to 0.1." +#~ msgstr "训练期间使用客户的比例。默认为 0.1。" + +#~ msgid "Fraction of clients used during validation. Defaults to 0.1." +#~ msgstr "验证过程中使用的客户端比例。默认为 0.1。" + +#~ msgid "server.strategy.FedMedian" +#~ msgstr "server.strategy.FedMedian" + +#~ msgid "server.strategy.QFedAvg" +#~ msgstr "server.strategy.QFedAvg" + +#~ msgid "server.strategy.FedOpt" +#~ msgstr "server.strategy.FedOpt" + +#~ msgid "Configurable FedAdagrad strategy implementation." +#~ msgstr "可配置的 FedAdagrad 策略实施。" + +#~ msgid "Federated Optim strategy interface." +#~ msgstr "Federated Optim 策略界面。" + +#~ msgid "server.strategy.FedProx" +#~ msgstr "server.strategy.FedProx" + +#~ msgid "Configurable FedProx strategy implementation." +#~ msgstr "可配置的 FedProx 策略实施。" + +#~ msgid "server.strategy.FedAdagrad" +#~ msgstr "server.strategy.FedAdagrad" + +#~ msgid "Paper: https://arxiv.org/abs/2003.00295" +#~ msgstr "论文: https://arxiv.org/abs/2003.00295" + +#~ msgid "Federated learning strategy using Adagrad on server-side." +#~ msgstr "在服务器端使用 Adagrad 的联邦学习策略。" + +#~ msgid "server.strategy.FedAdam" +#~ msgstr "server.strategy.FedAdam" + +#~ msgid "server.strategy.FedYogi" +#~ msgstr "server.strategy.FedYogi" + +#~ msgid "Adaptive Federated Optimization using Yogi." +#~ msgstr "使用 Yogi 的自适应联合优化。" + +#~ msgid "Federated learning strategy using Yogi on server-side." 
+#~ msgstr "在服务器端使用 Yogi 的联邦学习策略。" + +#~ msgid "Paper: https://arxiv.org/abs/1803.01498" +#~ msgstr "论文:https://arxiv.org/abs/1803.01498" + +#~ msgid "server.strategy.Krum" +#~ msgstr "server.strategy.Krum" + +#~ msgid "Configurable Krum strategy implementation." +#~ msgstr "可配置的 Krum 策略实施。" + +#~ msgid "server.strategy.Bulyan" +#~ msgstr "server.strategy.Bulyan" + +#~ msgid "Bulyan strategy implementation." +#~ msgstr "Bulyan策略的实施。" + +#~ msgid "server.strategy.FedXgbNnAvg" +#~ msgstr "server.strategy.FedXgbNnAvg" + +#~ msgid "Federated XGBoost [Ma et al., 2023] strategy." +#~ msgstr "Federated XGBoost [Ma 等人,2023] 策略。" + +#~ msgid "server.strategy.DPFedAvgAdaptive" +#~ msgstr "server.strategy.DPFedAvgAdaptive" + +#~ msgid "" +#~ "**Fix the incorrect return types of " +#~ "Strategy** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" +#~ msgstr "" +#~ "**修复策略的错误返回类型** " +#~ "([#2432](https://github.com/adap/flower/pull/2432/files))" + +#~ msgid "" +#~ "The types of the return values in" +#~ " the docstrings in two methods " +#~ "(`aggregate_fit` and `aggregate_evaluate`) now " +#~ "match the hint types in the code." +#~ msgstr "" +#~ "两个方法(\"aggregate_fit \"和 " +#~ "\"aggregate_evaluate\")的文档说明中的返回值类型现在与代码中的提示类型一致。" + +#~ msgid "" +#~ "**Update Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" +#~ msgstr "" +#~ "** 更新 Flower Examples** " +#~ "([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425)," +#~ " [#2526](https://github.com/adap/flower/pull/2526))" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. 
In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" +#~ "对于客户端就需要做这么多。我们仅需要实现 " +#~ ":code:`Client`或者:code:`NumPyClient`然后调用:code:`fl.client.start_client()`。字符串" +#~ " :code:`\"0.0.0.0:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用:code:`\"0.0.0.0:8080\"`。如果我们运行真正联邦学习的工作负载,服务器和客户端在不同的机器上运行,则需要更改的只是我们传递给客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." 
+#~ msgstr "" +#~ "对于客户来说就是这样了。我们只需实现 :code:`Client` 或 " +#~ ":code:`NumPyClient` 并调用:code:`fl.client.start_client()` " +#~ "即可。字符串 :code:`\"[::]:8080\"` " +#~ "告诉客户端要连接到哪个服务器。在我们的例子中,我们可以在同一台机器上运行服务器和客户端,因此我们使用 " +#~ ":code:`\"[::]:8080\"`。如果我们运行真正联邦的工作负载,服务器和客户端运行在不同的机器上,则需要更改的只是我们指向客户端的" +#~ " server_address 。" + +#~ msgid "" +#~ "Let's now load the CIFAR-10 training " +#~ "and test set, partition them into " +#~ "ten smaller datasets (each split into" +#~ " training and validation set), and " +#~ "wrap the resulting partitions by " +#~ "creating a PyTorch ``DataLoader`` for " +#~ "each of them:" +#~ msgstr "" +#~ "现在,让我们加载 CIFAR-10 训练集和测试集,将它们分割成 10 " +#~ "个较小的数据集(每个数据集又分为训练集和验证集),并通过为每个数据集创建 PyTorch " +#~ "``DataLoader`` 来包装由此产生的分割集:" + +#~ msgid "|e1dd4b4129b040bea23a894266227080|" +#~ msgstr "|e1dd4b4129b040bea23a894266227080|" + +#~ msgid "|c0d4cc6a442948dca8da40d2440068d9|" +#~ msgstr "|c0d4cc6a442948dca8da40d2440068d9|" + +#~ msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" +#~ msgstr "|174e1e4fa1f149a19bfbc8bc1126f46a|" + +#~ msgid "|4e021a3dc08249d2a89daa3ab03c2714|" +#~ msgstr "|4e021a3dc08249d2a89daa3ab03c2714|" + +#~ msgid "|e74a1d5ce7eb49688651f2167a59065b|" +#~ msgstr "|e74a1d5ce7eb49688651f2167a59065b|" + +#~ msgid "|eb29ec4c7aef4e93976795ed72df647e|" +#~ msgstr "|eb29ec4c7aef4e93976795ed72df647e|" + +#~ msgid "|c2f699d8ac484f5081721a6f1511f70d|" +#~ msgstr "|c2f699d8ac484f5081721a6f1511f70d|" + +#~ msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" +#~ msgstr "|cf42accdacbf4e5eb4fa0503108ba7a7|" + +#~ msgid "|5ec8356bc2564fa09178b1ceed5beccc|" +#~ msgstr "|5ec8356bc2564fa09178b1ceed5beccc|" + +#~ msgid "|7c9329e97bd0430bad335ab605a897a7|" +#~ msgstr "|7c9329e97bd0430bad335ab605a897a7|" + +#~ msgid "|88002bbce1094ba1a83c9151df18f707|" +#~ msgstr "|88002bbce1094ba1a83c9151df18f707|" + +#~ msgid "|391766aee87c482c834c93f7c22225e2|" +#~ msgstr "|391766aee87c482c834c93f7c22225e2|" + +#~ msgid "|93b9a15bd27f4e91b40f642c253dfaac|" +#~ msgstr 
"|93b9a15bd27f4e91b40f642c253dfaac|" + +#~ msgid "|a23d9638f96342ef9d25209951e2d564|" +#~ msgstr "|a23d9638f96342ef9d25209951e2d564|" + +#~ msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" +#~ msgstr "上传 whl(例如 ``flwr-1.6.0-py3-none-any.whl``)" + +#~ msgid "" +#~ "Change ``!pip install -q 'flwr[simulation]'" +#~ " torch torchvision matplotlib`` to ``!pip" +#~ " install -q 'flwr-1.6.0-py3-none-" +#~ "any.whl[simulation]' torch torchvision matplotlib``" +#~ msgstr "" +#~ "将``!pip install -q 'flwr[simulation]' torch" +#~ " torchvision matplotlib``更改为``!pip install -q " +#~ "'flwr-1.6.0-py3-none-any.whl[simulation]' torch " +#~ "torch torchvision matplotlib``" + +#~ msgid "" +#~ "All that's left to do it to " +#~ "define a function that loads both " +#~ "model and data, creates a " +#~ ":code:`CifarClient`, and starts this client." +#~ " You load your data and model " +#~ "by using :code:`cifar.py`. Start " +#~ ":code:`CifarClient` with the function " +#~ ":code:`fl.client.start_numpy_client()` by pointing " +#~ "it at the same IP address we " +#~ "used in :code:`server.py`:" +#~ msgstr "" +#~ "剩下要做的就是定义一个加载模型和数据的函数,创建一个 :code:`CifarClient` 并启动该客户端。使用" +#~ " :code:`cifar.py` 加载数据和模型。使用函数 " +#~ ":code:`fl.client.start_numpy_client()` 启动 " +#~ ":code:`CifarClient`,将其指向我们在 :code:`server.py` 中使用的相同 " +#~ "IP 地址:" + +#~ msgid "" +#~ "The :code:`VirtualClientEngine` schedules, launches" +#~ " and manages `virtual` clients. These " +#~ "clients are identical to `non-virtual`" +#~ " clients (i.e. 
the ones you launch" +#~ " via the command `flwr.client.start_numpy_client" +#~ " `_)" +#~ " in the sense that they can be" #~ " configure by creating a class " #~ "inheriting, for example, from " #~ "`flwr.client.NumPyClient `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " -#~ "`_\" " -#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " -#~ "管理的客户端还包括:" +#~ "代码:`VirtualClientEngine`调度、启动和管理`虚拟`客户端。这些客户端与 \"非虚拟 " +#~ "\"客户端(即通过命令 `flwr.client.start_numpy_client `_启动的客户端)完全相同,它们可以通过创建一个继承自 \"flwr.client.NumPyClient " +#~ "`_\" " +#~ "的类来配置,因此行为方式也完全相同。除此之外,由 :code:`VirtualClientEngine` " +#~ "管理的客户端还包括:" + +#~ msgid "Example: Walk-Through PyTorch & MNIST" +#~ msgstr "实例: PyTorch 和 MNIST 的演练" + +#~ msgid "" +#~ "In this tutorial we will learn, " +#~ "how to train a Convolutional Neural " +#~ "Network on MNIST using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead an install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" + +#~ msgid "Ready... Set... Train!" +#~ msgstr "准备...设置...训练!" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Basic MNIST Example" +#~ " `_. " +#~ "This will allow you see how easy" +#~ " it is to wrap your code with" +#~ " Flower and begin training in a " +#~ "federated way. We provide you with " +#~ "two helper scripts, namely *run-" +#~ "server.sh*, and *run-clients.sh*. Don't " +#~ "be afraid to look inside, they are" +#~ " simple enough =)." 
+#~ msgstr "" +#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的 `Basic MNIST Example " +#~ "`_。您会发现用 " +#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" +#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" + +#~ msgid "" +#~ "Go ahead and launch on a terminal" +#~ " the *run-server.sh* script first as" +#~ " follows:" +#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" + +#~ msgid "Now that the server is up and running, go ahead and launch the clients." +#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" + +#~ msgid "" +#~ "Et voilà! You should be seeing the" +#~ " training procedure and, after a few" +#~ " iterations, the test accuracy for " +#~ "each client." +#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" + +#~ msgid "Now, let's see what is really happening inside." +#~ msgstr "现在,让我们看看里面到底发生了什么。" + +#~ msgid "" +#~ "Inside the server helper script *run-" +#~ "server.sh* you will find the following" +#~ " code that basically runs the " +#~ ":code:`server.py`" +#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" + +#~ msgid "" +#~ "We can go a bit deeper and " +#~ "see that :code:`server.py` simply launches " +#~ "a server that will coordinate three " +#~ "rounds of training. Flower Servers are" +#~ " very customizable, but for simple " +#~ "workloads, we can start a server " +#~ "using the :ref:`start_server ` function and leave " +#~ "all the configuration possibilities at " +#~ "their default values, as seen below." +#~ msgstr "" +#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " +#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" + +#~ msgid "" +#~ "Next, let's take a look at the " +#~ "*run-clients.sh* file. You will see " +#~ "that it contains the main loop " +#~ "that starts a set of *clients*." +#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" + +#~ msgid "" +#~ "**cid**: is the client ID. 
It is"
+#~ " an integer that uniquely identifies "
+#~ "client identifier."
+#~ msgstr "**cid**:是客户端 ID。它是一个能唯一标识该客户端的整数。"
+
+#~ msgid "**sever_address**: String that identifies IP and port of the server."
+#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。"
+
+#~ msgid ""
+#~ "**nb_clients**: This defines the number "
+#~ "of clients being created. This piece "
+#~ "of information is not required by "
+#~ "the client, but it helps us "
+#~ "partition the original MNIST dataset to"
+#~ " make sure that every client is "
+#~ "working on unique subsets of both "
+#~ "*training* and *test* sets."
+#~ msgstr ""
+#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 "
+#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*"
+#~ " 数据集上有独立的数据。"
+
+#~ msgid ""
+#~ "Again, we can go deeper and look"
+#~ " inside :code:`flwr_example/quickstart-"
+#~ "pytorch/client.py`. After going through the"
+#~ " argument parsing code at the "
+#~ "beginning of our :code:`main` function, "
+#~ "you will find a call to "
+#~ ":code:`mnist.load_data`. This function is "
+#~ "responsible for partitioning the original "
+#~ "MNIST datasets (*training* and *test*) "
+#~ "and returning a :code:`torch.utils.data.DataLoader`"
+#~ " s for each of them. We then"
+#~ " instantiate a :code:`PytorchMNISTClient` object"
+#~ " with our client ID, our DataLoaders,"
+#~ " the number of epochs in each "
+#~ "round, and which device we want to"
+#~ " use for training (CPU or GPU)." 
+#~ msgstr "" +#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" +#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" +#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " +#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " +#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " +#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " +#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" + +#~ msgid "" +#~ "The :code:`PytorchMNISTClient` object when " +#~ "finally passed to :code:`fl.client.start_client` " +#~ "along with the server's address as " +#~ "the training process begins." +#~ msgstr "" +#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " +#~ ":code:`fl.client.start_client`。" + +#~ msgid "A Closer Look" +#~ msgstr "仔细看一下" + +#~ msgid "" +#~ "Now, let's look closely into the " +#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" +#~ ".quickstart-pytorch.mnist` and see what it" +#~ " is doing:" +#~ msgstr "" +#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" +#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" + +#~ msgid "" +#~ "The first thing to notice is that" +#~ " :code:`PytorchMNISTClient` instantiates a CNN" +#~ " model inside its constructor" +#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" + +#~ msgid "" +#~ "The code for the CNN is available" +#~ " under :code:`quickstart-pytorch.mnist` and " +#~ "it is reproduced below. It is the" +#~ " same network found in `Basic MNIST" +#~ " Example " +#~ "`_." 
+#~ msgstr "" +#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " +#~ "下找到,现复制如下。它与 `Basic MNIST Example " +#~ "`_中的网络相同。" + +#~ msgid "" +#~ "The second thing to notice is that" +#~ " :code:`PytorchMNISTClient` class inherits from" +#~ " the :code:`fl.client.Client`, and hence it" +#~ " must implement the following methods:" +#~ msgstr "" +#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " +#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" + +#~ msgid "" +#~ "When comparing the abstract class to " +#~ "its derived class :code:`PytorchMNISTClient` " +#~ "you will notice that :code:`fit` calls" +#~ " a :code:`train` function and that " +#~ ":code:`evaluate` calls a :code:`test`: " +#~ "function." +#~ msgstr "" +#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " +#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " +#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" + +#~ msgid "" +#~ "These functions can both be found " +#~ "inside the same :code:`quickstart-" +#~ "pytorch.mnist` module:" +#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" + +#~ msgid "" +#~ "Observe that these functions encapsulate " +#~ "regular training and test loops and " +#~ "provide :code:`fit` and :code:`evaluate` with" +#~ " final statistics for each round. You" +#~ " could substitute them with your " +#~ "custom train and test loops and " +#~ "change the network architecture, and the" +#~ " entire example would still work " +#~ "flawlessly. As a matter of fact, " +#~ "why not try and modify the code" +#~ " to an example of your liking?" +#~ msgstr "" +#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " +#~ ":code:`evaluate` " +#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" 
+ +#~ msgid "Give It a Try" +#~ msgstr "试试看" + +#~ msgid "" +#~ "Looking through the quickstart code " +#~ "description above will have given a " +#~ "good understanding of how *clients* and" +#~ " *servers* work in Flower, how to " +#~ "run a simple experiment, and the " +#~ "internals of a client wrapper. Here " +#~ "are a few things you could try " +#~ "on your own and get more " +#~ "experience with Flower:" +#~ msgstr "" +#~ "通过上面的快速入门代码描述,你将对 Flower " +#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" +#~ " Flower 的经验:" + +#~ msgid "" +#~ "Try and change :code:`PytorchMNISTClient` so" +#~ " it can accept different architectures." +#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" + +#~ msgid "" +#~ "Modify the :code:`train` function so " +#~ "that it accepts different optimizers" +#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" + +#~ msgid "" +#~ "Modify the :code:`test` function so that" +#~ " it proves not only the top-1 " +#~ "(regular accuracy) but also the top-5" +#~ " accuracy?" +#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" + +#~ msgid "" +#~ "Go larger! Try to adapt the code" +#~ " to larger images and datasets. Why" +#~ " not try training on ImageNet with" +#~ " a ResNet-50?" +#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" + +#~ msgid "You are ready now. Enjoy learning in a federated way!" +#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" + +#~ msgid "Differential privacy" +#~ msgstr "差别隐私" + +#~ msgid "" +#~ "Flower provides differential privacy (DP) " +#~ "wrapper classes for the easy integration" +#~ " of the central DP guarantees " +#~ "provided by DP-FedAvg into training " +#~ "pipelines defined in any of the " +#~ "various ML frameworks that Flower is " +#~ "compatible with." 
+#~ msgstr "" +#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " +#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " +#~ "框架中定义的训练模式中。" + +#~ msgid "" +#~ "Please note that these components are" +#~ " still experimental; the correct " +#~ "configuration of DP for a specific " +#~ "task is still an unsolved problem." +#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" + +#~ msgid "" +#~ "The name DP-FedAvg is misleading " +#~ "since it can be applied on top " +#~ "of any FL algorithm that conforms " +#~ "to the general structure prescribed by" +#~ " the FedOpt family of algorithms." +#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" + +#~ msgid "DP-FedAvg" +#~ msgstr "DP-FedAvg" + +#~ msgid "" +#~ "DP-FedAvg, originally proposed by " +#~ "McMahan et al. [mcmahan]_ and extended" +#~ " by Andrew et al. [andrew]_, is " +#~ "essentially FedAvg with the following " +#~ "modifications." +#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" + +#~ msgid "" +#~ "**Clipping** : The influence of each " +#~ "client's update is bounded by clipping" +#~ " it. This is achieved by enforcing" +#~ " a cap on the L2 norm of " +#~ "the update, scaling it down if " +#~ "needed." +#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" + +#~ msgid "" +#~ "**Noising** : Gaussian noise, calibrated " +#~ "to the clipping threshold, is added " +#~ "to the average computed at the " +#~ "server." +#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" + +#~ msgid "" +#~ "The distribution of the update norm " +#~ "has been shown to vary from " +#~ "task-to-task and to evolve as " +#~ "training progresses. This variability is " +#~ "crucial in understanding its impact on" +#~ " differential privacy guarantees, emphasizing " +#~ "the need for an adaptive approach " +#~ "[andrew]_ that continuously adjusts the " +#~ "clipping threshold to track a " +#~ "prespecified quantile of the update norm" +#~ " distribution." 
+#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" + +#~ msgid "Simplifying Assumptions" +#~ msgstr "简化假设" + +#~ msgid "" +#~ "We make (and attempt to enforce) a" +#~ " number of assumptions that must be" +#~ " satisfied to ensure that the " +#~ "training process actually realizes the " +#~ ":math:`(\\epsilon, \\delta)` guarantees the " +#~ "user has in mind when configuring " +#~ "the setup." +#~ msgstr "" +#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " +#~ ":math:`(\\epsilon,\\delta)` 。" + +#~ msgid "" +#~ "**Fixed-size subsampling** :Fixed-size " +#~ "subsamples of the clients must be " +#~ "taken at each round, as opposed to" +#~ " variable-sized Poisson subsamples." +#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" + +#~ msgid "" +#~ "**Unweighted averaging** : The contributions" +#~ " from all the clients must weighted" +#~ " equally in the aggregate to " +#~ "eliminate the requirement for the server" +#~ " to know in advance the sum of" +#~ " the weights of all clients available" +#~ " for selection." +#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" + +#~ msgid "" +#~ "**No client failures** : The set " +#~ "of available clients must stay constant" +#~ " across all rounds of training. In" +#~ " other words, clients cannot drop out" +#~ " or fail." +#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" + +#~ msgid "" +#~ "The first two are useful for " +#~ "eliminating a multitude of complications " +#~ "associated with calibrating the noise to" +#~ " the clipping threshold, while the " +#~ "third one is required to comply " +#~ "with the assumptions of the privacy " +#~ "analysis." +#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" + +#~ msgid "" +#~ "These restrictions are in line with " +#~ "constraints imposed by Andrew et al. " +#~ "[andrew]_." 
+#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" + +#~ msgid "Customizable Responsibility for Noise injection" +#~ msgstr "可定制的噪声注入" + +#~ msgid "" +#~ "In contrast to other implementations " +#~ "where the addition of noise is " +#~ "performed at the server, you can " +#~ "configure the site of noise injection" +#~ " to better match your threat model." +#~ " We provide users with the " +#~ "flexibility to set up the training " +#~ "such that each client independently adds" +#~ " a small amount of noise to the" +#~ " clipped update, with the result that" +#~ " simply aggregating the noisy updates " +#~ "is equivalent to the explicit addition" +#~ " of noise to the non-noisy " +#~ "aggregate at the server." +#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" + +#~ msgid "" +#~ "To be precise, if we let :math:`m`" +#~ " be the number of clients sampled " +#~ "each round and :math:`\\sigma_\\Delta` be " +#~ "the scale of the total Gaussian " +#~ "noise that needs to be added to" +#~ " the sum of the model updates, " +#~ "we can use simple maths to show" +#~ " that this is equivalent to each " +#~ "client adding noise with scale " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." +#~ msgstr "" +#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " +#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " +#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" + +#~ msgid "Wrapper-based approach" +#~ msgstr "基于封装的方法" + +#~ msgid "" +#~ "Introducing DP to an existing workload" +#~ " can be thought of as adding an" +#~ " extra layer of security around it." +#~ " This inspired us to provide the " +#~ "additional server and client-side logic" +#~ " needed to make the training process" +#~ " differentially private as wrappers for " +#~ "instances of the :code:`Strategy` and " +#~ ":code:`NumPyClient` abstract classes respectively." 
+#~ " This wrapper-based approach has the" +#~ " advantage of being easily composable " +#~ "with other wrappers that someone might" +#~ " contribute to the Flower library in" +#~ " the future, e.g., for secure " +#~ "aggregation. Using Inheritance instead can " +#~ "be tedious because that would require" +#~ " the creation of new sub- classes " +#~ "every time a new class implementing " +#~ ":code:`Strategy` or :code:`NumPyClient` is " +#~ "defined." +#~ msgstr "" +#~ "在现有工作负载中引入 DP " +#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " +#~ ":code:`Strategy` 和 :code:`NumPyClient` " +#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " +#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" +#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" + +#~ msgid "" +#~ "The first version of our solution " +#~ "was to define a decorator whose " +#~ "constructor accepted, among other things, " +#~ "a boolean-valued variable indicating " +#~ "whether adaptive clipping was to be " +#~ "enabled or not. We quickly realized " +#~ "that this would clutter its " +#~ ":code:`__init__()` function with variables " +#~ "corresponding to hyperparameters of adaptive" +#~ " clipping that would remain unused " +#~ "when it was disabled. A cleaner " +#~ "implementation could be achieved by " +#~ "splitting the functionality into two " +#~ "decorators, :code:`DPFedAvgFixed` and " +#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" +#~ " classing the former. The constructors " +#~ "for both classes accept a boolean " +#~ "parameter :code:`server_side_noising`, which, as " +#~ "the name suggests, determines where " +#~ "noising is to be performed." 
+#~ msgstr "" +#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " +#~ ":code:`__init__()` " +#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" +#~ " :code:`DPFedAvgFixed` 和 " +#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " +#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" + +#~ msgid "" +#~ "The server-side capabilities required " +#~ "for the original version of DP-" +#~ "FedAvg, i.e., the one which performed" +#~ " fixed clipping, can be completely " +#~ "captured with the help of wrapper " +#~ "logic for just the following two " +#~ "methods of the :code:`Strategy` abstract " +#~ "class." +#~ msgstr "" +#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" +#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : The config " +#~ "dictionary being sent by the wrapped " +#~ ":code:`Strategy` to each client needs to" +#~ " be augmented with an additional " +#~ "value equal to the clipping threshold" +#~ " (keyed under :code:`dpfedavg_clip_norm`) and," +#~ " if :code:`server_side_noising=true`, another one" +#~ " equal to the scale of the " +#~ "Gaussian noise that needs to be " +#~ "added at the client (keyed under " +#~ ":code:`dpfedavg_noise_stddev`). This entails " +#~ "*post*-processing of the results returned " +#~ "by the wrappee's implementation of " +#~ ":code:`configure_fit()`." +#~ msgstr "" +#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " +#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " +#~ "下键入)进行扩充。并且,如果 " +#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " +#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " +#~ "所返回的结果进行后处理。" + +#~ msgid "" +#~ ":code:`aggregate_fit()`: We check whether any" +#~ " of the sampled clients dropped out" +#~ " or failed to upload an update " +#~ "before the round timed out. 
In " +#~ "that case, we need to abort the" +#~ " current round, discarding any successful" +#~ " updates that were received, and move" +#~ " on to the next one. On the " +#~ "other hand, if all clients responded " +#~ "successfully, we must force the " +#~ "averaging of the updates to happen " +#~ "in an unweighted manner by intercepting" +#~ " the :code:`parameters` field of " +#~ ":code:`FitRes` for each received update " +#~ "and setting it to 1. Furthermore, " +#~ "if :code:`server_side_noising=true`, each update " +#~ "is perturbed with an amount of " +#~ "noise equal to what it would have" +#~ " been subjected to had client-side" +#~ " noising being enabled. This entails " +#~ "*pre*-processing of the arguments to " +#~ "this method before passing them on " +#~ "to the wrappee's implementation of " +#~ ":code:`aggregate_fit()`." +#~ msgstr "" +#~ ":code:`aggregate_fit()`: " +#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" +#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " +#~ "1,强制以不加权的方式平均更新。此外,如果 " +#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" +#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " +#~ "之前,对参数进行*预*处理。" + +#~ msgid "" +#~ "We can't directly change the aggregation" +#~ " function of the wrapped strategy to" +#~ " force it to add noise to the" +#~ " aggregate, hence we simulate client-" +#~ "side noising to implement server-side" +#~ " noising." +#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" + +#~ msgid "" +#~ "These changes have been put together " +#~ "into a class called :code:`DPFedAvgFixed`, " +#~ "whose constructor accepts the strategy " +#~ "being decorated, the clipping threshold " +#~ "and the number of clients sampled " +#~ "every round as compulsory arguments. 
The" +#~ " user is expected to specify the " +#~ "clipping threshold since the order of" +#~ " magnitude of the update norms is " +#~ "highly dependent on the model being " +#~ "trained and providing a default value" +#~ " would be misleading. The number of" +#~ " clients sampled at every round is" +#~ " required to calculate the amount of" +#~ " noise that must be added to " +#~ "each individual update, either by the" +#~ " server or the clients." +#~ msgstr "" +#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " +#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" + +#~ msgid "" +#~ "The additional functionality required to " +#~ "facilitate adaptive clipping has been " +#~ "provided in :code:`DPFedAvgAdaptive`, a " +#~ "subclass of :code:`DPFedAvgFixed`. It " +#~ "overrides the above-mentioned methods to" +#~ " do the following." +#~ msgstr "" +#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " +#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" + +#~ msgid "" +#~ ":code:`configure_fit()` : It intercepts the" +#~ " config dict returned by " +#~ ":code:`super.configure_fit()` to add the " +#~ "key-value pair " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " +#~ "which the client interprets as an " +#~ "instruction to include an indicator bit" +#~ " (1 if update norm <= clipping " +#~ "threshold, 0 otherwise) in the results" +#~ " returned by it." +#~ msgstr "" +#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " +#~ "返回的 config 字典,并在其中添加键-值对 " +#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" +#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" + +#~ msgid "" +#~ ":code:`aggregate_fit()` : It follows a " +#~ "call to :code:`super.aggregate_fit()` with one" +#~ " to :code:`__update_clip_norm__()`, a procedure" +#~ " which adjusts the clipping threshold " +#~ "on the basis of the indicator bits" +#~ " received from the sampled clients." 
+#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" + +#~ msgid "" +#~ "The client-side capabilities required " +#~ "can be completely captured through " +#~ "wrapper logic for just the :code:`fit()`" +#~ " method of the :code:`NumPyClient` abstract" +#~ " class. To be precise, we need " +#~ "to *post-process* the update computed" +#~ " by the wrapped client to clip " +#~ "it, if necessary, to the threshold " +#~ "value supplied by the server as " +#~ "part of the config dictionary. In " +#~ "addition to this, it may need to" +#~ " perform some extra work if either" +#~ " (or both) of the following keys " +#~ "are also present in the dict." +#~ msgstr "" +#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " +#~ ":code:`fit()` " +#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" + +#~ msgid "" +#~ ":code:`dpfedavg_noise_stddev` : Generate and " +#~ "add the specified amount of noise " +#~ "to the clipped update." +#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" + +#~ msgid "" +#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" +#~ " metrics dict in the :code:`FitRes` " +#~ "object being returned to the server " +#~ "with an indicator bit, calculated as " +#~ "described earlier." +#~ msgstr "" +#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" +#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" + +#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" + +#~ msgid "" +#~ "Assume you have trained for :math:`n`" +#~ " rounds with sampling fraction :math:`q`" +#~ " and noise multiplier :math:`z`. In " +#~ "order to calculate the :math:`\\epsilon` " +#~ "value this would result in for a" +#~ " particular :math:`\\delta`, the following " +#~ "script may be used." 
+#~ msgstr "" +#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " +#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" +#~ " 值,可以使用下面的脚本。" + +#~ msgid "Flower driver SDK." +#~ msgstr "Flower 服务器。" + +#~ msgid "driver" +#~ msgstr "服务器" + +#~ msgid "Get task results." +#~ msgstr "汇总训练结果。" + +#~ msgid "Request for run ID." +#~ msgstr "Flower 基线申请" + +#~ msgid "Get client IDs." +#~ msgstr "返回客户端(本身)。" + +#~ msgid "" +#~ "Flower usage examples used to be " +#~ "bundled with Flower in a package " +#~ "called ``flwr_example``. We are migrating " +#~ "those examples to standalone projects to" +#~ " make them easier to use. All " +#~ "new examples are based in the " +#~ "directory `examples " +#~ "`_." +#~ msgstr "" +#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" +#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " +#~ "`_。" + +#~ msgid "Quickstart TensorFlow/Keras" +#~ msgstr "快速入门 TensorFlow/Keras" + +#~ msgid "Legacy Examples (`flwr_example`)" +#~ msgstr "传统示例 (`flwr_example`)" + +#~ msgid "" +#~ "The useage examples in `flwr_example` " +#~ "are deprecated and will be removed " +#~ "in the future. New examples are " +#~ "provided as standalone projects in " +#~ "`examples `_." +#~ msgstr "" +#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " +#~ "`examples `_" +#~ " 中提供。" + +#~ msgid "Extra Dependencies" +#~ msgstr "额外依赖" + +#~ msgid "" +#~ "The core Flower framework keeps a " +#~ "minimal set of dependencies. The " +#~ "examples demonstrate Flower in the " +#~ "context of different machine learning " +#~ "frameworks, so additional dependencies need" +#~ " to be installed before an example" +#~ " can be run." 
+#~ msgstr "" +#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " +#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" + +#~ msgid "For PyTorch examples::" +#~ msgstr "PyTorch 示例::" + +#~ msgid "For TensorFlow examples::" +#~ msgstr "TensorFlow 示例::" + +#~ msgid "For both PyTorch and TensorFlow examples::" +#~ msgstr "PyTorch 和 TensorFlow 示例::" + +#~ msgid "" +#~ "Please consult :code:`pyproject.toml` for a" +#~ " full list of possible extras " +#~ "(section :code:`[tool.poetry.extras]`)." +#~ msgstr "" +#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " +#~ ":code:`[tool.poems.extras]`)。" + +#~ msgid "PyTorch Examples" +#~ msgstr "PyTorch 示例" + +#~ msgid "" +#~ "Our PyTorch examples are based on " +#~ "PyTorch 1.7. They should work with " +#~ "other releases as well. So far, we" +#~ " provide the following examples." +#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" + +#~ msgid "CIFAR-10 Image Classification" +#~ msgstr "CIFAR-10 图像分类" + +#~ msgid "" +#~ "`CIFAR-10 and CIFAR-100 " +#~ "`_ are " +#~ "popular RGB image datasets. The Flower" +#~ " CIFAR-10 example uses PyTorch to " +#~ "train a simple CNN classifier in a" +#~ " federated learning setup with two " +#~ "clients." +#~ msgstr "" +#~ "CIFAR-10 和 CIFAR-100 " +#~ "``_ 是流行的 RGB" +#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " +#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" + +#~ msgid "First, start a Flower server:" +#~ msgstr "首先,启动 Flower 服务器:" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" + +#~ msgid "Then, start the two clients in a new terminal window:" +#~ msgstr "然后,在新的终端窗口中启动两个客户端:" + +#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." 
+#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" + +#~ msgid "ImageNet-2012 Image Classification" +#~ msgstr "ImageNet-2012 图像分类" + +#~ msgid "" +#~ "`ImageNet-2012 `_ is " +#~ "one of the major computer vision " +#~ "datasets. The Flower ImageNet example " +#~ "uses PyTorch to train a ResNet-18 " +#~ "classifier in a federated learning setup" +#~ " with ten clients." +#~ msgstr "" +#~ "ImageNet-2012 `_ " +#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " +#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" + +#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" + +#~ msgid "TensorFlow Examples" +#~ msgstr "TensorFlow 示例" + +#~ msgid "" +#~ "Our TensorFlow examples are based on " +#~ "TensorFlow 2.0 or newer. So far, " +#~ "we provide the following examples." +#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" + +#~ msgid "Fashion-MNIST Image Classification" +#~ msgstr "Fashion-MNIST 图像分类" + +#~ msgid "" +#~ "`Fashion-MNIST `_ is often used as " +#~ "the \"Hello, world!\" of machine " +#~ "learning. We follow this tradition and" +#~ " provide an example which samples " +#~ "random local datasets from Fashion-MNIST" +#~ " and trains a simple image " +#~ "classification model over those partitions." 
+#~ msgstr "" +#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" +#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" + +#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" + +#~ msgid "" +#~ "For more details, see " +#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" + +#~ msgid "``BASE_IMAGE_TAG``" +#~ msgstr "基本图像标签" + +#~ msgid "The image tag of the base image." +#~ msgstr "基础图像的图像标记。" + +#~ msgid "" +#~ "It is important to follow the " +#~ "instructions described in comments. For " +#~ "instance, in order to not break " +#~ "how our changelog system works, you " +#~ "should read the information above the" +#~ " ``Changelog entry`` section carefully. You" +#~ " can also checkout some examples and" +#~ " details in the :ref:`changelogentry` " +#~ "appendix." +#~ msgstr "" +#~ "请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"`更新日志条目``\"部分上面的信息。您还可以查看 " +#~ ":ref:`changelogentry` 附录中的一些示例和细节。" + +#~ msgid "Open a PR (as shown above)" +#~ msgstr "打开 PR(如上图所示)" + +#~ msgid "How to write a good PR title" +#~ msgstr "如何撰写好的公关标题" + +#~ msgid "" +#~ "A well-crafted PR title helps team" +#~ " members quickly understand the purpose " +#~ "and scope of the changes being " +#~ "proposed. Here's a guide to help " +#~ "you write a good GitHub PR title:" +#~ msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" + +#~ msgid "" +#~ "1. Be Clear and Concise: Provide a" +#~ " clear summary of the changes in " +#~ "a concise manner. 1. Use Actionable " +#~ "Verbs: Start with verbs like \"Add,\"" +#~ " \"Update,\" or \"Fix\" to indicate " +#~ "the purpose. 1. 
Include Relevant " +#~ "Information: Mention the affected feature " +#~ "or module for context. 1. Keep it" +#~ " Short: Avoid lengthy titles for easy" +#~ " readability. 1. Use Proper Capitalization" +#~ " and Punctuation: Follow grammar rules " +#~ "for clarity." +#~ msgstr "" +#~ "1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 " +#~ "\"添加\"、\"更新 \"或 \"修复 \"等动词来表明目的。1. 包含相关信息: " +#~ "提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 使用正确的大小写和标点符号:" +#~ " 遵守语法规则,以确保清晰。" + +#~ msgid "" +#~ "Let's start with a few examples " +#~ "for titles that should be avoided " +#~ "because they do not provide meaningful" +#~ " information:" +#~ msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" + +#~ msgid "Implement Algorithm" +#~ msgstr "执行算法" + +#~ msgid "Add my_new_file.py to codebase" +#~ msgstr "在代码库中添加 my_new_file.py" + +#~ msgid "Improve code in module" +#~ msgstr "改进模块中的代码" + +#~ msgid "Change SomeModule" +#~ msgstr "更改 SomeModule" + +#~ msgid "" +#~ "Here are a few positive examples " +#~ "which provide helpful information without " +#~ "repeating how they do it, as that" +#~ " is already visible in the \"Files" +#~ " changed\" section of the PR:" +#~ msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" + +#~ msgid "Update docs banner to mention Flower Summit 2023" +#~ msgstr "更新文件横幅,提及 2023 年 Flower 峰会" + +#~ msgid "Remove unnecessary XGBoost dependency" +#~ msgstr "移除不必要的 XGBoost 依赖性" + +#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" +#~ msgstr "删除 FedAvg 子类化策略中的多余属性" + +#~ msgid "" +#~ "Add CI job to deploy the staging" +#~ " system when the ``main`` branch " +#~ "changes" +#~ msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" + +#~ msgid "" +#~ "Add new amazing library which will " +#~ "be used to improve the simulation " +#~ "engine" +#~ msgstr "添加新的惊人库,用于改进模拟引擎" + +#~ msgid "Changelog entry" +#~ msgstr "更新日志" + +#~ msgid "" +#~ "When opening a new PR, inside its" +#~ " description, there should be a " +#~ "``Changelog entry`` header." 
+#~ msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" + +#~ msgid "" +#~ "Above this header you should see " +#~ "the following comment that explains how" +#~ " to write your changelog entry:" +#~ msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" + +#~ msgid "" +#~ "Inside the following 'Changelog entry' " +#~ "section, you should put the description" +#~ " of your changes that will be " +#~ "added to the changelog alongside your" +#~ " PR title." +#~ msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR 标题旁边写上将添加到更新日志中的更改描述。" + +#~ msgid "" +#~ "If the section is completely empty " +#~ "(without any token) or non-existent, " +#~ "the changelog will just contain the " +#~ "title of the PR for the changelog" +#~ " entry, without any description." +#~ msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 PR 标题,而不包含任何描述。" + +#~ msgid "" +#~ "If the section contains some text " +#~ "other than tokens, it will use it" +#~ " to add a description to the " +#~ "change." +#~ msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" + +#~ msgid "" +#~ "If the section contains one of the" +#~ " following tokens it will ignore any" +#~ " other text and put the PR " +#~ "under the corresponding section of the" +#~ " changelog:" +#~ msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR 放在更新日志的相应部分下:" + +#~ msgid " is for classifying a PR as a general improvement." +#~ msgstr " 用于将 PR 划分为一般改进。" + +#~ msgid " is to not add the PR to the changelog" +#~ msgstr "表示不将 PR 添加到更新日志中" + +#~ msgid " is to add a general baselines change to the PR" +#~ msgstr " 是指在 PR 中添加一般基线更改" + +#~ msgid " is to add a general examples change to the PR" +#~ msgstr " 是在 PR 中添加对一般示例的修改" + +#~ msgid " is to add a general sdk change to the PR" +#~ msgstr " 是指在 PR 中添加一般的 sdk 更改" + +#~ msgid " is to add a general simulations change to the PR" +#~ msgstr "(模拟)是在 PR 中添加一般模拟变更" + +#~ msgid "Note that only one token should be used." +#~ msgstr "请注意,只能使用一个标记。" + +#~ msgid "" +#~ "Its content must have a specific " +#~ "format. 
We will break down what " +#~ "each possibility does:" +#~ msgstr "其内容必须有特定的格式。我们将分析每种可能性的作用:" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains nothing or doesn't exist, " +#~ "the following text will be added " +#~ "to the changelog::" +#~ msgstr "如果 ``#### Changelog entry`` 部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains a description (and no " +#~ "token), the following text will be " +#~ "added to the changelog::" +#~ msgstr "如果 ``#### Changelog entry`` 部分包含描述(但没有标记),则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, nothing will change" +#~ " in the changelog." +#~ msgstr "如果 ``#### Changelog entry`` 部分包含 ````,更新日志中将不会有任何更改。" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text" +#~ " will be added to the changelog::" +#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following text " +#~ "will be added to the changelog::" +#~ msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "If the ``### Changelog entry`` section" +#~ " contains ````, the following " +#~ "text will be added to the " +#~ "changelog::" +#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" + +#~ msgid "" +#~ "Note that only one token must be" +#~ " provided, otherwise, only the first " +#~ "action (in the order listed above), " +#~ "will be performed." 
+#~ msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" + +#~ msgid "Example: MXNet - Run MXNet Federated" +#~ msgstr "示例: MXNet - 运行联邦式 MXNet" + +#~ msgid "" +#~ "This tutorial will show you how to" +#~ " use Flower to build a federated " +#~ "version of an existing MXNet workload." +#~ " We are using MXNet to train a" +#~ " Sequential model on the MNIST " +#~ "dataset. We will structure the example" +#~ " similar to our `PyTorch - From " +#~ "Centralized To Federated " +#~ "`_ walkthrough. " +#~ "MXNet and PyTorch are very similar " +#~ "and a very good comparison between " +#~ "MXNet and PyTorch is given `here " +#~ "`_. First, " +#~ "we build a centralized training approach" +#~ " based on the `Handwritten Digit " +#~ "Recognition " +#~ "`_" +#~ " tutorial. Then, we build upon the" +#~ " centralized training code to run the" +#~ " training in a federated fashion." +#~ msgstr "" +#~ "本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用" +#~ " MXNet 在 MNIST 数据集上训练一个序列模型。另外,我们将采用与我们的 " +#~ "`PyTorch - 从集中式到联邦式 " +#~ "`_ 教程类似的示例结构。MXNet" +#~ " 和 PyTorch 非常相似,参考 `此处 " +#~ "`_对 MXNet " +#~ "和 PyTorch 进行了详细的比较。首先,我们根据 `手写数字识别 " +#~ "`" +#~ " 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" + +#~ msgid "" +#~ "Before we start setting up our " +#~ "MXNet example, we install the " +#~ ":code:`mxnet` and :code:`flwr` packages:" +#~ msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" + +#~ msgid "MNIST Training with MXNet" +#~ msgstr "使用 MXNet 进行 MNIST 训练" + +#~ msgid "" +#~ "We begin with a brief description " +#~ "of the centralized training code based" +#~ " on a :code:`Sequential` model. If " +#~ "you want a more in-depth " +#~ "explanation of what's going on then " +#~ "have a look at the official `MXNet" +#~ " tutorial " +#~ "`_." 
+#~ msgstr "" +#~ "首先,我们将简要介绍基于 :code:`Sequential` " +#~ "模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " +#~ "`_。" + +#~ msgid "" +#~ "Let's create a new file " +#~ "called:code:`mxnet_mnist.py` with all the " +#~ "components required for a traditional " +#~ "(centralized) MNIST training. First, the " +#~ "MXNet package :code:`mxnet` needs to be" +#~ " imported. You can see that we " +#~ "do not yet import the :code:`flwr` " +#~ "package for federated learning. This " +#~ "will be done later." +#~ msgstr "" +#~ "让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST " +#~ "训练所需的所有组件。首先,需要导入 MXNet 包 " +#~ ":code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" + +#~ msgid "" +#~ "The :code:`load_data()` function loads the " +#~ "MNIST training and test sets." +#~ msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" + +#~ msgid "" +#~ "As already mentioned, we will use " +#~ "the MNIST dataset for this machine " +#~ "learning workload. The model architecture " +#~ "(a very simple :code:`Sequential` model) " +#~ "is defined in :code:`model()`." +#~ msgstr "" +#~ "如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 " +#~ ":code:`Sequential` 模型)在 :code:`model()` 中定义。" + +#~ msgid "" +#~ "We now need to define the training" +#~ " (function :code:`train()`) which loops " +#~ "over the training set and measures " +#~ "the loss for each batch of " +#~ "training examples." +#~ msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" + +#~ msgid "" +#~ "The evaluation of the model is " +#~ "defined in function :code:`test()`. The " +#~ "function loops over all test samples " +#~ "and measures the loss and accuracy " +#~ "of the model based on the test " +#~ "dataset." +#~ msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" + +#~ msgid "" +#~ "Having defined the data loading, model" +#~ " architecture, training, and evaluation we" +#~ " can put everything together and " +#~ "train our model on MNIST. 
Note " +#~ "that the GPU/CPU device for the " +#~ "training and testing is defined within" +#~ " the :code:`ctx` (context)." +#~ msgstr "" +#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST " +#~ "上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU 设备是在 :code:`ctx`中定义的。" + +#~ msgid "You can now run your (centralized) MXNet machine learning workload:" +#~ msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" + +#~ msgid "" +#~ "So far this should all look fairly" +#~ " familiar if you've used MXNet (or" +#~ " even PyTorch) before. Let's take the" +#~ " next step and use what we've " +#~ "built to create a simple federated " +#~ "learning system consisting of one server" +#~ " and two clients." +#~ msgstr "" +#~ "到目前为止,如果你以前使用过 MXNet(甚至 " +#~ "PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" + +#~ msgid "MXNet meets Flower" +#~ msgstr "MXNet 结合 Flower" + +#~ msgid "" +#~ "So far, it was not easily possible" +#~ " to use MXNet workloads for federated" +#~ " learning because federated learning is " +#~ "not supported in MXNet. Since Flower " +#~ "is fully agnostic towards the underlying" +#~ " machine learning framework, it can " +#~ "be used to federated arbitrary machine" +#~ " learning workloads. This section will " +#~ "show you how Flower can be used" +#~ " to federate our centralized MXNet " +#~ "workload." +#~ msgstr "" +#~ "由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet " +#~ "用于联邦学习之中。Flower 与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 " +#~ "Flower 将我们的集中式 MXNet 改为联邦式训练。" + +#~ msgid "" +#~ "The concept to federate an existing " +#~ "workload is always the same and " +#~ "easy to understand. We have to " +#~ "start a *server* and then use the" +#~ " code in :code:`mxnet_mnist.py` for the " +#~ "*clients* that are connected to the " +#~ "*server*. The *server* sends model " +#~ "parameters to the clients. The *clients*" +#~ " run the training and update the " +#~ "parameters. The updated parameters are " +#~ "sent back to the *server* which " +#~ "averages all received parameter updates. 
" +#~ "This describes one round of the " +#~ "federated learning process and we repeat" +#~ " this for multiple rounds." +#~ msgstr "" +#~ "将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " +#~ ":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" + +#~ msgid "" +#~ "Finally, we will define our *client* " +#~ "logic in :code:`client.py` and build " +#~ "upon the previously defined MXNet " +#~ "training in :code:`mxnet_mnist.py`. Our " +#~ "*client* needs to import :code:`flwr`, " +#~ "but also :code:`mxnet` to update the " +#~ "parameters on our MXNet model:" +#~ msgstr "" +#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " +#~ "逻辑,并以之前在 :code:`mxnet_mnist.py` 中定义的 MXNet " +#~ "训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " +#~ ":code:`mxnet`,以更新 MXNet 模型的参数:" + +#~ msgid "" +#~ "Implementing a Flower *client* basically " +#~ "means implementing a subclass of either" +#~ " :code:`flwr.client.Client` or " +#~ ":code:`flwr.client.NumPyClient`. Our implementation " +#~ "will be based on " +#~ ":code:`flwr.client.NumPyClient` and we'll call " +#~ "it :code:`MNISTClient`. :code:`NumPyClient` is " +#~ "slightly easier to implement than " +#~ ":code:`Client` if you use a framework" +#~ " with good NumPy interoperability (like " +#~ "PyTorch or MXNet) because it avoids " +#~ "some of the boilerplate that would " +#~ "otherwise be necessary. 
:code:`MNISTClient` " +#~ "needs to implement four methods, two " +#~ "methods for getting/setting model parameters," +#~ " one method for training the model," +#~ " and one method for testing the " +#~ "model:" +#~ msgstr "" +#~ "实现 Flower *client*基本上意味着要实现 " +#~ ":code:`flwr.client.Client` 或 " +#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " +#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " +#~ ":code:`MNISTClient`。如果使用具有良好 NumPy 互操作性的框架(如 PyTorch" +#~ " 或 MXNet),:code:`NumPyClient` 比 " +#~ ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " +#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" + +#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +#~ msgstr "将 MXNet :code:`NDArray` 转换为 NumPy :code:`ndarray`" + +#~ msgid "" +#~ "The challenging part is to transform " +#~ "the MXNet parameters from :code:`NDArray` " +#~ "to :code:`NumPy Arrays` to make it " +#~ "readable for Flower." +#~ msgstr "" +#~ "具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 " +#~ ":code:`NumPy Arrays` 以便 Flower 可以读取。" + +#~ msgid "" +#~ "The two :code:`NumPyClient` methods " +#~ ":code:`fit` and :code:`evaluate` make use " +#~ "of the functions :code:`train()` and " +#~ ":code:`test()` previously defined in " +#~ ":code:`mxnet_mnist.py`. So what we really " +#~ "do here is we tell Flower through" +#~ " our :code:`NumPyClient` subclass which of" +#~ " our already defined functions to " +#~ "call for training and evaluation. We " +#~ "included type annotations to give you" +#~ " a better understanding of the data" +#~ " types that get passed around." 
+#~ msgstr "" +#~ "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 " +#~ ":code:`evaluate` 使用了之前在 :code:`mxnet_mnist.py` " +#~ "中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们要做的就是通过" +#~ " :code:`NumPyClient` 子类告知 Flower " +#~ "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" + +#~ msgid "" +#~ "Having defined data loading, model " +#~ "architecture, training, and evaluation we " +#~ "can put everything together and train" +#~ " our :code:`Sequential` model on MNIST." +#~ msgstr "" +#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 " +#~ ":code:`Sequential` 模型。" + +#~ msgid "" +#~ "in each window (make sure that the" +#~ " server is still running before you" +#~ " do so) and see your MXNet " +#~ "project run federated learning across " +#~ "two clients. Congratulations!" +#~ msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" + +#~ msgid "" +#~ "The full source code for this " +#~ "example: `MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_. Our " +#~ "example is of course somewhat over-" +#~ "simplified because both clients load the" +#~ " exact same dataset, which isn't " +#~ "realistic. You're now prepared to " +#~ "explore this topic further. How about" +#~ " using a CNN or using a " +#~ "different dataset? How about adding more" +#~ " clients?" +#~ msgstr "" +#~ "此示例的完整源代码在:\"MXNet: From Centralized To " +#~ "Federated (Code) " +#~ "`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用" +#~ " CNN 或使用不同的数据集会如何?添加更多客户端会如何?" + +#~ msgid "with the following command sequence:" +#~ msgstr "使用以下命令序列:" + +#~ msgid "" +#~ "In case you are a researcher you" +#~ " might be just fine using the " +#~ "self-signed certificates generated using " +#~ "the scripts which are part of this" +#~ " guide." +#~ msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" + +#~ msgid "" +#~ "We are now going to show how " +#~ "to write a sever which uses the" +#~ " previously generated scripts." 
+#~ msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的服务器。" + +#~ msgid "" +#~ "When providing certificates, the server " +#~ "expects a tuple of three certificates." +#~ " :code:`Path` can be used to easily" +#~ " read the contents of those files " +#~ "into byte strings, which is the " +#~ "data type :code:`start_server` expects." +#~ msgstr "" +#~ "在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` " +#~ "可用于轻松地将这些文件的内容读取为字节字符串,这就是 :code:`start_server` 期望的数据类型。" -#~ msgid "Example: Walk-Through PyTorch & MNIST" -#~ msgstr "实例: PyTorch 和 MNIST 的演练" +#~ msgid "Flower server" +#~ msgstr "Flower 服务器" + +#~ msgid "flower-driver-api" +#~ msgstr "flower-driver-api" + +#~ msgid "flower-fleet-api" +#~ msgstr "flower-fleet-api" + +#~ msgid "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ msgstr "" +#~ ":py:obj:`start_driver `\\ " +#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" + +#~ msgid "Start a Flower Driver API server." +#~ msgstr "启动基于 Ray 的Flower模拟服务器。" + +#~ msgid "" +#~ ":py:obj:`Driver `\\ " +#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ msgstr "" +#~ "Flower 1.0: ``start_server(..., " +#~ "config=flwr.server.ServerConfig(num_rounds=3, " +#~ "round_timeout=600.0), ...)``" + +#~ msgid "`Driver` class provides an interface to the Driver API." +#~ msgstr "`Driver` 类为驱动程序 API 提供了一个接口。" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:9091\"`." +#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" + +#~ msgid ":py:obj:`close `\\ \\(\\)" +#~ msgstr "server.strategy.Strategy" + +#~ msgid "Disconnect from the SuperLink if connected." +#~ msgstr "如果已连接,请断开与超级链接的连接。" + +#~ msgid "start\\_driver" +#~ msgstr "启动客户端" + +#~ msgid "" +#~ "The IPv4 or IPv6 address of the" +#~ " Driver API server. Defaults to " +#~ "`\"[::]:8080\"`." 
+#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" + +#~ msgid "" +#~ "A server implementation, either " +#~ "`flwr.server.Server` or a subclass thereof." +#~ " If no instance is provided, then " +#~ "`start_driver` will create one." +#~ msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" + +#~ msgid "" +#~ "An implementation of the class " +#~ "`flwr.server.ClientManager`. If no implementation" +#~ " is provided, then `start_driver` will " +#~ "use `flwr.server.SimpleClientManager`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.ClientManager` " +#~ "的实现。如果没有提供实现,`start_server` 将使用 " +#~ "`flwr.server.client_manager.SimpleClientManager`。" + +#~ msgid "The Driver object to use." +#~ msgstr "要使用的驱动程序对象。" + +#~ msgid "Starting a driver that connects to an insecure server:" +#~ msgstr "启动不安全的服务器:" + +#~ msgid "Starting a driver that connects to an SSL-enabled server:" +#~ msgstr "启动支持 SSL 的服务器:" + +#~ msgid "" +#~ ":py:obj:`run_simulation_from_cli " +#~ "`\\ \\(\\)" +#~ msgstr "" + +#~ msgid "Run Simulation Engine from the CLI." +#~ msgstr "" + +#~ msgid "run\\_simulation\\_from\\_cli" +#~ msgstr "运行模拟" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with MXNet to train a Sequential " +#~ "model on MNIST." +#~ msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" + +#~ msgid "Quickstart MXNet" +#~ msgstr "快速入门 MXNet" + +#~ msgid "" +#~ "MXNet is no longer maintained and " +#~ "has been moved into `Attic " +#~ "`_. As a " +#~ "result, we would encourage you to " +#~ "use other ML frameworks alongside " +#~ "Flower, for example, PyTorch. This " +#~ "tutorial might be removed in future " +#~ "versions of Flower." +#~ msgstr "" + +#~ msgid "" +#~ "In this tutorial, we will learn " +#~ "how to train a :code:`Sequential` model" +#~ " on MNIST using Flower and MXNet." 
+#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" + +#~ msgid "Since we want to use MXNet, let's go ahead and install it:" +#~ msgstr "既然我们要使用 MXNet,那就继续安装吧:" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. Our training " +#~ "procedure and network architecture are " +#~ "based on MXNet´s `Hand-written Digit " +#~ "Recognition tutorial " +#~ "`_." +#~ msgstr "" +#~ "现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 " +#~ "MXNet 的 `手写数字识别教程 " +#~ "`_\"。" + +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and MXNet related " +#~ "packages:" +#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" + +#~ msgid "In addition, define the device allocation in MXNet with:" +#~ msgstr "此外,还可以在 MXNet 中定义设备分配:" + +#~ msgid "" +#~ "We use MXNet to load MNIST, a " +#~ "popular image classification dataset of " +#~ "handwritten digits for machine learning. " +#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" +#~ " downloads the training and test " +#~ "data." +#~ msgstr "" +#~ "我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet" +#~ " 工具 :code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" + +#~ msgid "" +#~ "Define the training and loss with " +#~ "MXNet. We train the model by " +#~ "looping over the dataset, measure the" +#~ " corresponding loss, and optimize it." +#~ msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" + +#~ msgid "" +#~ "Next, we define the validation of " +#~ "our machine learning model. We loop " +#~ "over the test set and measure both" +#~ " loss and accuracy on the test " +#~ "set." +#~ msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" + +#~ msgid "" +#~ "After defining the training and testing" +#~ " of a MXNet machine learning model," +#~ " we use these functions to implement" +#~ " a Flower client." 
+#~ msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" + +#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" +#~ msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型:" + +#~ msgid "" +#~ "After loading the dataset with " +#~ ":code:`load_data()` we perform one forward " +#~ "propagation to initialize the model and" +#~ " model parameters with :code:`model(init)`. " +#~ "Next, we implement a Flower client." +#~ msgstr "" +#~ "使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 " +#~ ":code:`model(init)` 初始化模型和模型参数。接下来,我们实现一个 Flower " +#~ "客户端。" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses MXNet." +#~ " Implementing :code:`NumPyClient` usually means" +#~ " defining the following methods " +#~ "(:code:`set_parameters` is optional though):" +#~ msgstr "" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " MXNet 时,它可以让您更轻松地实现 :code:`Client` 接口。实现 " +#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " +#~ "是可选的):" + +#~ msgid "They can be implemented in the following way:" +#~ msgstr "它们可以通过以下方式实现:" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`MNISTClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()` or " +#~ ":code:`fl.client.start_numpy_client()`. The string " +#~ ":code:`\"0.0.0.0:8080\"` tells the client " +#~ "which server to connect to. In our" +#~ " case we can run the server and" +#~ " the client on the same machine, " +#~ "therefore we use :code:`\"0.0.0.0:8080\"`. 
If" +#~ " we run a truly federated workload" +#~ " with the server and clients running" +#~ " on different machines, all that " +#~ "needs to change is the " +#~ ":code:`server_address` we pass to the " +#~ "client." +#~ msgstr "" +#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" +#~ " 并调用 :code:`fl.client.start_client()` 或 " +#~ ":code:`fl.client.start_numpy_client()`。字符串 " +#~ ":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" +#~ " " +#~ ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +#~ " :code:`server_address`。" #~ msgid "" -#~ "In this tutorial we will learn, " -#~ "how to train a Convolutional Neural " -#~ "Network on MNIST using Flower and " -#~ "PyTorch." -#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 MNIST 上训练卷积神经网络。" +#~ "With both client and server ready, " +#~ "we can now run everything and see" +#~ " federated learning in action. Federated" +#~ " learning systems usually have a " +#~ "server and multiple clients. We " +#~ "therefore have to start the server " +#~ "first:" +#~ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" #~ msgid "" -#~ "Since we want to use PyTorch to" -#~ " solve a computer vision task, let's" -#~ " go ahead an install PyTorch and " -#~ "the **torchvision** library:" -#~ msgstr "我们想用 PyTorch 来做计算机视觉任务,需要先安装 PyTorch 和 **torchvision** 库:" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-mxnet`." +#~ msgstr "" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "`_ 可在 :code:`examples/quickstart-" +#~ "mxnet` 中找到。" -#~ msgid "Ready... Set... Train!" -#~ msgstr "准备...设置...训练!" 
+#~ msgid ":code:`load_mnist()`" +#~ msgstr ":code:`load_mnist()`" + +#~ msgid "Loads the MNIST dataset using OpenML" +#~ msgstr "使用 OpenML 加载 MNIST 数据集" + +#~ msgid ":code:`shuffle()`" +#~ msgstr ":code:`shuffle()`" + +#~ msgid "Shuffles data and its label" +#~ msgstr "对数据及其标签进行洗牌" + +#~ msgid ":code:`partition()`" +#~ msgstr ":code:`partition()`" + +#~ msgid "Splits datasets into a number of partitions" +#~ msgstr "将数据集分割成多个分区" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on PyTorch's `Basic MNIST Example" -#~ " `_. " -#~ "This will allow you see how easy" -#~ " it is to wrap your code with" -#~ " Flower and begin training in a " -#~ "federated way. We provide you with " -#~ "two helper scripts, namely *run-" -#~ "server.sh*, and *run-clients.sh*. Don't " -#~ "be afraid to look inside, they are" -#~ " simple enough =)." +#~ "We load the MNIST dataset from " +#~ "`OpenML " +#~ "`_, a" +#~ " popular image classification dataset of" +#~ " handwritten digits for machine learning." +#~ " The utility :code:`utils.load_mnist()` downloads" +#~ " the training and test data. The " +#~ "training set is split afterwards into" +#~ " 10 partitions with :code:`utils.partition()`." #~ msgstr "" -#~ "现在我们已经安装了所有的依赖包,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " -#~ "PyTorch 的 `Basic MNIST Example " -#~ "`_。您会发现用 " -#~ "Flower 来封装您的代码并进行联邦学习训练是多么容易。我们为您提供了两个辅助脚本,即 *run-" -#~ "server.sh* 和 *run-clients.sh*。别害怕,它们很简单 =)。" +#~ "我们从 `OpenML `_ 中加载 " +#~ "MNIST 数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 " +#~ ":code:`utils.load_mnist()` 下载训练和测试数据。然后使用 " +#~ ":code:`utils.partition()`将训练集分割成 10 个分区。" -#~ msgid "" -#~ "Go ahead and launch on a terminal" -#~ " the *run-server.sh* script first as" -#~ " follows:" -#~ msgstr "首先在终端上启动 *run-server.sh* 脚本,如下所示:" +#~ msgid "Let's get stated!" +#~ msgstr "让我们开始吧!" 
-#~ msgid "Now that the server is up and running, go ahead and launch the clients." -#~ msgstr "现在服务器已经启动并运行,请继续启动客户端。" +#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgstr "" -#~ msgid "" -#~ "Et voilà! You should be seeing the" -#~ " training procedure and, after a few" -#~ " iterations, the test accuracy for " -#~ "each client." -#~ msgstr "然后就可以了!您应该能看到训练过程,以及经过几次反复后,每个客户端的测试准确率。" +#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgstr "" -#~ msgid "Now, let's see what is really happening inside." -#~ msgstr "现在,让我们看看里面到底发生了什么。" +#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgstr "" -#~ msgid "" -#~ "Inside the server helper script *run-" -#~ "server.sh* you will find the following" -#~ " code that basically runs the " -#~ ":code:`server.py`" -#~ msgstr "在服务器辅助脚本 *run-server.sh* 中,你可以找到以下代码,这些代码基本上都是运行 :code:`server.py` 的代码" +#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgstr "" -#~ msgid "" -#~ "We can go a bit deeper and " -#~ "see that :code:`server.py` simply launches " -#~ "a server that will coordinate three " -#~ "rounds of training. Flower Servers are" -#~ " very customizable, but for simple " -#~ "workloads, we can start a server " -#~ "using the :ref:`start_server ` function and leave " -#~ "all the configuration possibilities at " -#~ "their default values, as seen below." +#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" #~ msgstr "" -#~ "我们可以再深入一点,:code:`server.py` 只是启动了一个服务器,该服务器将协调三轮训练。Flower " -#~ "服务器是非常容易修改的,但对于简单的工作,我们可以使用 :ref:`start_server `函数启动服务器,并将所有可能的配置保留为默认值,如下所示。" -#~ msgid "" -#~ "Next, let's take a look at the " -#~ "*run-clients.sh* file. You will see " -#~ "that it contains the main loop " -#~ "that starts a set of *clients*." -#~ msgstr "接下来,让我们看看 *run-clients.sh* 文件。您会看到它包含了用来启动多个 *客户端* 的代码。" +#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgstr "" -#~ msgid "" -#~ "**cid**: is the client ID. It is" -#~ " an integer that uniquely identifies " -#~ "client identifier." 
-#~ msgstr "**cid**:是客户 ID。它是一个整数,可唯一标识客户标识符。" +#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgstr "" -#~ msgid "**sever_address**: String that identifies IP and port of the server." -#~ msgstr "**sever_address**: 标识服务器 IP 和端口的字符串。" +#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgstr "" -#~ msgid "" -#~ "**nb_clients**: This defines the number " -#~ "of clients being created. This piece " -#~ "of information is not required by " -#~ "the client, but it helps us " -#~ "partition the original MNIST dataset to" -#~ " make sure that every client is " -#~ "working on unique subsets of both " -#~ "*training* and *test* sets." +#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" #~ msgstr "" -#~ "**nb_clients**: 这定义了正在创建的客户端数量。客户端并不需要这一信息,但它有助于我们对原始 " -#~ "MNIST 数据集进行划分,以确保每个客户端都在 *training* 和 *test*" -#~ " 数据集上有独立的数据。" -#~ msgid "" -#~ "Again, we can go deeper and look" -#~ " inside :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`. After going through the" -#~ " argument parsing code at the " -#~ "beginning of our :code:`main` function, " -#~ "you will find a call to " -#~ ":code:`mnist.load_data`. This function is " -#~ "responsible for partitioning the original " -#~ "MNIST datasets (*training* and *test*) " -#~ "and returning a :code:`torch.utils.data.DataLoader`" -#~ " s for each of them. We then" -#~ " instantiate a :code:`PytorchMNISTClient` object" -#~ " with our client ID, our DataLoaders," -#~ " the number of epochs in each " -#~ "round, and which device we want to" -#~ " use for training (CPU or GPU)." 
+#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" #~ msgstr "" -#~ "我们可以深入看一下 :code:`flwr_example/quickstart-" -#~ "pytorch/client.py`。查看 :code:`main` 函数开头的参数解析代码后,你会发现一个对" -#~ " :code:`mnist.load_data` 的调用。该函数负责分割原始 MNIST " -#~ "数据集(*training* 和 *test*),并为每个数据集返回一个 " -#~ ":code:`torch.utils.data.DataLoader` 。然后,我们实例化一个 " -#~ ":code:`PytorchMNISTClient` 对象,其中包含我们的客户端 ID、 " -#~ "DataLoader、每一轮中的遍历数,以及我们希望用于训练的设备(CPU 或 GPU)。" -#~ msgid "" -#~ "The :code:`PytorchMNISTClient` object when " -#~ "finally passed to :code:`fl.client.start_client` " -#~ "along with the server's address as " -#~ "the training process begins." +#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" #~ msgstr "" -#~ "当训练过程开始时,:code:`PytorchMNISTClient` 对象会连同服务器地址一起传递给 " -#~ ":code:`fl.client.start_client`。" -#~ msgid "A Closer Look" -#~ msgstr "仔细看一下" +#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgstr "" + +#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgstr "" + +#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgstr "" #~ msgid "" -#~ "Now, let's look closely into the " -#~ ":code:`PytorchMNISTClient` inside :code:`flwr_example" -#~ ".quickstart-pytorch.mnist` and see what it" -#~ " is doing:" +#~ "Currently, Flower provides two images, a" +#~ " ``base`` image and a ``superlink`` " +#~ "image. The base image, as the name" +#~ " suggests, contains basic dependencies that" +#~ " the SuperLink needs. This includes " +#~ "system dependencies, Python and Python " +#~ "tools. The SuperLink image is based " +#~ "on the base image, but it " +#~ "additionally installs the SuperLink using " +#~ "``pip``." #~ msgstr "" -#~ "现在,让我们仔细研究一下 :code:`flwr_example.quickstart-pytorch.mnist`" -#~ " 中的 :code:`PytorchMNISTClient`,看看它在做什么:" +#~ "目前,Flower " +#~ "提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依赖项、Python" +#~ " 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` 额外安装" +#~ " Flower 服务器。" + +#~ msgid "``3.11``" +#~ msgstr "``1.0.0rc1``" + +#~ msgid "Defaults to ``22.04``." 
+#~ msgstr "默认为 ``22.04``。" + +#~ msgid "Building the SuperLink image" +#~ msgstr "启动服务器" + +#~ msgid "Defaults to ``flwr/base``." +#~ msgstr "默认为 ``flwr/server``。" + +#~ msgid "The Python version of the base image." +#~ msgstr "基础镜像的存储库名称。" + +#~ msgid "Defaults to ``py3.11``." +#~ msgstr "默认为 ``22.04``。" + +#~ msgid "Defaults to ``ubuntu22.04``." +#~ msgstr "默认为 ``py3.11-ubuntu22.04``。" -#~ msgid "" -#~ "The first thing to notice is that" -#~ " :code:`PytorchMNISTClient` instantiates a CNN" -#~ " model inside its constructor" -#~ msgstr "首先要注意的是 :code:`PytorchMNISTClient` 在其构造函数中实例化了一个 CNN 模型" +#~ msgid "Defaults to ``flwr``." +#~ msgstr "默认为 ``flwr/server``。" #~ msgid "" -#~ "The code for the CNN is available" -#~ " under :code:`quickstart-pytorch.mnist` and " -#~ "it is reproduced below. It is the" -#~ " same network found in `Basic MNIST" -#~ " Example " -#~ "`_." -#~ msgstr "" -#~ "CNN 的代码可在 :code:`quickstart-pytorch.mnist` " -#~ "下找到,现复制如下。它与 `Basic MNIST Example " -#~ "`_中的网络相同。" +#~ "The name of image is ``flwr_superlink``" +#~ " and the tag ``0.1.0``. Remember that" +#~ " the build arguments as well as " +#~ "the name and tag can be adapted" +#~ " to your needs. These values serve" +#~ " as examples only." +#~ msgstr "图像名称为 ``flwr_server``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" -#~ msgid "" -#~ "The second thing to notice is that" -#~ " :code:`PytorchMNISTClient` class inherits from" -#~ " the :code:`fl.client.Client`, and hence it" -#~ " must implement the following methods:" -#~ msgstr "" -#~ "第二件要注意的事是 :code:`PytorchMNISTClient` 类继承自 " -#~ ":code:`fl.client.Client`,因此它必须实现以下方法:" +#~ msgid "Creating New Messages" +#~ msgstr "创建新信息" #~ msgid "" -#~ "When comparing the abstract class to " -#~ "its derived class :code:`PytorchMNISTClient` " -#~ "you will notice that :code:`fit` calls" -#~ " a :code:`train` function and that " -#~ ":code:`evaluate` calls a :code:`test`: " -#~ "function." 
-#~ msgstr "" -#~ "将抽象类与其派生类 :code:`PytorchMNISTClient` 进行比较时,您会发现 " -#~ ":code:`fit` 调用了一个 :code:`train` 函数,而 " -#~ ":code:`evaluate` 则调用了一个 :code:`test`: 函数。" +#~ "This is a simple guide for " +#~ "creating a new type of message " +#~ "between the server and clients in " +#~ "Flower." +#~ msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" #~ msgid "" -#~ "These functions can both be found " -#~ "inside the same :code:`quickstart-" -#~ "pytorch.mnist` module:" -#~ msgstr "这些函数都可以在同一个 :code:`quickstart-pytorch.mnist` 模块中找到:" +#~ "Let's suppose we have the following " +#~ "example functions in :code:`server.py` and " +#~ ":code:`numpy_client.py`..." +#~ msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." + +#~ msgid "Server's side:" +#~ msgstr "在服务器端:" + +#~ msgid "Client's side:" +#~ msgstr "在客户端:" #~ msgid "" -#~ "Observe that these functions encapsulate " -#~ "regular training and test loops and " -#~ "provide :code:`fit` and :code:`evaluate` with" -#~ " final statistics for each round. You" -#~ " could substitute them with your " -#~ "custom train and test loops and " -#~ "change the network architecture, and the" -#~ " entire example would still work " -#~ "flawlessly. As a matter of fact, " -#~ "why not try and modify the code" -#~ " to an example of your liking?" -#~ msgstr "" -#~ "请注意,这些函数封装了常规的训练和测试循环,并为 :code:`fit` 和 " -#~ ":code:`evaluate` " -#~ "提供了每轮的最终统计数据。您可以用自定义的训练和测试循环来替代它们,并改变网络结构,整个示例仍然可以完美运行。事实上,为什么不按照自己的喜好修改代码呢?" +#~ "Let's now see what we need to " +#~ "implement in order to get this " +#~ "simple function between the server and" +#~ " client to work!" +#~ msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" 
-#~ msgid "Give It a Try" -#~ msgstr "试试看" +#~ msgid "Message Types for Protocol Buffers" +#~ msgstr "协议缓冲区的信息类型" #~ msgid "" -#~ "Looking through the quickstart code " -#~ "description above will have given a " -#~ "good understanding of how *clients* and" -#~ " *servers* work in Flower, how to " -#~ "run a simple experiment, and the " -#~ "internals of a client wrapper. Here " -#~ "are a few things you could try " -#~ "on your own and get more " -#~ "experience with Flower:" +#~ "The first thing we need to do " +#~ "is to define a message type for" +#~ " the RPC system in :code:`transport.proto`." +#~ " Note that we have to do it " +#~ "for both the request and response " +#~ "messages. For more details on the " +#~ "syntax of proto3, please see the " +#~ "`official documentation `_." #~ msgstr "" -#~ "通过上面的快速入门代码描述,你将对 Flower " -#~ "中*客户端*和*服务器*的工作方式、如何运行一个简单的实验以及客户端封装器的内部结构有一个很好的了解。您可以自己尝试以下内容,以获得更多使用" -#~ " Flower 的经验:" +#~ "我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " +#~ "系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档" +#~ " `_。" -#~ msgid "" -#~ "Try and change :code:`PytorchMNISTClient` so" -#~ " it can accept different architectures." -#~ msgstr "尝试修改 :code:`PytorchMNISTClient`,使其可以接受不同的架构。" +#~ msgid "Within the :code:`ServerMessage` block:" +#~ msgstr "在 :code:`ServerMessage` 代码块中:" -#~ msgid "" -#~ "Modify the :code:`train` function so " -#~ "that it accepts different optimizers" -#~ msgstr "修改 :code:`train` 函数,使其接受不同的优化器" +#~ msgid "Within the ClientMessage block:" +#~ msgstr "在 ClientMessage 代码块中:" #~ msgid "" -#~ "Modify the :code:`test` function so that" -#~ " it proves not only the top-1 " -#~ "(regular accuracy) but also the top-5" -#~ " accuracy?" -#~ msgstr "修改 :code:`test` 函数,使其不仅能输出前 1 名(常规精确度),还能证明前 5 名的精确度?" +#~ "Make sure to also add a field " +#~ "of the newly created message type " +#~ "in :code:`oneof msg`." +#~ msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" -#~ msgid "" -#~ "Go larger! 
Try to adapt the code" -#~ " to larger images and datasets. Why" -#~ " not try training on ImageNet with" -#~ " a ResNet-50?" -#~ msgstr "让我们尝试让代码适应更大的图像和数据集。为什么不尝试使用 ResNet-50 在 ImageNet 上进行训练呢?" +#~ msgid "Once that is done, we will compile the file with:" +#~ msgstr "完成后,我们将使用:" -#~ msgid "You are ready now. Enjoy learning in a federated way!" -#~ msgstr "您现在已经准备就绪。尽情享受联邦学习的乐趣吧!" +#~ msgid "If it compiles successfully, you should see the following message:" +#~ msgstr "如果编译成功,你应该会看到以下信息:" -#~ msgid "Differential privacy" -#~ msgstr "差别隐私" +#~ msgid "Serialization and Deserialization Functions" +#~ msgstr "序列化和反序列化函数" #~ msgid "" -#~ "Flower provides differential privacy (DP) " -#~ "wrapper classes for the easy integration" -#~ " of the central DP guarantees " -#~ "provided by DP-FedAvg into training " -#~ "pipelines defined in any of the " -#~ "various ML frameworks that Flower is " -#~ "compatible with." +#~ "Our next step is to add functions" +#~ " to serialize and deserialize Python " +#~ "datatypes to or from our defined " +#~ "RPC message types. You should add " +#~ "these functions in :code:`serde.py`." #~ msgstr "" -#~ "Flower 提供了差分隐私 (DP) 封装类,可将 DP-FedAvg " -#~ "提供的核心 DP 轻松集成到 Flower 兼容的各种 ML " -#~ "框架中定义的训练模式中。" +#~ "下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC " +#~ "消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 Python 数据类型。您应该在" +#~ " :code:`serde.py` 中添加这些函数。" -#~ msgid "" -#~ "Please note that these components are" -#~ " still experimental; the correct " -#~ "configuration of DP for a specific " -#~ "task is still an unsolved problem." -#~ msgstr "请注意,这些组件仍处于试验阶段,如何为特定任务正确配置 DP 仍是一个尚未解决的问题。" +#~ msgid "The four functions:" +#~ msgstr "四种函数:" + +#~ msgid "Sending the Message from the Server" +#~ msgstr "从服务器发送信息" #~ msgid "" -#~ "The name DP-FedAvg is misleading " -#~ "since it can be applied on top " -#~ "of any FL algorithm that conforms " -#~ "to the general structure prescribed by" -#~ " the FedOpt family of algorithms." 
-#~ msgstr "DP-FedAvg 这个名称容易引起误解,因为它可以应用于任何符合 FedOpt 系列算法规定的一般结构的 FL 算法之上。" +#~ "Now write the request function in " +#~ "your Client Proxy class (e.g., " +#~ ":code:`grpc_client_proxy.py`) using the serde " +#~ "functions you just created:" +#~ msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" -#~ msgid "DP-FedAvg" -#~ msgstr "DP-FedAvg" +#~ msgid "Receiving the Message by the Client" +#~ msgstr "由客户端接收信息" #~ msgid "" -#~ "DP-FedAvg, originally proposed by " -#~ "McMahan et al. [mcmahan]_ and extended" -#~ " by Andrew et al. [andrew]_, is " -#~ "essentially FedAvg with the following " -#~ "modifications." -#~ msgstr "DP-FedAvg 最初由McMahan等人提出,并由Andrew等人加以扩展。" +#~ "Last step! Modify the code in " +#~ ":code:`message_handler.py` to check the field" +#~ " of your message and call the " +#~ ":code:`example_response` function. Remember to " +#~ "use the serde functions!" +#~ msgstr "" +#~ "最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用" +#~ " :code:`example_response` 函数。记住使用 serde 函数!" -#~ msgid "" -#~ "**Clipping** : The influence of each " -#~ "client's update is bounded by clipping" -#~ " it. This is achieved by enforcing" -#~ " a cap on the L2 norm of " -#~ "the update, scaling it down if " -#~ "needed." -#~ msgstr "**裁剪** : 裁剪会影响到每个客户端的模型参数。具体做法是对参数的 L2 准则设置上限,必要时将其缩减。" +#~ msgid "Within the handle function:" +#~ msgstr "在句柄函数内:" -#~ msgid "" -#~ "**Noising** : Gaussian noise, calibrated " -#~ "to the clipping threshold, is added " -#~ "to the average computed at the " -#~ "server." -#~ msgstr "**噪声** : 在服务器计算出的平均值中加入高斯噪声,该噪声根据剪切阈值进行校准。" +#~ msgid "And add a new function:" +#~ msgstr "并增加一个新函数:" -#~ msgid "" -#~ "The distribution of the update norm " -#~ "has been shown to vary from " -#~ "task-to-task and to evolve as " -#~ "training progresses. 
This variability is " -#~ "crucial in understanding its impact on" -#~ " differential privacy guarantees, emphasizing " -#~ "the need for an adaptive approach " -#~ "[andrew]_ that continuously adjusts the " -#~ "clipping threshold to track a " -#~ "prespecified quantile of the update norm" -#~ " distribution." -#~ msgstr "事实证明,参数更新准则的分布会随着任务的不同而变化,并随着训练的进展而演变。因此,我们采用了一种自适应方法,该方法会不断调整剪切阈值,以跟踪参数更新准则分布的预设量化值。" +#~ msgid "Hopefully, when you run your program you will get the intended result!" +#~ msgstr "希望您在运行程序时能得到预期的结果!" -#~ msgid "Simplifying Assumptions" -#~ msgstr "简化假设" +#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgstr ":py:obj:`run_driver_api `\\ \\(\\)" -#~ msgid "" -#~ "We make (and attempt to enforce) a" -#~ " number of assumptions that must be" -#~ " satisfied to ensure that the " -#~ "training process actually realizes the " -#~ ":math:`(\\epsilon, \\delta)` guarantees the " -#~ "user has in mind when configuring " -#~ "the setup." -#~ msgstr "" -#~ "我们提出(并试图执行)了一系列必须满足的假设,以确保训练过程真正实现用户在配置设置时所定的 " -#~ ":math:`(\\epsilon,\\delta)` 。" +#~ msgid "Run Flower server (Driver API)." +#~ msgstr "flower-driver-api" -#~ msgid "" -#~ "**Fixed-size subsampling** :Fixed-size " -#~ "subsamples of the clients must be " -#~ "taken at each round, as opposed to" -#~ " variable-sized Poisson subsamples." -#~ msgstr "** 固定大小的子样本** :与可变大小的泊松分布子样本相比,每轮必须抽取固定大小的客户端子样本。" +#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgstr ":py:obj:`run_fleet_api `\\ \\(\\)" -#~ msgid "" -#~ "**Unweighted averaging** : The contributions" -#~ " from all the clients must weighted" -#~ " equally in the aggregate to " -#~ "eliminate the requirement for the server" -#~ " to know in advance the sum of" -#~ " the weights of all clients available" -#~ " for selection." -#~ msgstr "**非加权平均**: 所有客户端的贡献必须加权相等,这样服务器就不需要事先知道所有客户的权重总和。" +#~ msgid "Run Flower server (Fleet API)." 
+#~ msgstr "Flower 服务器。" -#~ msgid "" -#~ "**No client failures** : The set " -#~ "of available clients must stay constant" -#~ " across all rounds of training. In" -#~ " other words, clients cannot drop out" -#~ " or fail." -#~ msgstr "**没有失败的客户端** : 在各轮训练中,可用客户端的数量必须保持不变。换句话说,客户端不能退出或失败。" +#~ msgid "Unreleased" +#~ msgstr "尚未发布" -#~ msgid "" -#~ "The first two are useful for " -#~ "eliminating a multitude of complications " -#~ "associated with calibrating the noise to" -#~ " the clipping threshold, while the " -#~ "third one is required to comply " -#~ "with the assumptions of the privacy " -#~ "analysis." -#~ msgstr "前两种方法有助于消除将噪声校准为削波阈值所带来的诸多复杂问题,而第三种方法则需要符合隐私分析的假设。" +#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgstr "|d8bf04f23d9b46d8a23cc6f4887d7873|" -#~ msgid "" -#~ "These restrictions are in line with " -#~ "constraints imposed by Andrew et al. " -#~ "[andrew]_." -#~ msgstr "这些限制与 Andrew 等人所施加的限制一致。" +#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgstr "|5aa1711387d74d0f8b9c499e1a51627e|" -#~ msgid "Customizable Responsibility for Noise injection" -#~ msgstr "可定制的噪声注入" +#~ msgid "|2bc8e069228d4873804061ff4a95048c|" +#~ msgstr "|2bc8e069228d4873804061ff4a95048c|" -#~ msgid "" -#~ "In contrast to other implementations " -#~ "where the addition of noise is " -#~ "performed at the server, you can " -#~ "configure the site of noise injection" -#~ " to better match your threat model." -#~ " We provide users with the " -#~ "flexibility to set up the training " -#~ "such that each client independently adds" -#~ " a small amount of noise to the" -#~ " clipped update, with the result that" -#~ " simply aggregating the noisy updates " -#~ "is equivalent to the explicit addition" -#~ " of noise to the non-noisy " -#~ "aggregate at the server." 
-#~ msgstr "与其他在服务器上添加噪声的实现方法不同,您可以配置噪声注入的位置,以便更好地匹配您的威胁模型。我们为用户提供了设置训练的灵活性,使每个客户端都能独立地为剪切参数更新添加少量噪声,这样,只需聚合噪声更新,就相当于在服务器上为非噪声聚合添加噪声了。" +#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgstr "|c258488766324dc9a6807f0e7c4fd5f4|" -#~ msgid "" -#~ "To be precise, if we let :math:`m`" -#~ " be the number of clients sampled " -#~ "each round and :math:`\\sigma_\\Delta` be " -#~ "the scale of the total Gaussian " -#~ "noise that needs to be added to" -#~ " the sum of the model updates, " -#~ "we can use simple maths to show" -#~ " that this is equivalent to each " -#~ "client adding noise with scale " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}`." -#~ msgstr "" -#~ "准确地说,我们假设每轮采样的客户端数量为:math:`m`,:math:`\\sigma_\\Delta` " -#~ "为需要添加到模型更新总和中的总高斯噪声的规模,我们就可以用简单的数学方法证明了,这相当于每个客户端都添加了规模为 " -#~ ":math:`\\sigma_\\Delta/\\sqrt{m}` 的噪声。" +#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgstr "|d5f962c3f4ec48529efda980868c14b0|" -#~ msgid "Wrapper-based approach" -#~ msgstr "基于封装的方法" +#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgstr "|a5eccea18d4c43a68b54b65043cabef8|" -#~ msgid "" -#~ "Introducing DP to an existing workload" -#~ " can be thought of as adding an" -#~ " extra layer of security around it." -#~ " This inspired us to provide the " -#~ "additional server and client-side logic" -#~ " needed to make the training process" -#~ " differentially private as wrappers for " -#~ "instances of the :code:`Strategy` and " -#~ ":code:`NumPyClient` abstract classes respectively." -#~ " This wrapper-based approach has the" -#~ " advantage of being easily composable " -#~ "with other wrappers that someone might" -#~ " contribute to the Flower library in" -#~ " the future, e.g., for secure " -#~ "aggregation. Using Inheritance instead can " -#~ "be tedious because that would require" -#~ " the creation of new sub- classes " -#~ "every time a new class implementing " -#~ ":code:`Strategy` or :code:`NumPyClient` is " -#~ "defined." 
-#~ msgstr "" -#~ "在现有工作负载中引入 DP " -#~ "可以被认为是在其周围增加了一层额外的安全性。受此启发,我们提供了额外的服务器端和客户端逻辑,分别作为 " -#~ ":code:`Strategy` 和 :code:`NumPyClient` " -#~ "抽象类实例的封装器,使训练过程具有不同的隐私性。这种基于封装器的方法的优点是可以很容易地与将来有人贡献给 Flower " -#~ "的其他封装器(例如用于安全聚合的封装器)进行组合。使用继承可能会比较繁琐,因为每次定义实现 :code:`Strategy`" -#~ " 或 :code:`NumPyClient` 的新类时,都需要创建新的子类。" +#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgstr "|f17662f7df2d42f68cac70a1fdeda8a7|" -#~ msgid "" -#~ "The first version of our solution " -#~ "was to define a decorator whose " -#~ "constructor accepted, among other things, " -#~ "a boolean-valued variable indicating " -#~ "whether adaptive clipping was to be " -#~ "enabled or not. We quickly realized " -#~ "that this would clutter its " -#~ ":code:`__init__()` function with variables " -#~ "corresponding to hyperparameters of adaptive" -#~ " clipping that would remain unused " -#~ "when it was disabled. A cleaner " -#~ "implementation could be achieved by " -#~ "splitting the functionality into two " -#~ "decorators, :code:`DPFedAvgFixed` and " -#~ ":code:`DPFedAvgAdaptive`, with the latter sub-" -#~ " classing the former. The constructors " -#~ "for both classes accept a boolean " -#~ "parameter :code:`server_side_noising`, which, as " -#~ "the name suggests, determines where " -#~ "noising is to be performed." 
-#~ msgstr "" -#~ "我们的第一版解决方案是定义一个装饰器,其构造函数接受一个布尔值变量,表示是否启用自适应剪裁。我们很快意识到,这样会使其 " -#~ ":code:`__init__()` " -#~ "函数中与自适应裁剪超参数相对应的变量变得杂乱无章,而这些变量在自适应裁剪被禁用时将保持未使用状态。要实现更简洁的功能,可以将该功能拆分为两个装饰器,即" -#~ " :code:`DPFedAvgFixed` 和 " -#~ ":code:`DPFedAvgAdaptive`,后者是前者的子类。这两个类的构造函数都接受一个布尔参数 " -#~ ":code:`server_side_noising`,顾名思义,它决定了在哪里加噪声。" +#~ msgid "|241fc906441a4f038c625a19d30d01b2|" +#~ msgstr "|241fc906441a4f038c625a19d30d01b2|" -#~ msgid "" -#~ "The server-side capabilities required " -#~ "for the original version of DP-" -#~ "FedAvg, i.e., the one which performed" -#~ " fixed clipping, can be completely " -#~ "captured with the help of wrapper " -#~ "logic for just the following two " -#~ "methods of the :code:`Strategy` abstract " -#~ "class." -#~ msgstr "" -#~ "只需对 :code:`Strategy` 抽象类的以下两个方法进行封装,就能完全捕获 DP-" -#~ "FedAvg 原始版本(即执行固定剪裁的版本)所需的服务器端功能。" +#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgstr "|0aa5aa05810b44b6a835cecce28f3137|" -#~ msgid "" -#~ ":code:`configure_fit()` : The config " -#~ "dictionary being sent by the wrapped " -#~ ":code:`Strategy` to each client needs to" -#~ " be augmented with an additional " -#~ "value equal to the clipping threshold" -#~ " (keyed under :code:`dpfedavg_clip_norm`) and," -#~ " if :code:`server_side_noising=true`, another one" -#~ " equal to the scale of the " -#~ "Gaussian noise that needs to be " -#~ "added at the client (keyed under " -#~ ":code:`dpfedavg_noise_stddev`). This entails " -#~ "*post*-processing of the results returned " -#~ "by the wrappee's implementation of " -#~ ":code:`configure_fit()`." 
-#~ msgstr "" -#~ ":code:`configure_fit()` :由封装的 :code:`Strategy` " -#~ "发送到每个客户端的配置字典需要使用等于裁剪阈值的附加值(在 :code:`dpfedavg_clip_norm` " -#~ "下键入)进行扩充。并且,如果 " -#~ "server_side_noising=true,则另一个值等于需要在客户端添加的高斯噪声的大小(在 " -#~ "dpfedavg_noise_stddev 下键入)。这需要对封装后的configure_fit() " -#~ "所返回的结果进行后处理。" +#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgstr "|c742940dd4bf4de09d8d0d5e8d179638|" -#~ msgid "" -#~ ":code:`aggregate_fit()`: We check whether any" -#~ " of the sampled clients dropped out" -#~ " or failed to upload an update " -#~ "before the round timed out. In " -#~ "that case, we need to abort the" -#~ " current round, discarding any successful" -#~ " updates that were received, and move" -#~ " on to the next one. On the " -#~ "other hand, if all clients responded " -#~ "successfully, we must force the " -#~ "averaging of the updates to happen " -#~ "in an unweighted manner by intercepting" -#~ " the :code:`parameters` field of " -#~ ":code:`FitRes` for each received update " -#~ "and setting it to 1. Furthermore, " -#~ "if :code:`server_side_noising=true`, each update " -#~ "is perturbed with an amount of " -#~ "noise equal to what it would have" -#~ " been subjected to had client-side" -#~ " noising being enabled. This entails " -#~ "*pre*-processing of the arguments to " -#~ "this method before passing them on " -#~ "to the wrappee's implementation of " -#~ ":code:`aggregate_fit()`." 
-#~ msgstr "" -#~ ":code:`aggregate_fit()`: " -#~ "我们会检查是否有任何客户端在本轮超时前退出或未能上传参数更新。在这种情况下,我们需要中止当前一轮,丢弃已收到的所有参数更新,然后继续下一轮。另一方面,如果所有客户端都成功响应,我们就必须通过拦截" -#~ " :code:`FitRes` 的 :code:`parameters` 字段并将其设置为 " -#~ "1,强制以不加权的方式平均更新。此外,如果 " -#~ ":code:`server_side_noising=true`,每次更新都会受到一定量的噪声扰动,其扰动量相当于启用客户端噪声时的扰动量。" -#~ " 这就需要在将本方法的参数传递给封装的 :code:`aggregate_fit()` " -#~ "之前,对参数进行*预*处理。" +#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgstr "|1f169ab4601a47e1a226f1628f4ebddb|" -#~ msgid "" -#~ "We can't directly change the aggregation" -#~ " function of the wrapped strategy to" -#~ " force it to add noise to the" -#~ " aggregate, hence we simulate client-" -#~ "side noising to implement server-side" -#~ " noising." -#~ msgstr "我们无法直接改变封装策略的聚合函数,迫使它在聚合中添加噪声,因此我们模拟客户端噪声来实现服务器端噪声。" +#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgstr "|12cfa9cde14440ecb8c8f6c1d7185bec|" -#~ msgid "" -#~ "These changes have been put together " -#~ "into a class called :code:`DPFedAvgFixed`, " -#~ "whose constructor accepts the strategy " -#~ "being decorated, the clipping threshold " -#~ "and the number of clients sampled " -#~ "every round as compulsory arguments. The" -#~ " user is expected to specify the " -#~ "clipping threshold since the order of" -#~ " magnitude of the update norms is " -#~ "highly dependent on the model being " -#~ "trained and providing a default value" -#~ " would be misleading. The number of" -#~ " clients sampled at every round is" -#~ " required to calculate the amount of" -#~ " noise that must be added to " -#~ "each individual update, either by the" -#~ " server or the clients." 
-#~ msgstr "" -#~ "这些变化被整合到一个名为 :code:`DPFedAvgFixed` " -#~ "的类中,其构造函数接受被装饰的策略、剪切阈值和每轮采样的客户数作为必选参数。用户需要指定剪切阈值,因为参数更新规范的数量级在很大程度上取决于正在训练的模型,提供默认值会产生误导。每轮采样的客户端数量是计算服务器或客户在每次参数更新时添加的噪音量所必需的。" +#~ msgid "|72939caf6e294b0986fee6dde96614d7|" +#~ msgstr "|72939caf6e294b0986fee6dde96614d7|" -#~ msgid "" -#~ "The additional functionality required to " -#~ "facilitate adaptive clipping has been " -#~ "provided in :code:`DPFedAvgAdaptive`, a " -#~ "subclass of :code:`DPFedAvgFixed`. It " -#~ "overrides the above-mentioned methods to" -#~ " do the following." -#~ msgstr "" -#~ "自适应剪裁所需的附加功能在 :code:`DPFedAvgAdaptive` 中提供,其是 " -#~ ":code:`DPFedAvgFixed` 的子类。它重写了上述方法,以实现以下功能。" +#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgstr "|83a8daee45da4a98b8d6f24ae098fc50|" -#~ msgid "" -#~ ":code:`configure_fit()` : It intercepts the" -#~ " config dict returned by " -#~ ":code:`super.configure_fit()` to add the " -#~ "key-value pair " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True` to it, " -#~ "which the client interprets as an " -#~ "instruction to include an indicator bit" -#~ " (1 if update norm <= clipping " -#~ "threshold, 0 otherwise) in the results" -#~ " returned by it." -#~ msgstr "" -#~ ":code:`configure_fit()`:它截取由 :code:`super.configure_fit()` " -#~ "返回的 config 字典,并在其中添加键-值对 " -#~ ":code:`dpfedavg_adaptive_clip_enabled:True\",客户端将其解释为在返回结果中包含一个指示位(如果参数更新范式" -#~ " <= 剪裁阈值,则为 1,否则为 0)的指令。" +#~ msgid "Edge Client Engine" +#~ msgstr "边缘客户端引擎" #~ msgid "" -#~ ":code:`aggregate_fit()` : It follows a " -#~ "call to :code:`super.aggregate_fit()` with one" -#~ " to :code:`__update_clip_norm__()`, a procedure" -#~ " which adjusts the clipping threshold " -#~ "on the basis of the indicator bits" -#~ " received from the sampled clients." 
-#~ msgstr ":code:`aggregate_fit()`:在调用:code:`super.aggregate_fit()`后,再调用:code:`__update_clip_norm__()`,该过程根据从采样客户端接收到的指示位调整裁剪阈值。" +#~ "`Flower `_ core framework " +#~ "architecture with Edge Client Engine" +#~ msgstr "具有边缘客户端引擎的`Flower `核心架构" -#~ msgid "" -#~ "The client-side capabilities required " -#~ "can be completely captured through " -#~ "wrapper logic for just the :code:`fit()`" -#~ " method of the :code:`NumPyClient` abstract" -#~ " class. To be precise, we need " -#~ "to *post-process* the update computed" -#~ " by the wrapped client to clip " -#~ "it, if necessary, to the threshold " -#~ "value supplied by the server as " -#~ "part of the config dictionary. In " -#~ "addition to this, it may need to" -#~ " perform some extra work if either" -#~ " (or both) of the following keys " -#~ "are also present in the dict." -#~ msgstr "" -#~ "客户端所需的功能完全可以通过 :code:`NumPyClient` 抽象类的 " -#~ ":code:`fit()` " -#~ "方法的封装逻辑来实现。准确地说,我们需要对封装客户端计算的参数更新进行处理,以便在必要时将其剪切到服务器作为配置字典的一部分提供的阈值。除此之外,如果配置字典中还存在以下任一(或两个)键,客户端可能还需要执行一些额外的工作。" +#~ msgid "Virtual Client Engine" +#~ msgstr "虚拟客户端引擎" #~ msgid "" -#~ ":code:`dpfedavg_noise_stddev` : Generate and " -#~ "add the specified amount of noise " -#~ "to the clipped update." -#~ msgstr "code:`dpfedavg_noise_stddev`:生成并在剪切参数更新中添加指定数量的噪声。" +#~ "`Flower `_ core framework " +#~ "architecture with Virtual Client Engine" +#~ msgstr "具有虚拟客户端引擎的`Flower `核心架构" + +#~ msgid "Virtual Client Engine and Edge Client Engine in the same workload" +#~ msgstr "可同步进行的虚拟客户端引擎和边缘客户端引擎" #~ msgid "" -#~ ":code:`dpfedavg_adaptive_clip_enabled` : Augment the" -#~ " metrics dict in the :code:`FitRes` " -#~ "object being returned to the server " -#~ "with an indicator bit, calculated as " -#~ "described earlier." 
-#~ msgstr "" -#~ ":code:`dpfedavg_adaptive_clip_enabled`:在返回给服务器的 :code:`FitRes`" -#~ " 对象中的度量值字典中增加一个指标位,计算方法如前所述。" +#~ "`Flower `_ core framework " +#~ "architecture with both Virtual Client " +#~ "Engine and Edge Client Engine" +#~ msgstr "具有虚拟客户端引擎和边缘客户端引擎的`Flower `核心架构" -#~ msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" -#~ msgstr "进行 :math:`(epsilon, \\delta)` 分析" +#~ msgid "Clone the flower repository." +#~ msgstr "**叉花仓库**" #~ msgid "" -#~ "Assume you have trained for :math:`n`" -#~ " rounds with sampling fraction :math:`q`" -#~ " and noise multiplier :math:`z`. In " -#~ "order to calculate the :math:`\\epsilon` " -#~ "value this would result in for a" -#~ " particular :math:`\\delta`, the following " -#~ "script may be used." +#~ "Please follow the first section on " +#~ ":doc:`Run Flower using Docker ` which " +#~ "covers this step in more detail." #~ msgstr "" -#~ "假设您已经训练了 :math:`n` 轮,采样比例为 :math:`q`,噪声乘数为 " -#~ ":math:`z`。为了计算特定 :math:`\\delta` 的 :math:`epsilon`" -#~ " 值,可以使用下面的脚本。" +#~ "请阅读 :doc:`Run Flower using Docker " +#~ "` " +#~ "的第一节,其中更详细地介绍了这一步骤。" -#~ msgid "Flower driver SDK." -#~ msgstr "Flower 服务器。" +#~ msgid "``22.04``" +#~ msgstr "``1.0.0rc1``" -#~ msgid "driver" -#~ msgstr "服务器" +#~ msgid "``23.0.1``" +#~ msgstr "``1.0.0rc1``" -#~ msgid "Get task results." -#~ msgstr "汇总训练结果。" +#~ msgid "``69.0.2``" +#~ msgstr "``1.0.0b0``" -#~ msgid "Request for run ID." -#~ msgstr "Flower 基线申请" +#~ msgid "``1.8.0``" +#~ msgstr "``1.0.0b0``" -#~ msgid "Get client IDs." -#~ msgstr "返回客户端(本身)。" +#~ msgid "Building the SuperLink/SuperNode or ServerApp image" +#~ msgstr "启动服务器" -#~ msgid "" -#~ "Flower usage examples used to be " -#~ "bundled with Flower in a package " -#~ "called ``flwr_example``. We are migrating " -#~ "those examples to standalone projects to" -#~ " make them easier to use. All " -#~ "new examples are based in the " -#~ "directory `examples " -#~ "`_." 
+#~ msgid "``1.8.0-py3.10-ubuntu22.04``" #~ msgstr "" -#~ "Flower 的使用示例曾与 Flower 捆绑在一个名为 ``flwr_example``" -#~ " 的软件包中。我们正在将这些示例迁移到独立项目中,以使它们更易于使用。所有新示例都位于目录 `examples " -#~ "`_。" -#~ msgid "Quickstart TensorFlow/Keras" -#~ msgstr "快速入门 TensorFlow/Keras" +#~ msgid "" +#~ "The following example creates a " +#~ "SuperLink/SuperNode or ServerApp image with" +#~ " the official Flower base image:" +#~ msgstr "下面的示例使用官方的 Flower 基本镜像 py3.11-ubuntu22.04 和 Flower 1.7.0 创建了一个服务器镜像:" -#~ msgid "Legacy Examples (`flwr_example`)" -#~ msgstr "传统示例 (`flwr_example`)" +#~ msgid "Trigger the CI for building the Docker images." +#~ msgstr "官方 Ubuntu Docker 映像的版本。" #~ msgid "" -#~ "The useage examples in `flwr_example` " -#~ "are deprecated and will be removed " -#~ "in the future. New examples are " -#~ "provided as standalone projects in " -#~ "`examples `_." +#~ "To trigger the workflow, a collaborator" +#~ " must create a ``workflow_dispatch`` event" +#~ " in the GitHub CI. This can be" +#~ " done either through the UI or " +#~ "via the GitHub CLI. The event " +#~ "requires only one input, the Flower " +#~ "version, to be released." #~ msgstr "" -#~ "在 `flwr_example` 中的使用示例已被弃用,今后将被移除。新示例将作为独立项目在 " -#~ "`examples `_" -#~ " 中提供。" -#~ msgid "Extra Dependencies" -#~ msgstr "额外依赖" +#~ msgid "**Via the UI**" +#~ msgstr "**审查 PR**" #~ msgid "" -#~ "The core Flower framework keeps a " -#~ "minimal set of dependencies. The " -#~ "examples demonstrate Flower in the " -#~ "context of different machine learning " -#~ "frameworks, so additional dependencies need" -#~ " to be installed before an example" -#~ " can be run." +#~ "Go to the ``Build docker images`` " +#~ "workflow `page " +#~ "`_." 
#~ msgstr "" -#~ "Flower 核心框架只保留了最低限度的依赖项。这些示例在不同机器学习框架的背景下演示了 " -#~ "Flower,因此在运行示例之前需要安装额外的依赖项。" -#~ msgid "For PyTorch examples::" -#~ msgstr "PyTorch 示例::" +#~ msgid "" +#~ "Click on the ``Run workflow`` button " +#~ "and type the new version of Flower" +#~ " in the ``Version of Flower`` input" +#~ " field." +#~ msgstr "" -#~ msgid "For TensorFlow examples::" -#~ msgstr "TensorFlow 示例::" +#~ msgid "Click on the **green** ``Run workflow`` button." +#~ msgstr "" -#~ msgid "For both PyTorch and TensorFlow examples::" -#~ msgstr "PyTorch 和 TensorFlow 示例::" +#~ msgid "**Via the GitHub CI**" +#~ msgstr "" #~ msgid "" -#~ "Please consult :code:`pyproject.toml` for a" -#~ " full list of possible extras " -#~ "(section :code:`[tool.poetry.extras]`)." +#~ "Make sure you are logged in via" +#~ " ``gh auth login`` and that the " +#~ "current working directory is the root" +#~ " of the Flower repository." #~ msgstr "" -#~ "请参阅 :code:`pyproject.toml`,了解可能的 extras 的完整列表(章节 " -#~ ":code:`[tool.poems.extras]`)。" - -#~ msgid "PyTorch Examples" -#~ msgstr "PyTorch 示例" #~ msgid "" -#~ "Our PyTorch examples are based on " -#~ "PyTorch 1.7. They should work with " -#~ "other releases as well. So far, we" -#~ " provide the following examples." -#~ msgstr "我们的 PyTorch 示例基于 PyTorch 1.7。它们应该也能在其他版本中使用。到目前为止,我们提供了以下示例。" +#~ "Trigger the workflow via ``gh workflow" +#~ " run docker-images.yml -f flwr-" +#~ "version=``." +#~ msgstr "" -#~ msgid "CIFAR-10 Image Classification" -#~ msgstr "CIFAR-10 图像分类" +#~ msgid "Example: JAX - Run JAX Federated" +#~ msgstr "示例: JAX - 运行联邦式 JAX" #~ msgid "" -#~ "`CIFAR-10 and CIFAR-100 " -#~ "`_ are " -#~ "popular RGB image datasets. The Flower" -#~ " CIFAR-10 example uses PyTorch to " -#~ "train a simple CNN classifier in a" -#~ " federated learning setup with two " -#~ "clients." +#~ "The simplest way to get started " +#~ "with Flower is by using the " +#~ "pre-made Docker images, which you can" +#~ " find on `Docker Hub " +#~ "`__. 
Supported " +#~ "architectures include ``amd64`` and " +#~ "``arm64v8``." #~ msgstr "" -#~ "CIFAR-10 和 CIFAR-100 " -#~ "``_ 是流行的 RGB" -#~ " 图像数据集。Flower CIFAR-10 示例使用 PyTorch " -#~ "在有两个客户端的联邦学习设置中训练一个简单的 CNN 分类器。" +#~ "开始使用 Flower 的最简单方法是使用预制的 Docker 镜像,您可以在 " +#~ "`Docker Hub `_" +#~ " 上找到这些镜像。" -#~ msgid "First, start a Flower server:" -#~ msgstr "首先,启动 Flower 服务器:" +#~ msgid "" +#~ "If you do not see the version " +#~ "of Docker but instead get an error" +#~ " saying that the command was not " +#~ "found, you will need to install " +#~ "Docker first. You can find installation" +#~ " instruction `here `_." +#~ msgstr "" +#~ "如果没有看到 Docker 的版本,而是出现找不到命令的错误,则需要先安装 Docker。你可以在" +#~ " `_ 找到安装说明。" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +#~ msgid "" +#~ "On Linux, Docker commands require " +#~ "``sudo`` privilege. If you want to " +#~ "avoid using ``sudo``, you can follow " +#~ "the `Post-installation steps " +#~ "`_" +#~ " on the official Docker website." +#~ msgstr "" +#~ "在 Linux 上,Docker 命令需要 ``sudo`` " +#~ "权限。如果你想避免使用 ``sudo``,可以按照 Docker 官方网站上的 `安装后步骤" +#~ " `_进行操作。" -#~ msgid "Then, start the two clients in a new terminal window:" -#~ msgstr "然后,在新的终端窗口中启动两个客户端:" +#~ msgid "" +#~ "To ensure optimal performance and " +#~ "compatibility, the SuperLink, SuperNode and" +#~ " ServerApp image must have the same" +#~ " version when running together. This " +#~ "guarantees seamless integration and avoids " +#~ "potential conflicts or issues that may" +#~ " arise from using different versions." +#~ msgstr "" +#~ "为确保最佳性能和兼容性,SuperLink、SuperNode 和 ServerApp " +#~ "映像在一起运行时必须具有相同的版本。这可确保无缝集成,并避免因使用不同版本而可能产生的潜在冲突或问题。" -#~ msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +#~ msgid "Flower SuperLink" +#~ msgstr "flower-superlink" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." 
-#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_cifar`。" +#~ msgid "Quickstart" +#~ msgstr "快速入门 JAX" -#~ msgid "ImageNet-2012 Image Classification" -#~ msgstr "ImageNet-2012 图像分类" +#~ msgid "If you're looking to try out Flower, you can use the following command:" +#~ msgstr "如果您想试用 Flower,可以使用以下命令:" #~ msgid "" -#~ "`ImageNet-2012 `_ is " -#~ "one of the major computer vision " -#~ "datasets. The Flower ImageNet example " -#~ "uses PyTorch to train a ResNet-18 " -#~ "classifier in a federated learning setup" -#~ " with ten clients." +#~ "The command pulls the Docker image " +#~ "with the tag ``1.8.0`` from Docker " +#~ "Hub. The tag specifies the Flower " +#~ "version. In this case, Flower 1.8.0. " +#~ "The ``--rm`` flag tells Docker to " +#~ "remove the container after it exits." #~ msgstr "" -#~ "ImageNet-2012 `_ " -#~ "是主要的计算机视觉数据集之一。Flower ImageNet 示例使用 PyTorch " -#~ "在有十个客户端的联邦学习设置中训练 ResNet-18 分类器。" +#~ "该命令将从 Docker Hub 提取标签为``1.7.0-py3.11-ubuntu22.04``的" +#~ " Docker 镜像。标签包含使用 Flower、Python 和 Ubuntu" +#~ " 的信息。在本例中,它使用了 Flower 1.7.0、Python 3.11 和" +#~ " Ubuntu 22.04。rm \"标记告诉 Docker 在退出后移除容器。" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +#~ msgid "" +#~ "The ``-p :`` flag tells " +#~ "Docker to map the ports " +#~ "``9091``/``9092`` of the host to " +#~ "``9091``/``9092`` of the container, allowing" +#~ " you to access the Driver API " +#~ "on ``http://localhost:9091`` and the Fleet " +#~ "API on ``http://localhost:9092``. Lastly, any" +#~ " flag that comes after the tag " +#~ "is passed to the Flower SuperLink. " +#~ "Here, we are passing the flag " +#~ "``--insecure``." 
+#~ msgstr "" +#~ "``-p :`` 标记会告诉 Docker 将主机的端口" +#~ " ``9091``/``9092`` 映射到容器的端口 ``9091``/`9092``,这样你就可以在" +#~ " ``http://localhost:9091`` 上访问 Driver API,在 " +#~ "``http://localhost:9092`` 上访问 Fleet " +#~ "API。最后,标签后面的任何标志都会传递给 Flower 服务器。在这里,我们传递的标志是 " +#~ "``--insecure`` 。" -#~ msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +#~ msgid "" +#~ "The ``--insecure`` flag enables insecure " +#~ "communication (using HTTP, not HTTPS) " +#~ "and should only be used for " +#~ "testing purposes. We strongly recommend " +#~ "enabling `SSL `__ when " +#~ "deploying to a production environment." +#~ msgstr "" +#~ "不安全 \"标志启用不安全通信(使用 HTTP,而非 " +#~ "HTTPS),只能用于测试目的。我们强烈建议在部署到生产环境时启用 `SSL " +#~ "`_。" -#~ msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/pytorch_imagenet`。" +#~ msgid "" +#~ "You can use ``--help`` to view all" +#~ " available flags that the SuperLink " +#~ "supports:" +#~ msgstr "您可以使用 ``--help`` 查看服务器支持的所有可用标记:" -#~ msgid "TensorFlow Examples" -#~ msgstr "TensorFlow 示例" +#~ msgid "Mounting a volume to store the state on the host system" +#~ msgstr "在主机系统上挂载卷以存储状态" #~ msgid "" -#~ "Our TensorFlow examples are based on " -#~ "TensorFlow 2.0 or newer. So far, " -#~ "we provide the following examples." -#~ msgstr "我们的 TensorFlow 示例基于 TensorFlow 2.0 或更新版本。到目前为止,我们提供了以下示例。" +#~ "If you want to persist the state" +#~ " of the SuperLink on your host " +#~ "system, all you need to do is " +#~ "specify a directory where you want " +#~ "to save the file on your host " +#~ "system and a name for the database" +#~ " file. By default, the SuperLink " +#~ "container runs with a non-root " +#~ "user called ``app`` with the user " +#~ "ID ``49999``. 
It is recommended to " +#~ "create new directory and change the " +#~ "user ID of the directory to " +#~ "``49999`` to ensure the mounted " +#~ "directory has the proper permissions. If" +#~ " you later want to delete the " +#~ "directory, you can change the user " +#~ "ID back to the current user ID " +#~ "by running ``sudo chown -R $USER:$(id" +#~ " -gn) state``." +#~ msgstr "" -#~ msgid "Fashion-MNIST Image Classification" -#~ msgstr "Fashion-MNIST 图像分类" +#~ msgid "" +#~ "Assuming all files we need are in" +#~ " the local ``certificates`` directory, we" +#~ " can use the flag ``--volume`` to " +#~ "mount the local directory into the " +#~ "``/app/certificates/`` directory of the " +#~ "container. This allows the SuperLink to" +#~ " access the files within the " +#~ "container. The ``ro`` stands for " +#~ "``read-only``. Docker volumes default to" +#~ " ``read-write``; that option tells " +#~ "Docker to make the volume ``read-" +#~ "only`` instead. Finally, we pass the " +#~ "names of the certificates and key " +#~ "file to the SuperLink with the " +#~ "``--ssl-ca-certfile``, ``--ssl-certfile`` " +#~ "and ``--ssl-keyfile`` flag." +#~ msgstr "" +#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " +#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " +#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " +#~ "标志将证书名称传递给服务器。" #~ msgid "" -#~ "`Fashion-MNIST `_ is often used as " -#~ "the \"Hello, world!\" of machine " -#~ "learning. We follow this tradition and" -#~ " provide an example which samples " -#~ "random local datasets from Fashion-MNIST" -#~ " and trains a simple image " -#~ "classification model over those partitions." +#~ "Because Flower containers, by default, " +#~ "run with a non-root user ``app``," +#~ " the mounted files and directories " +#~ "must have the proper permissions for " +#~ "the user ID ``49999``. 
For example, " +#~ "to change the user ID of all " +#~ "files in the ``certificates/`` directory, " +#~ "you can run ``sudo chown -R " +#~ "49999:49999 certificates/*``." #~ msgstr "" -#~ "`Fashion-MNIST `_ 经常被用作机器学习的 \"你好,世界!\"。我们遵循这一传统" -#~ ",提供了一个从Fashion-MNIST 中随机抽样本地数据集的示例,并在这些分区上训练一个简单的图像分类模型。" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +#~ msgid "" +#~ "The SuperNode Docker image comes with" +#~ " a pre-installed version of Flower" +#~ " and serves as a base for " +#~ "building your own SuperNode image." +#~ msgstr "超级节点 Docker 镜像预装了 Flower 版本,可作为构建自己的超级节点镜像的基础。" -#~ msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" -#~ msgstr "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +#~ msgid "" +#~ "We will use the ``quickstart-pytorch``" +#~ " example, which you can find in " +#~ "the Flower repository, to illustrate how" +#~ " you can dockerize your ClientApp." +#~ msgstr "" +#~ "我们将使用 \"quickstart-pytorch\"(快速启动-pytorch)示例来说明如何对 " +#~ "ClientApp 进行 docker 化。" #~ msgid "" -#~ "For more details, see " -#~ ":code:`src/py/flwr_example/tensorflow_fashion_mnist`." -#~ msgstr "更多详情,请参阅 :code:`src/py/flwr_example/tensorflow_fashion_mnist`。" +#~ "Before we can start, we need to" +#~ " meet a few prerequisites in our " +#~ "local development environment. You can " +#~ "skip the first part if you want" +#~ " to run your ClientApp instead of " +#~ "the ``quickstart-pytorch`` example." +#~ msgstr "在开始之前,我们需要在本地开发环境中满足一些先决条件。" -#~ msgid "``BASE_IMAGE_TAG``" -#~ msgstr "基本图像标签" +#~ msgid "Let's assume the following project layout:" +#~ msgstr "假设项目布局如下" -#~ msgid "The image tag of the base image." -#~ msgstr "基础图像的图像标记。" +#~ msgid "" +#~ "First, we need to create a " +#~ "``requirements.txt`` file in the directory " +#~ "where the ``ClientApp`` code is located." 
+#~ " In the file, we list all the" +#~ " dependencies that the ClientApp requires." +#~ msgstr "" +#~ "首先,我们需要在 ``ClientApp`` 代码所在的目录中创建一个 " +#~ "``requirements.txt`` 文件。在该文件中,我们列出了 ClientApp " +#~ "需要的所有依赖项。" #~ msgid "" -#~ "It is important to follow the " -#~ "instructions described in comments. For " -#~ "instance, in order to not break " -#~ "how our changelog system works, you " -#~ "should read the information above the" -#~ " ``Changelog entry`` section carefully. You" -#~ " can also checkout some examples and" -#~ " details in the :ref:`changelogentry` " -#~ "appendix." +#~ "Note that `flwr `__" +#~ " is already installed in the " +#~ "``flwr/supernode`` base image, so you " +#~ "only need to include other package " +#~ "dependencies in your ``requirements.txt``, " +#~ "such as ``torch``, ``tensorflow``, etc." #~ msgstr "" -#~ "请务必遵守注释中的说明。例如,为了不破坏我们的更新日志系统,你应该仔细阅读\"`更新日志条目``\"部分上面的信息。您还可以查看 " -#~ ":ref:`changelogentry` 附录中的一些示例和细节。" +#~ "请注意,`flwr `__ " +#~ "已经安装在`flwr/supernode``基础镜像中,因此只需在`requirements.txt``中包含其他依赖包,如`torch``、`tensorflow`等。" -#~ msgid "Open a PR (as shown above)" -#~ msgstr "打开 PR(如上图所示)" +#~ msgid "" +#~ "Next, we create a Dockerfile. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.supernode`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "接下来,我们创建一个 Dockerfile。如果使用 ``quickstart-pytorch``" +#~ " 示例,请在 ``examples/quickstart-pytorch`` 中创建一个名为" +#~ " ``Dockerfile.supernode`` 的新文件。" -#~ msgid "How to write a good PR title" -#~ msgstr "如何撰写好的公关标题" +#~ msgid "" +#~ "The ``Dockerfile.supernode`` contains the " +#~ "instructions that assemble the SuperNode " +#~ "image." +#~ msgstr "Dockerfile.supernode \"包含组装超级节点映像的指令。" #~ msgid "" -#~ "A well-crafted PR title helps team" -#~ " members quickly understand the purpose " -#~ "and scope of the changes being " -#~ "proposed. 
Here's a guide to help " -#~ "you write a good GitHub PR title:" -#~ msgstr "一个精心撰写的公关标题能帮助团队成员迅速了解所提修改的目的和范围。以下指南可帮助您撰写一个好的 GitHub PR 标题:" +#~ "In the first two lines, we " +#~ "instruct Docker to use the SuperNode " +#~ "image tagged ``nightly`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. Next, we install the" +#~ " ClientApp dependencies by copying the " +#~ "``requirements.txt`` file into the image " +#~ "and run ``pip install``. In the " +#~ "last two lines, we copy the " +#~ "``client.py`` module into the image and" +#~ " set the entry point to ``flower-" +#~ "client-app`` with the argument " +#~ "``client:app``. The argument is the " +#~ "object reference of the ClientApp " +#~ "(``:``) that will be run" +#~ " inside the ClientApp." +#~ msgstr "" +#~ "在前两行中,我们指示 Docker 使用标记为 ``nightly`` 的 " +#~ "SuperNode 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " +#~ "``/app`` 目录中执行。接下来,我们通过将 ``requirements.txt`` " +#~ "文件复制到映像中并运行 ``pip install`` 来安装 ClientApp " +#~ "依赖项。最后两行,我们将 ``client.py`` 模块复制到映像中,并将入口点设置为 " +#~ "``flower-client-app``,参数为 ``client:app``。参数是将在 " +#~ "ClientApp 内运行的 ClientApp 的对象引用(``<模块>:<属性>``)。" + +#~ msgid "Building the SuperNode Docker image" +#~ msgstr "启动服务器" #~ msgid "" -#~ "1. Be Clear and Concise: Provide a" -#~ " clear summary of the changes in " -#~ "a concise manner. 1. Use Actionable " -#~ "Verbs: Start with verbs like \"Add,\"" -#~ " \"Update,\" or \"Fix\" to indicate " -#~ "the purpose. 1. Include Relevant " -#~ "Information: Mention the affected feature " -#~ "or module for context. 1. Keep it" -#~ " Short: Avoid lengthy titles for easy" -#~ " readability. 1. Use Proper Capitalization" -#~ " and Punctuation: Follow grammar rules " -#~ "for clarity." +#~ "We gave the image the name " +#~ "``flwr_supernode``, and the tag ``0.0.1``. " +#~ "Remember that the here chosen values " +#~ "only serve as an example. 
You can" +#~ " change them to your needs." #~ msgstr "" -#~ "1. 简明扼要: 以简明扼要的方式清楚地概述变化。1. 使用可操作的动词: 使用 " -#~ "\"添加\"、\"更新 \"或 \"修复 \"等动词来表明目的。1. 包含相关信息: " -#~ "提及受影响的功能或模块以了解上下文。1. 简短:避免冗长的标题,以方便阅读。1. 使用正确的大小写和标点符号:" -#~ " 遵守语法规则,以确保清晰。" +#~ "我们将图像命名为 ``flwr_supernode``,标签为 " +#~ "``0.0.1``。请记住,这里选择的值只是一个示例。您可以根据自己的需要进行更改。" + +#~ msgid "Running the SuperNode Docker image" +#~ msgstr "启动服务器" + +#~ msgid "Now that we have built the SuperNode image, we can finally run it." +#~ msgstr "现在,我们已经构建了超级节点镜像,终于可以运行它了。" + +#~ msgid "Let's break down each part of this command:" +#~ msgstr "让我们来分析一下这条命令的各个部分:" #~ msgid "" -#~ "Let's start with a few examples " -#~ "for titles that should be avoided " -#~ "because they do not provide meaningful" -#~ " information:" -#~ msgstr "让我们先举例说明几个应该避免使用的标题,因为它们不能提供有意义的信息:" +#~ "``--rm``: This option specifies that the" +#~ " container should be automatically removed" +#~ " when it stops." +#~ msgstr "`-rm``: 该选项指定容器停止时应自动移除。" -#~ msgid "Implement Algorithm" -#~ msgstr "执行算法" +#~ msgid "``--insecure``: This option enables insecure communication." +#~ msgstr "不安全\": 该选项启用不安全通信。" -#~ msgid "Add my_new_file.py to codebase" -#~ msgstr "在代码库中添加 my_new_file.py" +#~ msgid "" +#~ "``--superlink 192.168.1.100:9092``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Fleet" +#~ msgstr "``--server 192.168.1.100:9092``: 该选项指定超级链接舰队的地址" -#~ msgid "Improve code in module" -#~ msgstr "改进模块中的代码" +#~ msgid "API to connect to. Remember to update it with your SuperLink IP." +#~ msgstr "要连接的 API。记住用您的超级链接 IP 更新它。" -#~ msgid "Change SomeModule" -#~ msgstr "更改 SomeModule" +#~ msgid "" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your SuperNodes." 
+#~ msgstr "" +#~ "要测试在本地运行 Flower,可以创建一个 \"桥接网络 " +#~ "`__\",使用\"--网络 \"参数并传递 Docker " +#~ "网络的名称,以运行超级节点。" #~ msgid "" -#~ "Here are a few positive examples " -#~ "which provide helpful information without " -#~ "repeating how they do it, as that" -#~ " is already visible in the \"Files" -#~ " changed\" section of the PR:" -#~ msgstr "这里有几个正面的例子,提供了有用的信息,但没有重复他们是如何做的,因为在 PR 的 \"已更改文件 \"部分已经可以看到:" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "SuperNode binary. To see all available" +#~ " flags that the SuperNode supports, " +#~ "run:" +#~ msgstr "标记后的任何参数都将传递给 Flower 超级节点二进制文件。要查看超级节点支持的所有可用标记,请运行" -#~ msgid "Update docs banner to mention Flower Summit 2023" -#~ msgstr "更新文件横幅,提及 2023 年 Flower 峰会" +#~ msgid "" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your SuperNode container." +#~ msgstr "要启用 SSL,我们需要将 PEM 编码的根证书挂载到 SuperNode 容器中。" -#~ msgid "Remove unnecessary XGBoost dependency" -#~ msgstr "移除不必要的 XGBoost 依赖性" +#~ msgid "" +#~ "Similar to the SuperNode image, the " +#~ "ServerApp Docker image comes with a " +#~ "pre-installed version of Flower and " +#~ "serves as a base for building your" +#~ " own ServerApp image." +#~ msgstr "" +#~ "与 SuperNode 映像类似,ServerApp Docker 映像也预装了 " +#~ "Flower 版本,可作为构建自己的 ServerApp 映像的基础。" -#~ msgid "Remove redundant attributes in strategies subclassing FedAvg" -#~ msgstr "删除 FedAvg 子类化策略中的多余属性" +#~ msgid "" +#~ "We will use the same ``quickstart-" +#~ "pytorch`` example as we do in the" +#~ " Flower SuperNode section. If you " +#~ "have not already done so, please " +#~ "follow the `SuperNode Prerequisites`_ before" +#~ " proceeding." 
+#~ msgstr "" +#~ "我们将使用与 \"Flower SuperNode \"部分相同的 " +#~ "\"quickstart-pytorch \"示例。如果您还没有这样做,请在继续之前遵循 " +#~ "\"SuperNode 先决条件\"。" + +#~ msgid "Creating a ServerApp Dockerfile" +#~ msgstr "创建 ServerApp Dockerfile" #~ msgid "" -#~ "Add CI job to deploy the staging" -#~ " system when the ``main`` branch " -#~ "changes" -#~ msgstr "添加 CI 作业,以便在 \"主 \"分支发生变化时部署暂存系统" +#~ "First, we need to create a " +#~ "Dockerfile in the directory where the" +#~ " ``ServerApp`` code is located. If " +#~ "you use the ``quickstart-pytorch`` " +#~ "example, create a new file called " +#~ "``Dockerfile.serverapp`` in ``examples/quickstart-" +#~ "pytorch``." +#~ msgstr "" +#~ "首先,我们需要在 ``ServerApp`` 代码所在的目录中创建一个 Dockerfile。如果使用" +#~ " ``quickstart-pytorch`` 示例,请在 ``examples" +#~ "/quickstart-pytorch`` 中创建一个名为 ``Dockerfile.serverapp``" +#~ " 的新文件。" #~ msgid "" -#~ "Add new amazing library which will " -#~ "be used to improve the simulation " -#~ "engine" -#~ msgstr "添加新的惊人库,用于改进模拟引擎" +#~ "The ``Dockerfile.serverapp`` contains the " +#~ "instructions that assemble the ServerApp " +#~ "image." +#~ msgstr "Dockerfile.serverapp \"包含组装 ServerApp 镜像的说明。" -#~ msgid "Changelog entry" -#~ msgstr "更新日志" +#~ msgid "" +#~ "In the first two lines, we " +#~ "instruct Docker to use the ServerApp " +#~ "image tagged ``1.8.0`` as a base " +#~ "image and set our working directory " +#~ "to ``/app``. The following instructions " +#~ "will now be executed in the " +#~ "``/app`` directory. In the last two " +#~ "lines, we copy the ``server.py`` module" +#~ " into the image and set the " +#~ "entry point to ``flower-server-app`` " +#~ "with the argument ``server:app``. The " +#~ "argument is the object reference of " +#~ "the ServerApp (``:``) that " +#~ "will be run inside the ServerApp " +#~ "container." 
+#~ msgstr "" +#~ "在前两行中,我们指示 Docker 使用标记为 ``1.8.0`` 的 " +#~ "ServerApp 镜像作为基础镜像,并将工作目录设置为 ``/app``。下面的指令将在 " +#~ "``/app`` 目录中执行。在最后两行中,我们将 ``server.py`` " +#~ "模块复制到映像中,并将入口点设置为 ``flower-server-app``,参数为 " +#~ "``server:app``。参数是将在 ServerApp 容器内运行的 ServerApp " +#~ "的对象引用(``<模块>:<属性>``)。" + +#~ msgid "Building the ServerApp Docker image" +#~ msgstr "启动服务器" + +#~ msgid "Running the ServerApp Docker image" +#~ msgstr "启动服务器" + +#~ msgid "Now that we have built the ServerApp image, we can finally run it." +#~ msgstr "现在我们已经构建了 ServerApp 镜像,终于可以运行它了。" #~ msgid "" -#~ "When opening a new PR, inside its" -#~ " description, there should be a " -#~ "``Changelog entry`` header." -#~ msgstr "打开一个新 PR 时,在其描述中应有一个 ``Changelog entry`` 标头。" +#~ "``--superlink 192.168.1.100:9091``: This option " +#~ "specifies the address of the SuperLinks" +#~ " Driver" +#~ msgstr "``--server 192.168.1.100:9091``: 此选项指定超级链接驱动程序的地址" #~ msgid "" -#~ "Above this header you should see " -#~ "the following comment that explains how" -#~ " to write your changelog entry:" -#~ msgstr "在页眉上方,你会看到以下注释,说明如何编写更新日志条目:" +#~ "To test running Flower locally, you " +#~ "can create a `bridge network " +#~ "`__, use the ``--network`` argument" +#~ " and pass the name of the " +#~ "Docker network to run your ServerApps." +#~ msgstr "" +#~ "要测试在本地运行 Flower,可以创建一个 ``bridge network " +#~ "`___,使用 ``--network`` 参数并传递 Docker " +#~ "网络的名称,以运行 ServerApps。" #~ msgid "" -#~ "Inside the following 'Changelog entry' " -#~ "section, you should put the description" -#~ " of your changes that will be " -#~ "added to the changelog alongside your" -#~ " PR title." -#~ msgstr "在下面的 \"更新日志条目 \"部分中,您应该在 PR 标题旁边写上将添加到更新日志中的更改描述。" +#~ "Any argument that comes after the " +#~ "tag is passed to the Flower " +#~ "ServerApp binary. 
To see all available" +#~ " flags that the ServerApp supports, " +#~ "run:" +#~ msgstr "标记后的任何参数都将传递给 Flower ServerApp 二进制文件。要查看 ServerApp 支持的所有可用标记,请运行" #~ msgid "" -#~ "If the section is completely empty " -#~ "(without any token) or non-existent, " -#~ "the changelog will just contain the " -#~ "title of the PR for the changelog" -#~ " entry, without any description." -#~ msgstr "如果该部分完全为空(没有任何标记)或不存在,更新日志将只包含更新日志条目的 PR 标题,而不包含任何描述。" +#~ "To enable SSL, we will need to " +#~ "mount a PEM-encoded root certificate " +#~ "into your ServerApp container." +#~ msgstr "要启用 SSL,需要 CA 证书、服务器证书和服务器私钥。" #~ msgid "" -#~ "If the section contains some text " -#~ "other than tokens, it will use it" -#~ " to add a description to the " -#~ "change." -#~ msgstr "如果该部分包含标记以外的文本,它将使用这些文本为更改添加说明。" +#~ "Assuming the certificate already exists " +#~ "locally, we can use the flag " +#~ "``--volume`` to mount the local " +#~ "certificate into the container's ``/app/`` " +#~ "directory. This allows the ServerApp to" +#~ " access the certificate within the " +#~ "container. Use the ``--root-certificates`` " +#~ "flags when starting the container." +#~ msgstr "" +#~ "假设我们需要的所有文件都在本地的 ``certificates`` 目录中,我们可以使用标记 " +#~ "``-v`` 将本地目录挂载到容器的 ``/app/`` " +#~ "目录中。这样,服务器就可以访问容器内的文件。最后,我们使用 ``--certificates`` " +#~ "标志将证书名称传递给服务器。" + +#~ msgid "Run with root user privileges" +#~ msgstr "" #~ msgid "" -#~ "If the section contains one of the" -#~ " following tokens it will ignore any" -#~ " other text and put the PR " -#~ "under the corresponding section of the" -#~ " changelog:" -#~ msgstr "如果该部分包含以下标记之一,它将忽略任何其他文本,并将 PR 放在更新日志的相应部分下:" +#~ "Flower Docker images, by default, run" +#~ " with a non-root user " +#~ "(username/groupname: ``app``, UID/GID: ``49999``)." +#~ " Using root user is not recommended" +#~ " unless it is necessary for specific" +#~ " tasks during the build process. 
" +#~ "Always make sure to run the " +#~ "container as a non-root user in" +#~ " production to maintain security best " +#~ "practices." +#~ msgstr "" -#~ msgid " is for classifying a PR as a general improvement." -#~ msgstr " 用于将 PR 划分为一般改进。" +#~ msgid "**Run a container with root user privileges**" +#~ msgstr "" -#~ msgid " is to not add the PR to the changelog" -#~ msgstr "表示不将 PR 添加到更新日志中" +#~ msgid "**Run the build process with root user privileges**" +#~ msgstr "" -#~ msgid " is to add a general baselines change to the PR" -#~ msgstr " 是指在 PR 中添加一般基线更改" +#~ msgid ":py:obj:`run_client_app `\\ \\(\\)" +#~ msgstr ":py:obj:`run_client_app `\\ \\(\\)" -#~ msgid " is to add a general examples change to the PR" -#~ msgstr " 是在 PR 中添加对一般示例的修改" +#~ msgid ":py:obj:`run_supernode `\\ \\(\\)" +#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" -#~ msgid " is to add a general sdk change to the PR" -#~ msgstr " 是指在 PR 中添加一般的 sdk 更改" +#~ msgid "d defaults to None." +#~ msgstr "d 默认为 \"无\"。" -#~ msgid " is to add a general simulations change to the PR" -#~ msgstr "(模拟)是在 PR 中添加一般模拟变更" +#~ msgid "Update R from dict/iterable E and F." +#~ msgstr "根据二进制/可迭代 E 和 F 更新 R。" -#~ msgid "Note that only one token should be used." -#~ msgstr "请注意,只能使用一个标记。" +#~ msgid "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_ENTER " +#~ "`\\" #~ msgid "" -#~ "Its content must have a specific " -#~ "format. 
We will break down what " -#~ "each possibility does:" -#~ msgstr "其内容必须有特定的格式。我们将分析每种可能性的作用:" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_DRIVER_API_LEAVE " +#~ "`\\" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains nothing or doesn't exist, " -#~ "the following text will be added " -#~ "to the changelog::" -#~ msgstr "如果 ``#### Changelog entry`` 部分不包含任何内容或不存在,则会在更新日志中添加以下文本::" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_ENTER " +#~ "`\\" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains a description (and no " -#~ "token), the following text will be " -#~ "added to the changelog::" -#~ msgstr "如果 ``#### Changelog entry`` 部分包含描述(但没有标记),则会在更新日志中添加以下文本::" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`RUN_FLEET_API_LEAVE " +#~ "`\\" + +#~ msgid ":py:obj:`DRIVER_CONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_CONNECT `\\" + +#~ msgid ":py:obj:`DRIVER_DISCONNECT `\\" +#~ msgstr ":py:obj:`DRIVER_DISCONNECT `\\" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, nothing will change" -#~ " in the changelog." -#~ msgstr "如果 ``#### Changelog entry`` 部分包含 ````,更新日志中将不会有任何更改。" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_ENTER " +#~ "`\\" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text" -#~ " will be added to the changelog::" -#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" +#~ msgstr "" +#~ ":py:obj:`START_DRIVER_LEAVE " +#~ "`\\" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含``<基准线>``,则会在更新日志中添加以下文本::" +#~ "An identifier that can be used " +#~ "when loading a particular data partition" +#~ " for a ClientApp. 
Making use of " +#~ "this identifier is more relevant when" +#~ " conducting simulations." +#~ msgstr "为 ClientApp 加载特定数据分区时可使用的标识符。在进行模拟时,使用该标识符更有意义。" + +#~ msgid ":py:obj:`partition_id `\\" +#~ msgstr ":py:obj:`partition_id `\\" + +#~ msgid "An identifier telling which data partition a ClientApp should use." +#~ msgstr "告诉 ClientApp 应使用哪个数据分区的标识符。" + +#~ msgid ":py:obj:`run_superlink `\\ \\(\\)" +#~ msgstr ":py:obj:`run_superlink `\\ \\(\\)" + +#~ msgid "Run Flower SuperLink (Driver API and Fleet API)." +#~ msgstr "运行 Flower 服务器(Driver API 和 Fleet API)。" + +#~ msgid "run\\_driver\\_api" +#~ msgstr "flower-driver-api" + +#~ msgid "run\\_fleet\\_api" +#~ msgstr "run\\_fleet\\_api" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含``<示例>``,则会在更新日志中添加以下文本::" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg+ configuration " +#~ "to clients and collect their public " +#~ "keys. - 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" +#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg+ " +#~ "配置并收集其公钥。- 共享密钥\": 在客户端之间广播公钥,并收集加密密钥。" + +#~ msgid "key shares." +#~ msgstr "关键股份。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following text " -#~ "will be added to the changelog::" -#~ msgstr "如果``### 更新日志条目``部分包含````,则会在更新日志中添加以下文本::" +#~ "The protocol involves four main stages:" +#~ " - 'setup': Send SecAgg configuration " +#~ "to clients and collect their public " +#~ "keys. 
- 'share keys': Broadcast public" +#~ " keys among clients and collect " +#~ "encrypted secret" +#~ msgstr "" +#~ "协议包括四个主要阶段: - 设置\": 向客户端发送 SecAgg " +#~ "配置并收集它们的公钥。- 共享密钥\": 在客户端之间广播公钥并收集加密密钥。" #~ msgid "" -#~ "If the ``### Changelog entry`` section" -#~ " contains ````, the following " -#~ "text will be added to the " -#~ "changelog::" -#~ msgstr "如果 ``### Changelog entry`` 部分包含 ````,则会在更新日志中添加以下文本::" +#~ "'A dictionary, e.g {\"\": , " +#~ "\"\": } to configure a " +#~ "backend. Values supported in are" +#~ " those included by " +#~ "`flwr.common.typing.ConfigsRecordValues`." +#~ msgstr "" +#~ "字典,例如 {\"\": , \"\": " +#~ "} 来配置后端。 中支持的值是 " +#~ "`flwr.common.typing.ConfigsRecordValues`中包含的值。" #~ msgid "" -#~ "Note that only one token must be" -#~ " provided, otherwise, only the first " -#~ "action (in the order listed above), " -#~ "will be performed." -#~ msgstr "请注意,必须只提供一个标记,否则将只执行第一个操作(按上述顺序)。" +#~ "The total number of clients in " +#~ "this simulation. This must be set " +#~ "if `clients_ids` is not set and " +#~ "vice-versa." +#~ msgstr "本次模拟的客户总数。如果未设置 `clients_ids`,则必须设置该参数,反之亦然。" -#~ msgid "Example: MXNet - Run MXNet Federated" -#~ msgstr "示例: MXNet - 运行联邦式 MXNet" +#~ msgid "" +#~ "In this tutorial we will learn how" +#~ " to train a Convolutional Neural " +#~ "Network on CIFAR10 using Flower and " +#~ "PyTorch." +#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 PyTorch 在 CIFAR10 上训练卷积神经网络。" #~ msgid "" -#~ "This tutorial will show you how to" -#~ " use Flower to build a federated " -#~ "version of an existing MXNet workload." -#~ " We are using MXNet to train a" -#~ " Sequential model on the MNIST " -#~ "dataset. We will structure the example" -#~ " similar to our `PyTorch - From " -#~ "Centralized To Federated " -#~ "`_ walkthrough. " -#~ "MXNet and PyTorch are very similar " -#~ "and a very good comparison between " -#~ "MXNet and PyTorch is given `here " -#~ "`_. 
First, " -#~ "we build a centralized training approach" -#~ " based on the `Handwritten Digit " -#~ "Recognition " -#~ "`_" -#~ " tutorial. Then, we build upon the" -#~ " centralized training code to run the" -#~ " training in a federated fashion." -#~ msgstr "" -#~ "本教程将向您展示如何使用 Flower 构建现有 MXNet 的联学习版本。我们将使用" -#~ " MXNet 在 MNIST 数据集上训练一个序列模型。另外,我们将采用与我们的 " -#~ "`PyTorch - 从集中式到联邦式 " -#~ "`_ 教程类似的示例结构。MXNet" -#~ " 和 PyTorch 非常相似,参考 `此处 " -#~ "`_对 MXNet " -#~ "和 PyTorch 进行了详细的比较。首先,我们根据 `手写数字识别 " -#~ "`" -#~ " 教程 建立了集中式训练方法。然后,我们在集中式训练代码的基础上,以联邦方式运行训练。" +#~ "*Clients* are responsible for generating " +#~ "individual weight-updates for the model" +#~ " based on their local datasets. These" +#~ " updates are then sent to the " +#~ "*server* which will aggregate them to" +#~ " produce a better model. Finally, the" +#~ " *server* sends this improved version " +#~ "of the model back to each " +#~ "*client*. A complete cycle of weight " +#~ "updates is called a *round*." +#~ msgstr "*客户端*负责在其本地数据集上更新模型参数。然后,这些参数会被发送到*服务器*,由*服务器*聚合后生成一个更好的模型。最后,*服务器*将改进后的模型发送回每个*客户端*。一个完整的模型参数更新周期称为一*轮*。" #~ msgid "" -#~ "Before we start setting up our " -#~ "MXNet example, we install the " -#~ ":code:`mxnet` and :code:`flwr` packages:" -#~ msgstr "在开始设置 MXNet 示例之前,我们先安装 :code:`mxnet` 和 :code:`flwr` 软件包:" +#~ "Now that we have a rough idea " +#~ "of what is going on, let's get " +#~ "started. We first need to install " +#~ "Flower. You can do this by running" +#~ " :" +#~ msgstr "现在,我们已经有了一个大致的概念了,那就让我们开始吧。首先,我们需要安装 Flower。可以通过运行 :" + +#~ msgid "" +#~ "Since we want to use PyTorch to" +#~ " solve a computer vision task, let's" +#~ " go ahead and install PyTorch and " +#~ "the **torchvision** library:" +#~ msgstr "既然我们想用 PyTorch 解决计算机视觉任务,那就继续安装 PyTorch 和 **torchvision** 库吧:" + +#~ msgid "" +#~ "Now that we have all our " +#~ "dependencies installed, let's run a " +#~ "simple distributed training with two " +#~ "clients and one server. 
Our training " +#~ "procedure and network architecture are " +#~ "based on PyTorch's `Deep Learning with" +#~ " PyTorch " +#~ "`_." +#~ msgstr "" +#~ "现在我们已经安装了所有的依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练过程和网络架构基于 " +#~ "PyTorch 的《Deep Learning with PyTorch " +#~ "`_》。" -#~ msgid "MNIST Training with MXNet" -#~ msgstr "使用 MXNet 进行 MNIST 训练" +#~ msgid "" +#~ "In a file called :code:`client.py`, " +#~ "import Flower and PyTorch related " +#~ "packages:" +#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 PyTorch 相关软件包:" + +#~ msgid "In addition, we define the device allocation in PyTorch with:" +#~ msgstr "此外,我们还在 PyTorch 中定义了设备分配:" #~ msgid "" -#~ "We begin with a brief description " -#~ "of the centralized training code based" -#~ " on a :code:`Sequential` model. If " -#~ "you want a more in-depth " -#~ "explanation of what's going on then " -#~ "have a look at the official `MXNet" -#~ " tutorial " -#~ "`_." +#~ "We use PyTorch to load CIFAR10, a" +#~ " popular colored image classification " +#~ "dataset for machine learning. The " +#~ "PyTorch :code:`DataLoader()` downloads the " +#~ "training and test data that are " +#~ "then normalized." #~ msgstr "" -#~ "首先,我们将简要介绍基于 :code:`Sequential` " -#~ "模型的集中式训练代码。如果您想获得更深入的解释,请参阅官方的 `MXNet教程 " -#~ "`_。" +#~ "我们使用 PyTorch 来加载 " +#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。PyTorch " +#~ ":code:`DataLoader()`下载训练数据和测试数据,然后进行归一化处理。" #~ msgid "" -#~ "Let's create a new file " -#~ "called:code:`mxnet_mnist.py` with all the " -#~ "components required for a traditional " -#~ "(centralized) MNIST training. First, the " -#~ "MXNet package :code:`mxnet` needs to be" -#~ " imported. You can see that we " -#~ "do not yet import the :code:`flwr` " -#~ "package for federated learning. This " -#~ "will be done later." -#~ msgstr "" -#~ "让我们创建一个名为:code:`mxnet_mnist.py`的新文件,其中包含传统(集中式)MNIST " -#~ "训练所需的所有组件。首先,需要导入 MXNet 包 " -#~ ":code:`mxnet`。您可以看到,我们尚未导入用于联合学习的 :code:`flwr` 包,这将在稍后完成。" +#~ "Define the loss and optimizer with " +#~ "PyTorch. 
The training of the dataset " +#~ "is done by looping over the " +#~ "dataset, measure the corresponding loss " +#~ "and optimize it." +#~ msgstr "使用 PyTorch 定义损失和优化器。数据集的训练是通过循环数据集、测量相应的损失值并对其进行优化来完成的。" #~ msgid "" -#~ "The :code:`load_data()` function loads the " -#~ "MNIST training and test sets." -#~ msgstr ":code:`load_data()` 函数加载 MNIST 训练集和测试集。" +#~ "Define then the validation of the " +#~ "machine learning network. We loop over" +#~ " the test set and measure the " +#~ "loss and accuracy of the test set." +#~ msgstr "然后定义机器学习网络的验证。我们在测试集上循环,计算测试集的损失值和准确率。" #~ msgid "" -#~ "As already mentioned, we will use " -#~ "the MNIST dataset for this machine " -#~ "learning workload. The model architecture " -#~ "(a very simple :code:`Sequential` model) " -#~ "is defined in :code:`model()`." -#~ msgstr "" -#~ "如前所述,我们将使用 MNIST 数据集进行机器学习。模型架构(一个非常简单的 " -#~ ":code:`Sequential` 模型)在 :code:`model()` 中定义。" +#~ "After defining the training and testing" +#~ " of a PyTorch machine learning model," +#~ " we use the functions for the " +#~ "Flower clients." +#~ msgstr "在定义了 PyTorch 机器学习模型的训练和测试之后,我们将这些功能用于 Flower 客户端。" #~ msgid "" -#~ "We now need to define the training" -#~ " (function :code:`train()`) which loops " -#~ "over the training set and measures " -#~ "the loss for each batch of " -#~ "training examples." -#~ msgstr "现在,我们需要定义训练函数( :code:`train()`),该函数在训练集上循环训练,并计算每批训练示例的损失值。" +#~ "The Flower clients will use a " +#~ "simple CNN adapted from 'PyTorch: A " +#~ "60 Minute Blitz':" +#~ msgstr "Flower 客户端将使用一个简单的从“PyTorch: 60 分钟突击\"改编的CNN:" #~ msgid "" -#~ "The evaluation of the model is " -#~ "defined in function :code:`test()`. The " -#~ "function loops over all test samples " -#~ "and measures the loss and accuracy " -#~ "of the model based on the test " -#~ "dataset." -#~ msgstr "模型的评估在函数 :code:`test()` 中定义。该函数循环遍历所有测试样本,并根据测试数据集计算模型的损失值和准确度。" +#~ "After loading the data set with " +#~ ":code:`load_data()` we define the Flower " +#~ "interface." 
+#~ msgstr "使用 :code:`load_data()` 加载数据集后,我们定义了 Flower 接口。" #~ msgid "" -#~ "Having defined the data loading, model" -#~ " architecture, training, and evaluation we" -#~ " can put everything together and " -#~ "train our model on MNIST. Note " -#~ "that the GPU/CPU device for the " -#~ "training and testing is defined within" -#~ " the :code:`ctx` (context)." +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses " +#~ "PyTorch. Implementing :code:`NumPyClient` usually" +#~ " means defining the following methods " +#~ "(:code:`set_parameters` is optional though):" #~ msgstr "" -#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以把所有放在一起,在 MNIST " -#~ "上训练我们的模型了。请注意,用于训练和测试的 GPU/CPU 设备是在 :code:`ctx`中定义的。" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " PyTorch 时,它使 :code:`Client` 接口的实现变得更容易。实现 " +#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " +#~ "是可选的):" -#~ msgid "You can now run your (centralized) MXNet machine learning workload:" -#~ msgstr "现在,您可以运行(集中式)MXNet 机器学习工作:" +#~ msgid "which can be implemented in the following way:" +#~ msgstr "可以通过以下方式实现:" #~ msgid "" -#~ "So far this should all look fairly" -#~ " familiar if you've used MXNet (or" -#~ " even PyTorch) before. Let's take the" -#~ " next step and use what we've " -#~ "built to create a simple federated " -#~ "learning system consisting of one server" -#~ " and two clients." +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this example can " +#~ "be found in :code:`examples/quickstart-" +#~ "pytorch`." 
#~ msgstr "" -#~ "到目前为止,如果你以前使用过 MXNet(甚至 " -#~ "PyTorch),这一切看起来应该相当熟悉。下一步,让我们利用已构建的内容创建一个简单联邦学习系统(由一个服务器和两个客户端组成)。" - -#~ msgid "MXNet meets Flower" -#~ msgstr "MXNet 结合 Flower" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "`_ 可以在 :code:`examples/quickstart-" +#~ "pytorch` 中找到。" #~ msgid "" -#~ "So far, it was not easily possible" -#~ " to use MXNet workloads for federated" -#~ " learning because federated learning is " -#~ "not supported in MXNet. Since Flower " -#~ "is fully agnostic towards the underlying" -#~ " machine learning framework, it can " -#~ "be used to federated arbitrary machine" -#~ " learning workloads. This section will " -#~ "show you how Flower can be used" -#~ " to federate our centralized MXNet " -#~ "workload." +#~ "The :code:`self.bst` is used to keep " +#~ "the Booster objects that remain " +#~ "consistent across rounds, allowing them " +#~ "to store predictions from trees " +#~ "integrated in earlier rounds and " +#~ "maintain other essential data structures " +#~ "for training." #~ msgstr "" -#~ "由于 MXNet 目前不支持联邦学习,因此无法轻松地直接将 MXNet " -#~ "用于联邦学习之中。Flower 与底层机器学习框架完全无关,因此它可用于任意联邦式机器学习工作。本节将向你展示如何使用 " -#~ "Flower 将我们的集中式 MXNet 改为联邦式训练。" +#~ "代码:`self.bst`用于保存在各轮中保持一致的 Booster " +#~ "对象,使其能够存储在前几轮中集成的树的预测结果,并维护其他用于训练的重要数据结构。" + +#~ msgid "Implementing a Flower client" +#~ msgstr "实现 Flower 客户端" #~ msgid "" -#~ "The concept to federate an existing " -#~ "workload is always the same and " -#~ "easy to understand. We have to " -#~ "start a *server* and then use the" -#~ " code in :code:`mxnet_mnist.py` for the " -#~ "*clients* that are connected to the " -#~ "*server*. The *server* sends model " -#~ "parameters to the clients. The *clients*" -#~ " run the training and update the " -#~ "parameters. The updated parameters are " -#~ "sent back to the *server* which " -#~ "averages all received parameter updates. " -#~ "This describes one round of the " -#~ "federated learning process and we repeat" -#~ " this for multiple rounds." 
+#~ "To implement the Flower client, we " +#~ "create a subclass of " +#~ "``flwr.client.NumPyClient`` and implement the " +#~ "three methods ``get_parameters``, ``fit``, and" +#~ " ``evaluate``:" #~ msgstr "" -#~ "将现有模型框架联邦化的概念始终是相同的,也很容易理解。我们必须启动一个*服务器*,然后对连接到*服务器*的*客户端*使用 " -#~ ":code:`mxnet_mnist.py`中的代码。*服务器*向客户端发送模型参数,然后*客户端*运行训练并更新参数。更新后的参数被发回*服务器*,然后会对所有收到的参数更新进行平均聚合。以上描述的是一轮联邦学习过程,我们将重复进行多轮学习。" +#~ "为实现 Flower 客户端,我们创建了 ``flwr.client.NumPyClient`` " +#~ "的子类,并实现了 ``get_parameters``、``fit`` 和``evaluate`` " +#~ "三个方法:" #~ msgid "" -#~ "Finally, we will define our *client* " -#~ "logic in :code:`client.py` and build " -#~ "upon the previously defined MXNet " -#~ "training in :code:`mxnet_mnist.py`. Our " -#~ "*client* needs to import :code:`flwr`, " -#~ "but also :code:`mxnet` to update the " -#~ "parameters on our MXNet model:" +#~ "The function ``start_simulation`` accepts a" +#~ " number of arguments, amongst them " +#~ "the ``client_fn`` used to create " +#~ "``FlowerClient`` instances, the number of " +#~ "clients to simulate (``num_clients``), the " +#~ "number of federated learning rounds " +#~ "(``num_rounds``), and the strategy. The " +#~ "strategy encapsulates the federated learning" +#~ " approach/algorithm, for example, *Federated " +#~ "Averaging* (FedAvg)." #~ msgstr "" -#~ "最后,我们将在 :code:`client.py` 中定义我们的 *client* " -#~ "逻辑,并以之前在 :code:`mxnet_mnist.py` 中定义的 MXNet " -#~ "训练为基础。我们的 *client* 不仅需要导入 :code:`flwr`,还需要导入 " -#~ ":code:`mxnet`,以更新 MXNet 模型的参数:" +#~ "函数 ``start_simulation`` 接受许多参数,其中包括用于创建 " +#~ "``FlowerClient`` 实例的 " +#~ "``client_fn``、要模拟的客户端数量(``num_clients``)、联邦学习轮数(``num_rounds``)和策略。策略封装了联邦学习方法/算法,例如*联邦平均*" +#~ " (FedAvg)。" #~ msgid "" -#~ "Implementing a Flower *client* basically " -#~ "means implementing a subclass of either" -#~ " :code:`flwr.client.Client` or " -#~ ":code:`flwr.client.NumPyClient`. Our implementation " -#~ "will be based on " -#~ ":code:`flwr.client.NumPyClient` and we'll call " -#~ "it :code:`MNISTClient`. 
:code:`NumPyClient` is " -#~ "slightly easier to implement than " -#~ ":code:`Client` if you use a framework" -#~ " with good NumPy interoperability (like " -#~ "PyTorch or MXNet) because it avoids " -#~ "some of the boilerplate that would " -#~ "otherwise be necessary. :code:`MNISTClient` " -#~ "needs to implement four methods, two " -#~ "methods for getting/setting model parameters," -#~ " one method for training the model," -#~ " and one method for testing the " -#~ "model:" +#~ "The only thing left to do is " +#~ "to tell the strategy to call this" +#~ " function whenever it receives evaluation" +#~ " metric dictionaries from the clients:" +#~ msgstr "剩下要做的就是告诉策略,每当它从客户端接收到评估度量字典时,都要调用这个函数:" + +#~ msgid "|93b02017c78049bbbd5ae456dcb2c91b|" #~ msgstr "" -#~ "实现 Flower *client*基本上意味着要实现 " -#~ ":code:`flwr.client.Client` 或 " -#~ ":code:`flwr.client.NumPyClient` 的子类。我们的代码实现将基于 " -#~ ":code:`flwr.client.NumPyClient`,并将其命名为 " -#~ ":code:`MNISTClient`。如果使用具有良好 NumPy 互操作性的框架(如 PyTorch" -#~ " 或 MXNet),:code:`NumPyClient` 比 " -#~ ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`MNISTClient` " -#~ "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#~ msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" -#~ msgstr "将 MXNet :code:`NDArray` 转换为 NumPy :code:`ndarray`" +#~ msgid "|01471150fd5144c080a176b43e92a3ff|" +#~ msgstr "" -#~ msgid "" -#~ "The challenging part is to transform " -#~ "the MXNet parameters from :code:`NDArray` " -#~ "to :code:`NumPy Arrays` to make it " -#~ "readable for Flower." +#~ msgid "|9bc21c7dbd17444a8f070c60786e3484|" #~ msgstr "" -#~ "具有挑战性的部分是将 MXNet 参数从 :code:`NDArray` 转换为 " -#~ ":code:`NumPy Arrays` 以便 Flower 可以读取。" -#~ msgid "" -#~ "The two :code:`NumPyClient` methods " -#~ ":code:`fit` and :code:`evaluate` make use " -#~ "of the functions :code:`train()` and " -#~ ":code:`test()` previously defined in " -#~ ":code:`mxnet_mnist.py`. 
So what we really " -#~ "do here is we tell Flower through" -#~ " our :code:`NumPyClient` subclass which of" -#~ " our already defined functions to " -#~ "call for training and evaluation. We " -#~ "included type annotations to give you" -#~ " a better understanding of the data" -#~ " types that get passed around." +#~ msgid "|3047bbce54b34099ae559963d0420d79|" #~ msgstr "" -#~ "这两个 :code:`NumPyClient` 方法 :code:`fit` 和 " -#~ ":code:`evaluate` 使用了之前在 :code:`mxnet_mnist.py` " -#~ "中定义的函数 :code:`train()` 和 :code:`test()`。因此,我们要做的就是通过" -#~ " :code:`NumPyClient` 子类告知 Flower " -#~ "在训练和评估时要调用哪些已定义的函数。我们加入了类型注解,以便让您更好地理解传递的数据类型。" -#~ msgid "" -#~ "Having defined data loading, model " -#~ "architecture, training, and evaluation we " -#~ "can put everything together and train" -#~ " our :code:`Sequential` model on MNIST." +#~ msgid "|e9f8ce948593444fb838d2f354c7ec5d|" #~ msgstr "" -#~ "在定义了数据加载、模型架构、训练和评估之后,我们就可以将所有内容整合在一起,在 MNIST 上训练我们的 " -#~ ":code:`Sequential` 模型。" -#~ msgid "" -#~ "in each window (make sure that the" -#~ " server is still running before you" -#~ " do so) and see your MXNet " -#~ "project run federated learning across " -#~ "two clients. Congratulations!" -#~ msgstr "确保服务器仍在运行后,然后就能在每个窗口中看到 MXNet 项目在两个客户端上运行联邦学习了。祝贺!" +#~ msgid "|c24c1478b30e4f74839208628a842d1e|" +#~ msgstr "" -#~ msgid "" -#~ "The full source code for this " -#~ "example: `MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_. Our " -#~ "example is of course somewhat over-" -#~ "simplified because both clients load the" -#~ " exact same dataset, which isn't " -#~ "realistic. You're now prepared to " -#~ "explore this topic further. How about" -#~ " using a CNN or using a " -#~ "different dataset? How about adding more" -#~ " clients?" +#~ msgid "|1b3613d7a58847b59e1d3180802dbc09|" #~ msgstr "" -#~ "此示例的完整源代码在:\"MXNet: From Centralized To " -#~ "Federated (Code) " -#~ "`_。当然,我们的示例有些过于简单,因为两个客户端都加载了完全相同的数据集,这并不真实。现在您已经准备好进一步探讨了。使用" -#~ " CNN 或使用不同的数据集会如何?添加更多客户端会如何?" 
-#~ msgid "with the following command sequence:" -#~ msgstr "使用以下命令序列:" +#~ msgid "|9980b5213db547d0b8024a50992b9e3f|" +#~ msgstr "" + +#~ msgid "|c7afb4c92d154bfaa5e8cb9a150e17f1|" +#~ msgstr "" + +#~ msgid "|032eb6fed6924ac387b9f13854919196|" +#~ msgstr "" + +#~ msgid "|fbf225add7fd4df5a9bf25a95597d954|" +#~ msgstr "" + +#~ msgid "|7efbe3d29d8349b89594e8947e910525|" +#~ msgstr "" + +#~ msgid "|329fb3c04c744eda83bb51fa444c2266|" +#~ msgstr "" + +#~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgstr "" + +#~ msgid "run\\_client\\_app" +#~ msgstr "run\\_client\\_app" + +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" + +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" #~ msgid "" -#~ "In case you are a researcher you" -#~ " might be just fine using the " -#~ "self-signed certificates generated using " -#~ "the scripts which are part of this" -#~ " guide." -#~ msgstr "如果你是一名研究人员,使用本指南中的脚本生成的自签名证书就可以了。" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" + +#~ msgid "the string key as the query for the layout." +#~ msgstr "" + +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" + +#~ msgid "run\\_server\\_app" +#~ msgstr "run\\_server\\_app" + +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "启动基于 Ray 的Flower模拟服务器。" #~ msgid "" -#~ "We are now going to show how " -#~ "to write a sever which uses the" -#~ " previously generated scripts." -#~ msgstr "现在,我们将展示如何编写一个使用先前生成的脚本的服务器。" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. 
Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" +#~ "创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个" +#~ " Client " +#~ "类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" +#~ " `client_fn` 或任何客户端方法(例如,在 `evaluate` " +#~ "方法中加载评估数据)时(重新)创建。" + +#~ msgid "The total number of clients in this simulation." +#~ msgstr "需要等待的客户数量。" #~ msgid "" -#~ "When providing certificates, the server " -#~ "expects a tuple of three certificates." -#~ " :code:`Path` can be used to easily" -#~ " read the contents of those files " -#~ "into byte strings, which is the " -#~ "data type :code:`start_server` expects." +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." #~ msgstr "" -#~ "在提供证书时,服务器希望得到由三个证书组成的元组。 :code:`Path` " -#~ "可用于轻松地将这些文件的内容读取为字节字符串,这就是 :code:`start_server` 期望的数据类型。" +#~ "列出每个客户的 `client_id`。只有在未设置 `num_clients` " +#~ "时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" -#~ msgid "Flower server" -#~ msgstr "Flower 服务器" +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. 
To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." +#~ msgstr "" +#~ "\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU " +#~ "资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解 " +#~ "`num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray" +#~ " 文档。" -#~ msgid "flower-driver-api" -#~ msgstr "flower-driver-api" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" -#~ msgid "flower-fleet-api" -#~ msgstr "flower-fleet-api" +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server`" +#~ " 将使用 `flwr.server.strategy.FedAvg`。" #~ msgid "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." #~ msgstr "" -#~ ":py:obj:`start_driver `\\ " -#~ "\\(\\*\\[\\, server\\_address\\, server\\, ...\\]\\)" +#~ "抽象基类 `flwr.server.ClientManager` " +#~ "的实现。如果没有提供实现,`start_simulation` 将使用 " +#~ "`flwr.server.client_manager.SimpleClientManager`。" -#~ msgid "Start a Flower Driver API server." -#~ msgstr "启动基于 Ray 的Flower模拟服务器。" +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. 
If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" +#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" +#~ " None(默认值),则将使用以下默认参数初始化 Ray: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } 可以使用空字典(ray_init_args={})来防止向 ray.init " +#~ "传递任何参数。" #~ msgid "" -#~ ":py:obj:`Driver `\\ " -#~ "\\(\\[driver\\_service\\_address\\, ...\\]\\)" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" #~ msgstr "" -#~ "Flower 1.0: ``start_server(..., " -#~ "config=flwr.server.ServerConfig(num_rounds=3, " -#~ "round_timeout=600.0), ...)``" +#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" +#~ " None(默认值),则将使用以下默认参数初始化 Ray:" -#~ msgid "`Driver` class provides an interface to the Driver API." -#~ msgstr "`Driver` 类为驱动程序 API 提供了一个接口。" +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:9091\"`." -#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" -#~ msgid ":py:obj:`close `\\ \\(\\)" -#~ msgstr "server.strategy.Strategy" +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." 
+#~ msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" -#~ msgid "Disconnect from the SuperLink if connected." -#~ msgstr "如果已连接,请断开与超级链接的连接。" +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" -#~ msgid "start\\_driver" -#~ msgstr "启动客户端" +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" #~ msgid "" -#~ "The IPv4 or IPv6 address of the" -#~ " Driver API server. Defaults to " -#~ "`\"[::]:8080\"`." -#~ msgstr "服务器的 IPv4 或 IPv6 地址。默认为 `\"[::]:8080\"。" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" +#~ "(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 " +#~ "VCE " +#~ "选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " +#~ "NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " +#~ "文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" + +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "**hist** -- 包含训练指标的对象。" #~ msgid "" -#~ "A server implementation, either " -#~ "`flwr.server.Server` or a subclass thereof." 
-#~ " If no instance is provided, then " -#~ "`start_driver` will create one." -#~ msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" + +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" #~ msgid "" -#~ "An implementation of the class " -#~ "`flwr.server.ClientManager`. If no implementation" -#~ " is provided, then `start_driver` will " -#~ "use `flwr.server.SimpleClientManager`." +#~ "Please refer to the `full code " +#~ "example `_ to learn more." #~ msgstr "" -#~ "抽象基类 `flwr.server.ClientManager` " -#~ "的实现。如果没有提供实现,`start_server` 将使用 " -#~ "`flwr.server.client_manager.SimpleClientManager`。" +#~ "请参阅 `完整代码示例 " +#~ "`_了解更多信息。" -#~ msgid "The Driver object to use." -#~ msgstr "要使用的驱动程序对象。" +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" -#~ msgid "Starting a driver that connects to an insecure server:" -#~ msgstr "启动不安全的服务器:" +#~ msgid "Dependencies" +#~ msgstr "依赖关系" -#~ msgid "Starting a driver that connects to an SSL-enabled server:" -#~ msgstr "启动支持 SSL 的服务器:" +#~ msgid "" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. 
This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" +#~ "要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 " +#~ ":code:`flwr`、 :code:`torch`和 :code:`transformers`。这可以通过" +#~ " :code:`pip` 来完成:" + +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "标准Hugging Face工作流程" + +#~ msgid "Handling the data" +#~ msgstr "处理数据" #~ msgid "" -#~ ":py:obj:`run_simulation_from_cli " -#~ "`\\ \\(\\)" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" #~ msgstr "" +#~ "为了获取 IMDB 数据集,我们将使用 Hugging Face 的 " +#~ ":code:`datasets` 库。然后,我们需要对数据进行标记化,并创建 :code:`PyTorch` " +#~ "数据加载器,这些都将在 :code:`load_data` 函数中完成:" -#~ msgid "Run Simulation Engine from the CLI." +#~ msgid "Training and testing the model" +#~ msgstr "训练和测试模型" + +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" #~ msgstr "" +#~ "有了创建 trainloader 和 testloader " +#~ "的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` 训练或测试循环都非常相似:" -#~ msgid "run\\_simulation\\_from\\_cli" -#~ msgstr "运行模拟" +#~ msgid "Creating the model itself" +#~ msgstr "创建模型本身" #~ msgid "" -#~ "Check out this Federated Learning " -#~ "quickstart tutorial for using Flower " -#~ "with MXNet to train a Sequential " -#~ "model on MNIST." 
-#~ msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 MXNet 在 MNIST 上训练序列模型。" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" +#~ "要创建模型本身,我们只需使用 Hugging Face 的 " +#~ ":code:`AutoModelForSequenceClassification` 加载预训练的 " +#~ "distillBERT 模型:" -#~ msgid "Quickstart MXNet" -#~ msgstr "快速入门 MXNet" +#~ msgid "Creating the IMDBClient" +#~ msgstr "创建 IMDBClient" #~ msgid "" -#~ "MXNet is no longer maintained and " -#~ "has been moved into `Attic " -#~ "`_. As a " -#~ "result, we would encourage you to " -#~ "use other ML frameworks alongside " -#~ "Flower, for example, PyTorch. This " -#~ "tutorial might be removed in future " -#~ "versions of Flower." +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" #~ msgstr "" +#~ "要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " +#~ ":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 " +#~ ":code:`PyTorch` 模型:" #~ msgid "" -#~ "In this tutorial, we will learn " -#~ "how to train a :code:`Sequential` model" -#~ " on MNIST using Flower and MXNet." -#~ msgstr "在本教程中,我们将学习如何使用 Flower 和 MXNet 在 MNIST 上训练 :code:`Sequential` 模型。" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." 
+#~ msgstr "" +#~ ":code:`get_parameters` " +#~ "函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" -#~ msgid "Since we want to use MXNet, let's go ahead and install it:" -#~ msgstr "既然我们要使用 MXNet,那就继续安装吧:" +#~ msgid "Starting the server" +#~ msgstr "启动服务器" #~ msgid "" -#~ "Now that we have all our " -#~ "dependencies installed, let's run a " -#~ "simple distributed training with two " -#~ "clients and one server. Our training " -#~ "procedure and network architecture are " -#~ "based on MXNet´s `Hand-written Digit " -#~ "Recognition tutorial " -#~ "`_." +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" #~ msgstr "" -#~ "现在,我们已经安装了所有依赖项,让我们用两个客户端和一个服务器来运行一个简单的分布式训练。我们的训练程序和网络架构基于 " -#~ "MXNet 的 `手写数字识别教程 " -#~ "`_\"。" +#~ "现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " +#~ ":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " +#~ ":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" #~ msgid "" -#~ "In a file called :code:`client.py`, " -#~ "import Flower and MXNet related " -#~ "packages:" -#~ msgstr "在名为 :code:`client.py` 的文件中,导入 Flower 和 MXNet 相关软件包:" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." 
+#~ msgstr "" +#~ "使用 :code:`weighted_average` " +#~ "函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" -#~ msgid "In addition, define the device allocation in MXNet with:" -#~ msgstr "此外,还可以在 MXNet 中定义设备分配:" +#~ msgid "Putting everything together" +#~ msgstr "把所有东西放在一起" + +#~ msgid "We can now start client instances using:" +#~ msgstr "现在我们可以使用:" #~ msgid "" -#~ "We use MXNet to load MNIST, a " -#~ "popular image classification dataset of " -#~ "handwritten digits for machine learning. " -#~ "The MXNet utility :code:`mx.test_utils.get_mnist()`" -#~ " downloads the training and test " -#~ "data." +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "他们就能连接到服务器,开始联邦训练。" + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." #~ msgstr "" -#~ "我们使用 MXNet 加载 MNIST,这是一个用于机器学习的流行手写数字图像分类数据集。MXNet" -#~ " 工具 :code:`mx.test_utils.get_mnist()` 会下载训练和测试数据。" +#~ "如果您想查看所有内容,请查看完整的代码示例: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." #~ msgid "" -#~ "Define the training and loss with " -#~ "MXNet. We train the model by " -#~ "looping over the dataset, measure the" -#~ " corresponding loss, and optimize it." -#~ msgstr "用 MXNet 定义训练和损失值。我们在数据集上循环训练模型,测量相应的损失值,并对其进行优化。" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." +#~ msgstr "" +#~ "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower " +#~ "联合Hugging Face的工作流程。" #~ msgid "" -#~ "Next, we define the validation of " -#~ "our machine learning model. We loop " -#~ "over the test set and measure both" -#~ " loss and accuracy on the test " -#~ "set." 
-#~ msgstr "接下来,我们定义机器学习模型的验证。我们在测试集上循环,测量测试集上的损失值和准确率。" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" #~ msgid "" -#~ "After defining the training and testing" -#~ " of a MXNet machine learning model," -#~ " we use these functions to implement" -#~ " a Flower client." -#~ msgstr "在定义了 MXNet 机器学习模型的训练和测试后,我们使用这些函数实现了 Flower 客户端。" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" -#~ msgid "Our Flower clients will use a simple :code:`Sequential` model:" -#~ msgstr "我们的 Flower 客户端将使用简单的 :code:`Sequential` 模型:" +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" #~ msgid "" -#~ "After loading the dataset with " -#~ ":code:`load_data()` we perform one forward " -#~ "propagation to initialize the model and" -#~ " model parameters with :code:`model(init)`. " -#~ "Next, we implement a Flower client." +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." #~ msgstr "" -#~ "使用 :code:`load_data()` 加载数据集后,我们会执行一次前向传播,使用 " -#~ ":code:`model(init)` 初始化模型和模型参数。接下来,我们实现一个 Flower " -#~ "客户端。" +#~ "请参阅 `完整代码示例 " +#~ "`_ 了解更多信息。" + +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" 
+ +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "在导入 Flower 之前,我们必须先安装它:" + +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" + +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" +#~ "我们使用 TF 的 Keras 实用程序加载 " +#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` 会下载 " +#~ "CIFAR10,将其缓存到本地,然后以 NumPy ndarrays 的形式返回整个训练集和测试集。" + +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" +#~ "Flower 服务器通过一个名为 :code:`Client` " +#~ "的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 " +#~ ":code:`Client` 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" #~ msgid "" #~ "Flower provides a convenience class " #~ "called :code:`NumPyClient` which makes it " #~ "easier to implement the :code:`Client` " -#~ "interface when your workload uses MXNet." 
-#~ " Implementing :code:`NumPyClient` usually means" -#~ " defining the following methods " -#~ "(:code:`set_parameters` is optional though):" +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" #~ msgstr "" #~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" -#~ " MXNet 时,它可以让您更轻松地实现 :code:`Client` 接口。实现 " -#~ ":code:`NumPyClient` 通常意味着定义以下方法(:code:`set_parameters` " -#~ "是可选的):" - -#~ msgid "They can be implemented in the following way:" -#~ msgstr "它们可以通过以下方式实现:" +#~ " Keras 时,该类可以更轻松地实现 :code:`Client` " +#~ "接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" #~ msgid "" #~ "We can now create an instance of" -#~ " our class :code:`MNISTClient` and add " +#~ " our class :code:`CifarClient` and add " #~ "one line to actually run this " #~ "client:" -#~ msgstr "现在我们可以创建一个 :code:`MNISTClient` 类的实例,并添加一行来实际运行该客户端:" +#~ msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" #~ msgid "" #~ "That's it for the client. We only" #~ " have to implement :code:`Client` or " #~ ":code:`NumPyClient` and call " -#~ ":code:`fl.client.start_client()` or " -#~ ":code:`fl.client.start_numpy_client()`. The string " -#~ ":code:`\"0.0.0.0:8080\"` tells the client " -#~ "which server to connect to. In our" -#~ " case we can run the server and" -#~ " the client on the same machine, " -#~ "therefore we use :code:`\"0.0.0.0:8080\"`. If" -#~ " we run a truly federated workload" -#~ " with the server and clients running" -#~ " on different machines, all that " -#~ "needs to change is the " -#~ ":code:`server_address` we pass to the " -#~ "client." +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. 
In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." #~ msgstr "" #~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" #~ " 并调用 :code:`fl.client.start_client()` 或 " #~ ":code:`fl.client.start_numpy_client()`。字符串 " -#~ ":code:`\"0.0.0.0:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此我们使用" -#~ " " -#~ ":code:`\"0.0.0.0:8080\"`。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是传递给客户端的" +#~ ":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " +#~ ":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的" #~ " :code:`server_address`。" +#~ msgid "Each client will have its own dataset." +#~ msgstr "每个客户都有自己的数据集。" + #~ msgid "" -#~ "With both client and server ready, " -#~ "we can now run everything and see" -#~ " federated learning in action. Federated" -#~ " learning systems usually have a " -#~ "server and multiple clients. We " -#~ "therefore have to start the server " -#~ "first:" -#~ msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" #~ msgid "" #~ "Congratulations! You've successfully built and" #~ " run your first federated learning " #~ "system. The full `source code " #~ "`_ for this example can " -#~ "be found in :code:`examples/quickstart-mxnet`." +#~ "tensorflow/client.py>`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." 
#~ msgstr "" -#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。本示例的`完整源代码 " +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " #~ "`_ 可在 :code:`examples/quickstart-" -#~ "mxnet` 中找到。" - -#~ msgid ":code:`load_mnist()`" -#~ msgstr ":code:`load_mnist()`" - -#~ msgid "Loads the MNIST dataset using OpenML" -#~ msgstr "使用 OpenML 加载 MNIST 数据集" - -#~ msgid ":code:`shuffle()`" -#~ msgstr ":code:`shuffle()`" +#~ "tensorflow/client.py>`_ 可以在 :code:`examples/quickstart-" +#~ "tensorflow/client.py` 中找到。" -#~ msgid "Shuffles data and its label" -#~ msgstr "对数据及其标签进行洗牌" +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" -#~ msgid ":code:`partition()`" -#~ msgstr ":code:`partition()`" +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" -#~ msgid "Splits datasets into a number of partitions" -#~ msgstr "将数据集分割成多个分区" +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" -#~ msgid "" -#~ "We load the MNIST dataset from " -#~ "`OpenML " -#~ "`_, a" -#~ " popular image classification dataset of" -#~ " handwritten digits for machine learning." -#~ " The utility :code:`utils.load_mnist()` downloads" -#~ " the training and test data. The " -#~ "training set is split afterwards into" -#~ " 10 partitions with :code:`utils.partition()`." +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" #~ msgstr "" -#~ "我们从 `OpenML `_ 中加载 " -#~ "MNIST 数据集,这是一个用于机器学习的流行手写数字图像分类数据集。实用程序 " -#~ ":code:`utils.load_mnist()` 下载训练和测试数据。然后使用 " -#~ ":code:`utils.partition()`将训练集分割成 10 个分区。" -#~ msgid "Let's get stated!" -#~ msgstr "让我们开始吧!" 
+#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" -#~ msgid "|2b5c62c529f6416f840c594cce062fbb|" +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" #~ msgstr "" -#~ msgid "|90b334680cb7467d9a04d39b8e8dca9f|" +#~ msgid "|89d30862e62e4f9989e193483a08680a|" #~ msgstr "" -#~ msgid "|65764ceee89f4335bfd93fd0b115e831|" +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" #~ msgstr "" -#~ msgid "|d97319ec28bb407ea0ab9705e38f3bcf|" +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" #~ msgstr "" -#~ msgid "|11e95ac83a8548d8b3505b4663187d07|" +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" #~ msgstr "" -#~ msgid "|1dab2f3a23674abc8a6731f20fa10730|" +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" #~ msgstr "" -#~ msgid "|7f0ee162da38450788493a21627306f7|" +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" #~ msgstr "" -#~ msgid "|296a1fb72c514b23b3d8905ff0ff98c6|" +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" #~ msgstr "" -#~ msgid "|5b1408eec0d746cdb91162a9107b6089|" +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" #~ msgstr "" -#~ msgid "|aef19f4b122c4e8d9f4c57f99bcd5dd2|" +#~ msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" #~ msgstr "" -#~ msgid "|2881a86d8fc54ba29d96b29fc2819f4a|" +#~ msgid "|33cacb7d985c4906b348515c1a5cd993|" #~ msgstr "" -#~ msgid "|ec1fe880237247e0975f52766775ab84|" +#~ msgid "|cc080a555947492fa66131dc3a967603|" #~ msgstr "" -#~ msgid "|9fdf048ed58d4467b2718cdf4aaf1ec3|" +#~ msgid "|085c3e0fb8664c6aa06246636524b20b|" #~ msgstr "" -#~ msgid "|ff726bc5505e432388ee2fdd6ef420b9|" +#~ msgid "|bfe69c74e48c45d49b50251c38c2a019|" #~ msgstr "" -#~ msgid "" -#~ "Currently, Flower provides two images, a" -#~ " ``base`` image and a ``superlink`` " -#~ "image. The base image, as the name" -#~ " suggests, contains basic dependencies that" -#~ " the SuperLink needs. This includes " -#~ "system dependencies, Python and Python " -#~ "tools. The SuperLink image is based " -#~ "on the base image, but it " -#~ "additionally installs the SuperLink using " -#~ "``pip``." 
+#~ msgid "|ebbecd651f0348d99c6511ea859bf4ca|" #~ msgstr "" -#~ "目前,Flower " -#~ "提供两个镜像,一个基础镜像和一个服务器镜像。不久还将推出客户端镜像。基础镜像,顾名思义,包含服务器和客户端都需要的基本依赖项。其中包括系统依赖项、Python" -#~ " 和 Python 工具。服务器镜像基于基础镜像,但它会使用 ``pip`` 额外安装" -#~ " Flower 服务器。" -#~ msgid "``3.11``" -#~ msgstr "``1.0.0rc1``" +#~ msgid "|163117eb654a4273babba413cf8065f5|" +#~ msgstr "" -#~ msgid "Defaults to ``22.04``." -#~ msgstr "默认为 ``22.04``。" +#~ msgid "|452ac3ba453b4cd1be27be1ba7560d64|" +#~ msgstr "" -#~ msgid "Building the SuperLink image" -#~ msgstr "启动服务器" +#~ msgid "|f403fcd69e4e44409627e748b404c086|" +#~ msgstr "" -#~ msgid "Defaults to ``flwr/base``." -#~ msgstr "默认为 ``flwr/server``。" +#~ msgid "|4b00fe63870145968f8443619a792a42|" +#~ msgstr "" -#~ msgid "The Python version of the base image." -#~ msgstr "基础镜像的存储库名称。" +#~ msgid "|368378731066486fa4397e89bc6b870c|" +#~ msgstr "" -#~ msgid "Defaults to ``py3.11``." -#~ msgstr "默认为 ``22.04``。" +#~ msgid "|a66aa83d85bf4ffba7ed660b718066da|" +#~ msgstr "" -#~ msgid "Defaults to ``ubuntu22.04``." -#~ msgstr "默认为 ``py3.11-ubuntu22.04``。" +#~ msgid "|82324b9af72a4582a81839d55caab767|" +#~ msgstr "" -#~ msgid "Defaults to ``flwr``." -#~ msgstr "默认为 ``flwr/server``。" +#~ msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" +#~ msgstr "" #~ msgid "" -#~ "The name of image is ``flwr_superlink``" -#~ " and the tag ``0.1.0``. Remember that" -#~ " the build arguments as well as " -#~ "the name and tag can be adapted" -#~ " to your needs. These values serve" -#~ " as examples only." -#~ msgstr "图像名称为 ``flwr_server``,标记为 ``0.1.0``。请记住,编译参数以及名称和标记都可以根据需要进行调整。这些值仅供参考。" +#~ "Some quickstart examples may have " +#~ "limitations or requirements that prevent " +#~ "them from running on every environment." +#~ " For more information, please see " +#~ "`Limitations`_." +#~ msgstr "" -#~ msgid "Creating New Messages" -#~ msgstr "创建新信息" +#~ msgid "" +#~ "Change the application code. 
For " +#~ "example, change the ``seed`` in " +#~ "``quickstart_docker/task.py`` to ``43`` and " +#~ "save it:" +#~ msgstr "" + +#~ msgid ":code:`fit`" +#~ msgstr ":code:`fit`" #~ msgid "" -#~ "This is a simple guide for " -#~ "creating a new type of message " -#~ "between the server and clients in " -#~ "Flower." -#~ msgstr "这是一个如何用Flower在服务器和客户端之间创建新类型的信息的简要指导。" +#~ "Note that since version :code:`1.11.0`, " +#~ ":code:`flower-server-app` no longer " +#~ "supports passing a reference to a " +#~ "`ServerApp` attribute. Instead, you need " +#~ "to pass the path to Flower app " +#~ "via the argument :code:`--app`. This is" +#~ " the path to a directory containing" +#~ " a `pyproject.toml`. You can create a" +#~ " valid Flower app by executing " +#~ ":code:`flwr new` and following the " +#~ "prompt." +#~ msgstr "" #~ msgid "" -#~ "Let's suppose we have the following " -#~ "example functions in :code:`server.py` and " -#~ ":code:`numpy_client.py`..." -#~ msgstr "假设我们在脚本code:`server.py`和code:`numpy_client.py`中有以下的示例函数..." +#~ "All required parameters defined above " +#~ "are passed to :code:`XgbClient`'s constructor." +#~ msgstr "" -#~ msgid "Server's side:" -#~ msgstr "在服务器端:" +#~ msgid "|b8714c45b74b4d8fb008e2ebb3bc1d44|" +#~ msgstr "" -#~ msgid "Client's side:" -#~ msgstr "在客户端:" +#~ msgid "|75f1561efcfd422ea67d28d1513120dc|" +#~ msgstr "" -#~ msgid "" -#~ "Let's now see what we need to " -#~ "implement in order to get this " -#~ "simple function between the server and" -#~ " client to work!" -#~ msgstr "现在让我们来看看,为了让服务器和客户端之间的这个简单的函数正常工作,我们需要实现哪些功能!" +#~ msgid "|6a1f51b235304558a9bdaaabfc93b8d2|" +#~ msgstr "" -#~ msgid "Message Types for Protocol Buffers" -#~ msgstr "协议缓冲区的信息类型" +#~ msgid "|35e70dab1fb544af9aa3a9c09c4f9797|" +#~ msgstr "" -#~ msgid "" -#~ "The first thing we need to do " -#~ "is to define a message type for" -#~ " the RPC system in :code:`transport.proto`." -#~ " Note that we have to do it " -#~ "for both the request and response " -#~ "messages. 
For more details on the " -#~ "syntax of proto3, please see the " -#~ "`official documentation `_." +#~ msgid "|d7efb5705dd3467f991ed23746824a07|" #~ msgstr "" -#~ "我们需要做的第一件事是在脚本code:`transport.proto`中定义 RPC " -#~ "系统的消息类型。请注意,我们必须对请求信息和响应信息都这样做。有关 proto3 语法的更多详情,请参阅官方文档" -#~ " `_。" -#~ msgid "Within the :code:`ServerMessage` block:" -#~ msgstr "在 :code:`ServerMessage` 代码块中:" +#~ msgid "|94e7b021c7b540bfbedf7f082a41ff87|" +#~ msgstr "" -#~ msgid "Within the ClientMessage block:" -#~ msgstr "在 ClientMessage 代码块中:" +#~ msgid "|a80714782dde439ab73936518f91fc3c|" +#~ msgstr "" -#~ msgid "" -#~ "Make sure to also add a field " -#~ "of the newly created message type " -#~ "in :code:`oneof msg`." -#~ msgstr "确保在 :code:`oneof msg` 中也添加一个新创建的消息类型字段。" +#~ msgid "|c62080ca6197473da57d191c8225a9d9|" +#~ msgstr "" -#~ msgid "Once that is done, we will compile the file with:" -#~ msgstr "完成后,我们将使用:" +#~ msgid "|21a8f1e6a5b14a7bbb8559979d0e8a2b|" +#~ msgstr "" -#~ msgid "If it compiles successfully, you should see the following message:" -#~ msgstr "如果编译成功,你应该会看到以下信息:" +#~ msgid "|c310f2a22f7b4917bf42775aae7a1c09|" +#~ msgstr "" -#~ msgid "Serialization and Deserialization Functions" -#~ msgstr "序列化和反序列化函数" +#~ msgid "|a0c5b43401194535a8460bcf02e65f9a|" +#~ msgstr "" -#~ msgid "" -#~ "Our next step is to add functions" -#~ " to serialize and deserialize Python " -#~ "datatypes to or from our defined " -#~ "RPC message types. You should add " -#~ "these functions in :code:`serde.py`." 
+#~ msgid "|aabfdbd5564e41a790f8ea93cc21a444|" #~ msgstr "" -#~ "下一步是添加函数,以便将 Python 数据类型序列化和反序列化为我们定义的 RPC " -#~ "消息类型或从我们定义的 RPC 消息类型反序列化和反序列化 Python 数据类型。您应该在" -#~ " :code:`serde.py` 中添加这些函数。" -#~ msgid "The four functions:" -#~ msgstr "四种函数:" +#~ msgid "|c9cc8f160fa647b09e742fe4dc8edb54|" +#~ msgstr "" -#~ msgid "Sending the Message from the Server" -#~ msgstr "从服务器发送信息" +#~ msgid "|7e83aad011cd4907b2f02f907c6922e9|" +#~ msgstr "" -#~ msgid "" -#~ "Now write the request function in " -#~ "your Client Proxy class (e.g., " -#~ ":code:`grpc_client_proxy.py`) using the serde " -#~ "functions you just created:" -#~ msgstr "现在,在客户端代理类(例如 :code:`grpc_client_proxy.py`)中使用刚才创建的 serde 函数编写请求函数:" +#~ msgid "|4627c2bb6cc443ae9e079f81f33c9dd9|" +#~ msgstr "" -#~ msgid "Receiving the Message by the Client" -#~ msgstr "由客户端接收信息" +#~ msgid "|131af8322dc5466b827afd24be98f8c0|" +#~ msgstr "" -#~ msgid "" -#~ "Last step! Modify the code in " -#~ ":code:`message_handler.py` to check the field" -#~ " of your message and call the " -#~ ":code:`example_response` function. Remember to " -#~ "use the serde functions!" +#~ msgid "|f92920b87f3a40179bf7ddd0b6144c53|" #~ msgstr "" -#~ "最后一步 修改 :code:`message_handler.py` 中的代码,检查信息的字段并调用" -#~ " :code:`example_response` 函数。记住使用 serde 函数!" -#~ msgid "Within the handle function:" -#~ msgstr "在句柄函数内:" +#~ msgid "|d62da263071d45a496f543e41fce3a19|" +#~ msgstr "" -#~ msgid "And add a new function:" -#~ msgstr "并增加一个新函数:" +#~ msgid "|ad851971645b4e1fbf8d15bcc0b2ee11|" +#~ msgstr "" -#~ msgid "Hopefully, when you run your program you will get the intended result!" -#~ msgstr "希望您在运行程序时能得到预期的结果!" +#~ msgid "|929e9a6de6b34edb8488e644e2bb5221|" +#~ msgstr "" -#~ msgid ":py:obj:`run_driver_api `\\ \\(\\)" -#~ msgstr ":py:obj:`run_driver_api `\\ \\(\\)" +#~ msgid "|404cf9c9e8d64784a55646c0f9479cbc|" +#~ msgstr "" -#~ msgid "Run Flower server (Driver API)." 
-#~ msgstr "flower-driver-api" +#~ msgid "|b021ff9d25814458b1e631f8985a648b|" +#~ msgstr "" -#~ msgid ":py:obj:`run_fleet_api `\\ \\(\\)" -#~ msgstr ":py:obj:`run_fleet_api `\\ \\(\\)" +#~ msgid "|e6ca84e1df244f238288a768352678e5|" +#~ msgstr "" -#~ msgid "Run Flower server (Fleet API)." -#~ msgstr "Flower 服务器。" +#~ msgid "|39c2422082554a21963baffb33a0d057|" +#~ msgstr "" -#~ msgid "Unreleased" -#~ msgstr "尚未发布" +#~ msgid "|07ecf5fcd6814e88906accec6fa0fbfb|" +#~ msgstr "" -#~ msgid "|d8bf04f23d9b46d8a23cc6f4887d7873|" -#~ msgstr "|d8bf04f23d9b46d8a23cc6f4887d7873|" +#~ msgid "|57e78c0ca8a94ba5a64a04b1f2280e55|" +#~ msgstr "" -#~ msgid "|5aa1711387d74d0f8b9c499e1a51627e|" -#~ msgstr "|5aa1711387d74d0f8b9c499e1a51627e|" +#~ msgid "|9819b40e59ee40a4921e1244e8c99bac|" +#~ msgstr "" -#~ msgid "|2bc8e069228d4873804061ff4a95048c|" -#~ msgstr "|2bc8e069228d4873804061ff4a95048c|" +#~ msgid "|797bf279c4894b5ead31dc9b0534ed62|" +#~ msgstr "" -#~ msgid "|c258488766324dc9a6807f0e7c4fd5f4|" -#~ msgstr "|c258488766324dc9a6807f0e7c4fd5f4|" +#~ msgid "|3a7aceef05f0421794726ac54aaf12fd|" +#~ msgstr "" -#~ msgid "|d5f962c3f4ec48529efda980868c14b0|" -#~ msgstr "|d5f962c3f4ec48529efda980868c14b0|" +#~ msgid "|d741075f8e624331b42c0746f7d258a0|" +#~ msgstr "" -#~ msgid "|a5eccea18d4c43a68b54b65043cabef8|" -#~ msgstr "|a5eccea18d4c43a68b54b65043cabef8|" +#~ msgid "|8fc92d668bcb42b8bda55143847f2329|" +#~ msgstr "" -#~ msgid "|f17662f7df2d42f68cac70a1fdeda8a7|" -#~ msgstr "|f17662f7df2d42f68cac70a1fdeda8a7|" +#~ msgid "|1c705d833a024f22adcaeb8ae3d13b0b|" +#~ msgstr "" -#~ msgid "|241fc906441a4f038c625a19d30d01b2|" -#~ msgstr "|241fc906441a4f038c625a19d30d01b2|" +#~ msgid "|77a037b546a84262b608e04bc82a2c96|" +#~ msgstr "" -#~ msgid "|0aa5aa05810b44b6a835cecce28f3137|" -#~ msgstr "|0aa5aa05810b44b6a835cecce28f3137|" +#~ msgid "|f568e24c9fb0435690ac628210a4be96|" +#~ msgstr "" -#~ msgid "|c742940dd4bf4de09d8d0d5e8d179638|" -#~ msgstr "|c742940dd4bf4de09d8d0d5e8d179638|" +#~ msgid 
"|a7bf029981514e2593aa3a2b48c9d76a|" +#~ msgstr "" -#~ msgid "|1f169ab4601a47e1a226f1628f4ebddb|" -#~ msgstr "|1f169ab4601a47e1a226f1628f4ebddb|" +#~ msgid "|3f645ad807f84be8b1f8f3267173939c|" +#~ msgstr "" -#~ msgid "|12cfa9cde14440ecb8c8f6c1d7185bec|" -#~ msgstr "|12cfa9cde14440ecb8c8f6c1d7185bec|" +#~ msgid "|a06a9dbd603f45819afd8e8cfc3c4b8f|" +#~ msgstr "" -#~ msgid "|72939caf6e294b0986fee6dde96614d7|" -#~ msgstr "|72939caf6e294b0986fee6dde96614d7|" +#~ msgid "|edcf9a04d96e42608fd01a333375febe|" +#~ msgstr "" -#~ msgid "|83a8daee45da4a98b8d6f24ae098fc50|" -#~ msgstr "|83a8daee45da4a98b8d6f24ae098fc50|" +#~ msgid "|3dae22fe797043968e2b7aa7073c78bd|" +#~ msgstr "" + +#~ msgid "|ba178f75267d4ad8aa7363f20709195f|" +#~ msgstr "" + +#~ msgid "|c380c750bfd2444abce039a1c6fa8e60|" +#~ msgstr "" + +#~ msgid "|e7cec00a114b48359935c6510595132e|" +#~ msgstr "" diff --git a/doc/source/_static/flower-architecture-ECE.png b/doc/source/_static/flower-architecture-ECE.png deleted file mode 100755 index 8ccc83469c5d..000000000000 Binary files a/doc/source/_static/flower-architecture-ECE.png and /dev/null differ diff --git a/doc/source/_static/flower-architecture-VCE.png b/doc/source/_static/flower-architecture-VCE.png deleted file mode 100755 index a7ff1a2c2ace..000000000000 Binary files a/doc/source/_static/flower-architecture-VCE.png and /dev/null differ diff --git a/doc/source/_static/flower-architecture-basic-architecture.svg b/doc/source/_static/flower-architecture-basic-architecture.svg new file mode 100644 index 000000000000..65d0ccc05e96 --- /dev/null +++ b/doc/source/_static/flower-architecture-basic-architecture.svg @@ -0,0 +1,4 @@ + + + +
 Client
SuperNode
ClientApp
 Client
SuperNode
ClientApp
 Client
SuperNode
ClientApp
 Server
ServerApp
SuperLink
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-deployment-engine.svg b/doc/source/_static/flower-architecture-deployment-engine.svg new file mode 100644 index 000000000000..2e8dbdfd2626 --- /dev/null +++ b/doc/source/_static/flower-architecture-deployment-engine.svg @@ -0,0 +1,4 @@ + + + +
 User
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperExec



SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
Deployment Engine Executor
flwr run
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-hub-and-spoke.svg b/doc/source/_static/flower-architecture-hub-and-spoke.svg new file mode 100644 index 000000000000..c97f74f2413d --- /dev/null +++ b/doc/source/_static/flower-architecture-hub-and-spoke.svg @@ -0,0 +1,4 @@ + + + +
 

Client
 

Server
 

Client
 

Client
 

Client
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-multi-run-1.svg b/doc/source/_static/flower-architecture-multi-run-1.svg new file mode 100644 index 000000000000..4e75224f5b59 --- /dev/null +++ b/doc/source/_static/flower-architecture-multi-run-1.svg @@ -0,0 +1,4 @@ + + + +
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-multi-run-2.svg b/doc/source/_static/flower-architecture-multi-run-2.svg new file mode 100644 index 000000000000..b6d20453e98f --- /dev/null +++ b/doc/source/_static/flower-architecture-multi-run-2.svg @@ -0,0 +1,4 @@ + + + +
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture-multi-run.svg b/doc/source/_static/flower-architecture-multi-run.svg new file mode 100644 index 000000000000..91df0c514b52 --- /dev/null +++ b/doc/source/_static/flower-architecture-multi-run.svg @@ -0,0 +1,4 @@ + + + +
 Client
 Server
ServerApp
[run 1]
SuperLink
SuperNode
ServerApp
[run 2]
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
 Client
SuperNode
ClientApp
[run 1]
ClientApp
[run 2]
\ No newline at end of file diff --git a/doc/source/_static/flower-architecture.drawio.png b/doc/source/_static/flower-architecture.drawio.png deleted file mode 100755 index a9c3914a1839..000000000000 Binary files a/doc/source/_static/flower-architecture.drawio.png and /dev/null differ diff --git a/doc/source/_templates/base.html b/doc/source/_templates/base.html index 768c560f4f6a..925e40765b13 100644 --- a/doc/source/_templates/base.html +++ b/doc/source/_templates/base.html @@ -46,9 +46,7 @@ {#- Site title -#} {%- block htmltitle -%} - {% if versions %} - Flower Framework {{ current_version.url }} - {% elif not docstitle %} + {% if not docstitle %} {{ title|striptags|e }} {% elif pagename == master_doc %} {{ docstitle|striptags|e }} diff --git a/doc/source/conf.py b/doc/source/conf.py index d3881325a5ce..6111a972218f 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -90,14 +90,16 @@ author = "The Flower Authors" # The full version of the next release, including alpha/beta/rc tags -release = "1.11.0" +release = "1.13.0" # The current released version rst_prolog = """ -.. |stable_flwr_version| replace:: 1.10.0 +.. |stable_flwr_version| replace:: 1.12.0 .. |stable_flwr_superlink_docker_digest| replace:: 4b317d5b6030710b476f4dbfab2c3a33021ad40a0fcfa54d7edd45e0c51d889c -.. |ubuntu_version| replace:: 22.04 +.. |ubuntu_version| replace:: 24.04 .. |setuptools_version| replace:: 70.3.0 .. |pip_version| replace:: 24.1.2 +.. |python_version| replace:: 3.9 +.. 
|python_full_version| replace:: 3.9.20 """ # -- General configuration --------------------------------------------------- @@ -122,6 +124,7 @@ "nbsphinx", "sphinx_click", "sphinx_substitution_extensions", + "sphinxext.opengraph", ] # Generate .rst files @@ -249,8 +252,6 @@ def find_test_modules(package_path): "creating-new-messages": "contributor-how-to-create-new-messages.html", "write-documentation": "contributor-how-to-write-documentation.html", "release-process": "contributor-how-to-release-flower.html", - # Restructuring: contributor explanations - "architecture": "contributor-explanation-architecture.html", # Restructuring: contributor references "good-first-contributions": "contributor-ref-good-first-contributions.html", "secagg": "contributor-ref-secure-aggregation-protocols.html", @@ -264,6 +265,9 @@ def find_test_modules(package_path): "example-mxnet-walk-through": "index.html", "ref-api/flwr.simulation.run_simulation_from_cli": "index.html", "contributor-how-to-create-new-messages": "index.html", + "example-jax-from-centralized-to-federated": "tutorial-quickstart-jax.html", + "architecture": "explanation-flower-architecture.html", + "contributor-explanation-architecture.html": "explanation-flower-architecture.html", } # -- Options for HTML output ------------------------------------------------- @@ -272,7 +276,7 @@ def find_test_modules(package_path): # a list of builtin themes. 
# html_theme = "furo" -html_title = f"Flower Framework" +html_title = "Flower Framework" html_logo = "_static/flower-logo.png" html_favicon = "_static/favicon.ico" html_baseurl = "https://flower.ai/docs/framework/" diff --git a/doc/source/contributor-explanation-architecture.rst b/doc/source/contributor-explanation-architecture.rst deleted file mode 100644 index a20a84313118..000000000000 --- a/doc/source/contributor-explanation-architecture.rst +++ /dev/null @@ -1,26 +0,0 @@ -Flower Architecture -=================== - -Edge Client Engine ------------------- - -`Flower `_ core framework architecture with Edge Client Engine - -.. figure:: _static/flower-architecture-ECE.png - :width: 80 % - -Virtual Client Engine ---------------------- - -`Flower `_ core framework architecture with Virtual Client Engine - -.. figure:: _static/flower-architecture-VCE.png - :width: 80 % - -Virtual Client Engine and Edge Client Engine in the same workload ------------------------------------------------------------------ - -`Flower `_ core framework architecture with both Virtual Client Engine and Edge Client Engine - -.. figure:: _static/flower-architecture.drawio.png - :width: 80 % diff --git a/doc/source/contributor-explanation-public-and-private-apis.rst b/doc/source/contributor-explanation-public-and-private-apis.rst index 1dfdf88f97d3..ac62ae341f14 100644 --- a/doc/source/contributor-explanation-public-and-private-apis.rst +++ b/doc/source/contributor-explanation-public-and-private-apis.rst @@ -1,22 +1,23 @@ Public and private APIs ======================= -In Python, everything is public. -To enable developers to understand which components can be relied upon, Flower declares a public API. -Components that are part of the public API can be relied upon. -Changes to the public API are announced in the release notes and are subject to deprecation policies. +In Python, everything is public. 
To enable developers to understand which components can +be relied upon, Flower declares a public API. Components that are part of the public API +can be relied upon. Changes to the public API are announced in the release notes and are +subject to deprecation policies. -Everything that is not part of the public API is part of the private API. -Even though Python allows accessing them, user code should never use those components. -Private APIs can change at any time, even in patch releases. +Everything that is not part of the public API is part of the private API. Even though +Python allows accessing them, user code should never use those components. Private APIs +can change at any time, even in patch releases. How can you determine whether a component is part of the public API or not? Easy: - `Use the Flower API reference documentation `_ - `Use the Flower CLI reference documentation `_ -Everything listed in the reference documentation is part of the public API. -This document explains how Flower maintainers define the public API and how you can determine whether a component is part of the public API or not by reading the Flower source code. +Everything listed in the reference documentation is part of the public API. This +document explains how Flower maintainers define the public API and how you can determine +whether a component is part of the public API or not by reading the Flower source code. Flower public API ----------------- @@ -25,94 +26,117 @@ Flower has a well-defined public API. Let's look at this in more detail. .. important:: - Every component that is reachable by recursively following ``__init__.__all__`` starting from the root package (``flwr``) is part of the public API. + Every component that is reachable by recursively following ``__init__.__all__`` + starting from the root package (``flwr``) is part of the public API. -If you want to determine whether a component (class/function/generator/...) 
is part of the public API or not, you need to start at the root of the ``flwr`` package. -Let's use ``tree -L 1 -d src/py/flwr`` to look at the Python sub-packages contained ``flwr``: +If you want to determine whether a component (class/function/generator/...) is part of +the public API or not, you need to start at the root of the ``flwr`` package. Let's use +``tree -L 1 -d src/py/flwr`` to look at the Python sub-packages contained ``flwr``: .. code-block:: bash - flwr - ├── cli - ├── client - ├── common - ├── proto - ├── server - └── simulation + flwr + ├── cli + ├── client + ├── common + ├── proto + ├── server + └── simulation -Contrast this with the definition of ``__all__`` in the root ``src/py/flwr/__init__.py``: +Contrast this with the definition of ``__all__`` in the root +``src/py/flwr/__init__.py``: .. code-block:: python - # From `flwr/__init__.py` - __all__ = [ - "client", - "common", - "server", - "simulation", - ] - -You can see that ``flwr`` has six subpackages (``cli``, ``client``, ``common``, ``proto``, ``server``, ``simulation``), but only four of them are "exported" via ``__all__`` (``client``, ``common``, ``server``, ``simulation``). - -What does this mean? It means that ``client``, ``common``, ``server`` and ``simulation`` are part of the public API, but ``cli`` and ``proto`` are not. -The ``flwr`` subpackages ``cli`` and ``proto`` are private APIs. -A private API can change completely from one release to the next (even in patch releases). -It can change in a breaking way, it can be renamed (for example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can even be removed completely. + # From `flwr/__init__.py` + __all__ = [ + "client", + "common", + "server", + "simulation", + ] + +You can see that ``flwr`` has six subpackages (``cli``, ``client``, ``common``, +``proto``, ``server``, ``simulation``), but only four of them are "exported" via +``__all__`` (``client``, ``common``, ``server``, ``simulation``). + +What does this mean? 
It means that ``client``, ``common``, ``server`` and ``simulation`` +are part of the public API, but ``cli`` and ``proto`` are not. The ``flwr`` subpackages +``cli`` and ``proto`` are private APIs. A private API can change completely from one +release to the next (even in patch releases). It can change in a breaking way, it can be +renamed (for example, ``flwr.cli`` could be renamed to ``flwr.command``) and it can even +be removed completely. Therefore, as a Flower user: - ``from flwr import client`` ✅ Ok, you're importing a public API. - ``from flwr import proto`` ❌ Not recommended, you're importing a private API. -What about components that are nested deeper in the hierarchy? Let's look at Flower strategies to see another typical pattern. -Flower strategies like ``FedAvg`` are often imported using ``from flwr.server.strategy import FedAvg``. -Let's look at ``src/py/flwr/server/strategy/__init__.py``: +What about components that are nested deeper in the hierarchy? Let's look at Flower +strategies to see another typical pattern. Flower strategies like ``FedAvg`` are often +imported using ``from flwr.server.strategy import FedAvg``. Let's look at +``src/py/flwr/server/strategy/__init__.py``: .. code-block:: python - from .fedavg import FedAvg as FedAvg - # ... more imports + from .fedavg import FedAvg as FedAvg + + # ... more imports - __all__ = [ - "FedAvg", - # ... more exports - ] + __all__ = [ + "FedAvg", + # ... more exports + ] -What's notable here is that all strategies are implemented in dedicated modules (e.g., ``fedavg.py``). -In ``__init__.py``, we *import* the components we want to make part of the public API and then *export* them via ``__all__``. -Note that we export the component itself (for example, the ``FedAvg`` class), but not the module it is defined in (for example, ``fedavg.py``). 
-This allows us to move the definition of ``FedAvg`` into a different module (or even a module in a subpackage) without breaking the public API (as long as we update the import path in ``__init__.py``). +What's notable here is that all strategies are implemented in dedicated modules (e.g., +``fedavg.py``). In ``__init__.py``, we *import* the components we want to make part of +the public API and then *export* them via ``__all__``. Note that we export the component +itself (for example, the ``FedAvg`` class), but not the module it is defined in (for +example, ``fedavg.py``). This allows us to move the definition of ``FedAvg`` into a +different module (or even a module in a subpackage) without breaking the public API (as +long as we update the import path in ``__init__.py``). Therefore: -- ``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a class that is part of the public API. -- ``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're importing a private module. +- ``from flwr.server.strategy import FedAvg`` ✅ Ok, you're importing a class that is + part of the public API. +- ``from flwr.server.strategy import fedavg`` ❌ Not recommended, you're importing a + private module. -This approach is also implemented in the tooling that automatically builds API reference docs. +This approach is also implemented in the tooling that automatically builds API reference +docs. Flower public API of private packages ------------------------------------- -We also use this to define the public API of private subpackages. -Public, in this context, means the API that other ``flwr`` subpackages should use. -For example, ``flwr.server.driver`` is a private subpackage (it's not exported via ``src/py/flwr/server/__init__.py``'s ``__all__``). +We also use this to define the public API of private subpackages. Public, in this +context, means the API that other ``flwr`` subpackages should use. 
For example, +``flwr.server.driver`` is a private subpackage (it's not exported via +``src/py/flwr/server/__init__.py``'s ``__all__``). -Still, the private sub-package ``flwr.server.driver`` defines a "public" API using ``__all__`` in ``src/py/flwr/server/driver/__init__.py``: +Still, the private sub-package ``flwr.server.driver`` defines a "public" API using +``__all__`` in ``src/py/flwr/server/driver/__init__.py``: .. code-block:: python - from .driver import Driver - from .grpc_driver import GrpcDriver - from .inmemory_driver import InMemoryDriver - - __all__ = [ - "Driver", - "GrpcDriver", - "InMemoryDriver", - ] - -The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` are never used by Flower framework users, only by other parts of the Flower framework codebase. -Those other parts of the codebase import, for example, ``InMemoryDriver`` using ``from flwr.server.driver import InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via ``__all__``), not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` (``in_memory_driver.py`` is the module containing the actual ``InMemoryDriver`` class definition). - -This is because ``flwr.server.driver`` defines a public interface for other ``flwr`` subpackages. -This allows codeowners of ``flwr.server.driver`` to refactor the package without breaking other ``flwr``-internal users. + from .driver import Driver + from .grpc_driver import GrpcDriver + from .inmemory_driver import InMemoryDriver + + __all__ = [ + "Driver", + "GrpcDriver", + "InMemoryDriver", + ] + +The interesting part is that both ``GrpcDriver`` and ``InMemoryDriver`` are never used +by Flower framework users, only by other parts of the Flower framework codebase. 
Those +other parts of the codebase import, for example, ``InMemoryDriver`` using ``from +flwr.server.driver import InMemoryDriver`` (i.e., the ``InMemoryDriver`` exported via +``__all__``), not ``from flwr.server.driver.in_memory_driver import InMemoryDriver`` +(``in_memory_driver.py`` is the module containing the actual ``InMemoryDriver`` class +definition). + +This is because ``flwr.server.driver`` defines a public interface for other ``flwr`` +subpackages. This allows codeowners of ``flwr.server.driver`` to refactor the package +without breaking other ``flwr``-internal users. diff --git a/doc/source/contributor-how-to-build-docker-images.rst b/doc/source/contributor-how-to-build-docker-images.rst index 522d124dfd9b..0b3ce243ce50 100644 --- a/doc/source/contributor-how-to-build-docker-images.rst +++ b/doc/source/contributor-how-to-build-docker-images.rst @@ -2,131 +2,161 @@ How to Build Docker Flower Images Locally ========================================= Flower provides pre-made docker images on `Docker Hub `_ -that include all necessary dependencies for running the SuperLink, SuperNode or ServerApp. -You can also build your own custom docker images from scratch with a different version of Python -or Linux distribution (Ubuntu/Alpine) if that is what you need. In this guide, we will explain what -images exist and how to build them locally. +that include all necessary dependencies for running the SuperLink, SuperNode or +ServerApp. You can also build your own custom docker images from scratch with a +different version of Python or Linux distribution (Ubuntu/Alpine) if that is what you +need. In this guide, we will explain what images exist and how to build them locally. -Before we can start, we need to meet a few prerequisites in our local development environment. +Before we can start, we need to meet a few prerequisites in our local development +environment. -#. Clone the ``flower`` repository. +1. Clone the ``flower`` repository. - .. code-block:: bash + .. 
code-block:: bash - $ git clone --depth=1 https://github.com/adap/flower.git && cd flower + $ git clone --depth=1 https://github.com/adap/flower.git && cd flower -#. Verify the Docker daemon is running. +2. Verify the Docker daemon is running. - The build instructions that assemble the images are located in the respective Dockerfiles. You - can find them in the subdirectories of ``src/docker``. + The build instructions that assemble the images are located in the respective + Dockerfiles. You can find them in the subdirectories of ``src/docker``. - Flower Docker images are configured via build arguments. Through build arguments, we can make the - creation of images more flexible. For example, in the base image, we can specify the version of - Python to install using the ``PYTHON_VERSION`` build argument. Some of the build arguments have - default values, others must be specified when building the image. All available build arguments for - each image are listed in one of the tables below. + Flower Docker images are configured via build arguments. Through build arguments, we + can make the creation of images more flexible. For example, in the base image, we can + specify the version of Python to install using the ``PYTHON_VERSION`` build argument. + Some of the build arguments have default values, others must be specified when + building the image. All available build arguments for each image are listed in one of + the tables below. -Building the base image +Building the Base Image ----------------------- .. list-table:: - :widths: 25 45 15 15 - :header-rows: 1 - - * - Build argument - - Description - - Required - - Example - * - ``DISTRO`` - - The Linux distribution to use as the base image. - - No - - ``ubuntu`` - * - ``DISTRO_VERSION`` - - Version of the Linux distribution. - - No - - :substitution-code:`|ubuntu_version|` - * - ``PYTHON_VERSION`` - - Version of ``python`` to be installed. 
- - No - - ``3.11`` or ``3.11.1`` - * - ``PIP_VERSION`` - - Version of ``pip`` to be installed. - - Yes - - :substitution-code:`|pip_version|` - * - ``SETUPTOOLS_VERSION`` - - Version of ``setuptools`` to be installed. - - Yes - - :substitution-code:`|setuptools_version|` - * - ``FLWR_VERSION`` - - Version of Flower to be installed. - - Yes - - :substitution-code:`|stable_flwr_version|` - * - ``FLWR_PACKAGE`` - - The Flower package to be installed. - - No - - ``flwr`` or ``flwr-nightly`` - -The following example creates a base Ubuntu/Alpine image with Python ``3.11.0``, -pip :substitution-code:`|pip_version|`, setuptools :substitution-code:`|setuptools_version|` + :widths: 25 45 15 15 + :header-rows: 1 + + - - Build argument + - Description + - Required + - Example + - - ``DISTRO`` + - The Linux distribution to use as the base image. + - No + - ``ubuntu`` + - - ``DISTRO_VERSION`` + - Version of the Linux distribution. + - No + - :substitution-code:`|ubuntu_version|` + - - ``PYTHON_VERSION`` + - Version of ``python`` to be installed. + - No + - ``3.11`` or ``3.11.1`` + - - ``PIP_VERSION`` + - Version of ``pip`` to be installed. + - Yes + - :substitution-code:`|pip_version|` + - - ``SETUPTOOLS_VERSION`` + - Version of ``setuptools`` to be installed. + - Yes + - :substitution-code:`|setuptools_version|` + - - ``FLWR_VERSION`` + - Version of Flower to be installed. + - Yes + - :substitution-code:`|stable_flwr_version|` + - - ``FLWR_PACKAGE`` + - The Flower package to be installed. + - No + - ``flwr`` or ``flwr-nightly`` + - - ``FLWR_VERSION_REF`` + - A `direct reference + `_ + without the ``@`` specifier. If both ``FLWR_VERSION`` and ``FLWR_VERSION_REF`` + are specified, the ``FLWR_VERSION_REF`` has precedence. 
+ - No + - `Direct Reference Examples`_ + +The following example creates a base Ubuntu/Alpine image with Python ``3.11.0``, pip +:substitution-code:`|pip_version|`, setuptools :substitution-code:`|setuptools_version|` and Flower :substitution-code:`|stable_flwr_version|`: .. code-block:: bash - :substitutions: + :substitutions: - $ cd src/docker/base/ - $ docker build \ - --build-arg PYTHON_VERSION=3.11.0 \ - --build-arg FLWR_VERSION=|stable_flwr_version| \ - --build-arg PIP_VERSION=|pip_version| \ - --build-arg SETUPTOOLS_VERSION=|setuptools_version| \ - -t flwr_base:0.1.0 . + $ cd src/docker/base/ + $ docker build \ + --build-arg PYTHON_VERSION=3.11.0 \ + --build-arg FLWR_VERSION=|stable_flwr_version| \ + --build-arg PIP_VERSION=|pip_version| \ + --build-arg SETUPTOOLS_VERSION=|setuptools_version| \ + -t flwr_base:0.1.0 . -In this example, we specify our image name as ``flwr_base`` and the tag as ``0.1.0``. Remember that the build arguments as well -as the name and tag can be adapted to your needs. These values serve as examples only. +In this example, we specify our image name as ``flwr_base`` and the tag as ``0.1.0``. +Remember that the build arguments as well as the name and tag can be adapted to your +needs. These values serve as examples only. -Building the SuperLink/SuperNode or ServerApp image ---------------------------------------------------- +Building a Flower Binary Image +------------------------------ .. list-table:: - :widths: 25 45 15 15 - :header-rows: 1 - - * - Build argument - - Description - - Required - - Example - * - ``BASE_REPOSITORY`` - - The repository name of the base image. - - No - - ``flwr/base`` - * - ``BASE_IMAGE`` - - The Tag of the Flower base image. 
- - Yes - - :substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|` - -For example, to build a SuperLink image with the latest Flower version, Python 3.11 and Ubuntu 22.04, run the following: + :widths: 25 45 15 15 + :header-rows: 1 + + - - Build argument + - Description + - Required + - Example + - - ``BASE_REPOSITORY`` + - The repository name of the base image. + - No + - ``flwr/base`` + - - ``BASE_IMAGE`` + - The Tag of the Flower base image. + - Yes + - :substitution-code:`|stable_flwr_version|-py3.11-ubuntu|ubuntu_version|` + +For example, to build a SuperLink image with the latest Flower version, Python 3.11 and +Ubuntu 22.04, run the following: .. code-block:: bash - :substitutions: + :substitutions: - $ cd src/docker/superlink - $ docker build \ - --build-arg BASE_IMAGE=|stable_flwr_version|-py3.11-ubuntu22.04 \ - -t flwr_superlink:0.1.0 . + $ cd src/docker/superlink + $ docker build \ + --build-arg BASE_IMAGE=|stable_flwr_version|-py3.11-ubuntu22.04 \ + -t flwr_superlink:0.1.0 . -If you want to use your own base image instead of the official Flower base image, all you need to do -is set the ``BASE_REPOSITORY`` build argument to ``flwr_base`` (as we've specified above). +If you want to use your own base image instead of the official Flower base image, all +you need to do is set the ``BASE_REPOSITORY`` build argument to ``flwr_base`` (as we've +specified above). .. code-block:: bash - $ cd src/docker/superlink/ - $ docker build \ - --build-arg BASE_REPOSITORY=flwr_base \ - --build-arg BASE_IMAGE=0.1.0 - -t flwr_superlink:0.1.0 . + $ cd src/docker/superlink/ + $ docker build \ + --build-arg BASE_REPOSITORY=flwr_base \ + --build-arg BASE_IMAGE=0.1.0 + -t flwr_superlink:0.1.0 . After creating the image, we can test whether the image is working: .. code-block:: bash - $ docker run --rm flwr_superlink:0.1.0 --help + $ docker run --rm flwr_superlink:0.1.0 --help + +Direct Reference Examples +------------------------- + +.. 
code-block:: bash + :substitutions: + + # main branch + git+https://github.com/adap/flower.git@main + + # commit hash + git+https://github.com/adap/flower.git@1187c707f1894924bfa693d99611cf6f93431835 + + # tag + git+https://github.com/adap/flower.git@|stable_flwr_version| + + # artifact store + https://artifact.flower.ai/py/main/latest/flwr-|stable_flwr_version|-py3-none-any.whl diff --git a/doc/source/contributor-how-to-contribute-translations.rst b/doc/source/contributor-how-to-contribute-translations.rst index ba59901cf1c4..5fff62833b0e 100644 --- a/doc/source/contributor-how-to-contribute-translations.rst +++ b/doc/source/contributor-how-to-contribute-translations.rst @@ -2,70 +2,67 @@ Contribute translations ======================= Since `Flower 1.5 -`_ we -have introduced translations to our doc pages, but, as you might have noticed, -the translations are often imperfect. If you speak languages other than -English, you might be able to help us in our effort to make Federated Learning -accessible to as many people as possible by contributing to those translations! -This might also be a great opportunity for those wanting to become open source -contributors with little prerequisites. +`_ we have +introduced translations to our doc pages, but, as you might have noticed, the +translations are often imperfect. If you speak languages other than English, you might +be able to help us in our effort to make Federated Learning accessible to as many people +as possible by contributing to those translations! This might also be a great +opportunity for those wanting to become open source contributors with little +prerequisites. Our translation project is publicly available over on `Weblate -`_, this where most -of the work will happen. +`_, this where most of the +work will happen. Contribute to existing languages -------------------------------- .. 
youtube:: 10_Xfy5BOfQ - :width: 100% + :width: 100% -The first thing you will need to do in order to contribute is to create a -free Weblate account on this `page -`_. More information -about profile settings can be found `here +The first thing you will need to do in order to contribute is to create a free Weblate +account on this `page `_. More +information about profile settings can be found `here `_. -Once you are signed in to Weblate, you can navigate to the `Flower Framework -project `_. Here, -you should see the different existing languages that can be found on the -website. +Once you are signed in to Weblate, you can navigate to the `Flower Framework project +`_. Here, you should see the +different existing languages that can be found on the website. -Once you have selected the language you want to contribute to, you should see a -similar interface to this: +Once you have selected the language you want to contribute to, you should see a similar +interface to this: - .. image:: _static/weblate_status.png + .. image:: _static/weblate_status.png -The most straight forward option here is to click on the ``Translate`` button -on the top right (in the ``Translation status`` section). This will -automatically bring you to the translation interface for untranslated strings. +The most straightforward option here is to click on the ``Translate`` button on the top +right (in the ``Translation status`` section). This will automatically bring you to the +translation interface for untranslated strings. This is what the interface looks like: - .. image:: _static/weblate_interface.png + ..
image:: _static/weblate_interface.png -You input your translation in the text box at the top and then, once you are -happy with it, you either press ``Save and continue`` (to save the translation -and go to the next untranslated string), ``Save and stay`` (to save the -translation and stay on the same page), ``Suggest`` (to add your translation to -suggestions for other users to view), or ``Skip`` (to go to the next -untranslated string without saving anything). +You input your translation in the text box at the top and then, once you are happy with +it, you either press ``Save and continue`` (to save the translation and go to the next +untranslated string), ``Save and stay`` (to save the translation and stay on the same +page), ``Suggest`` (to add your translation to suggestions for other users to view), or +``Skip`` (to go to the next untranslated string without saving anything). In order to help with the translations, you can see on the bottom the ``Nearby -strings``, the ``Comments`` (from other contributors), the ``Automatic -suggestions`` (from machine translation engines), the translations in ``Other -languages``, and the ``History`` of translations for this string. +strings``, the ``Comments`` (from other contributors), the ``Automatic suggestions`` +(from machine translation engines), the translations in ``Other languages``, and the +``History`` of translations for this string. -On the right, under the ``String information`` section, you can also click the -link under ``Source string location`` in order to view the source of the doc -file containing the string. +On the right, under the ``String information`` section, you can also click the link +under ``Source string location`` in order to view the source of the doc file containing +the string. -For more information about translating using Weblate, you can check out this -`in-depth guide `_. +For more information about translating using Weblate, you can check out this `in-depth +guide `_. 
Add new languages ----------------- -If you want to add a new language, you will first have to contact us, either on -`Slack `_, or by opening an issue on our `GitHub -repo `_. +If you want to add a new language, you will first have to contact us, either on `Slack +`_, or by opening an issue on our `GitHub repo +`_. diff --git a/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst b/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst index c861457b6edc..79f52f8d8f6f 100644 --- a/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst +++ b/doc/source/contributor-how-to-develop-in-vscode-dev-containers.rst @@ -1,24 +1,47 @@ Develop in VSCode Dev Containers ================================ -When working on the Flower framework we want to ensure that all contributors use the same developer environment to format code or run tests. For this purpose we are using the VSCode Remote Containers extension. What is it? Read the following quote: - - - The Visual Studio Code Remote - Containers extension lets you use a Docker container as a fully-featured development environment. It allows you to open any folder inside (or mounted into) a container and take advantage of Visual Studio Code's full feature set. A :code:`devcontainer.json` file in your project tells VS Code how to access (or create) a development container with a well-defined tool and runtime stack. This container can be used to run an application or to separate tools, libraries, or runtimes needed for working with a codebase. - - Workspace files are mounted from the local file system or copied or cloned into the container. Extensions are installed and run inside the container, where they have full access to the tools, platform, and file system. This means that you can seamlessly switch your entire development environment just by connecting to a different container. 
- -Source: `Official VSCode documentation `_ - +When working on the Flower framework we want to ensure that all contributors use the +same developer environment to format code or run tests. For this purpose we are using +the VSCode Remote Containers extension. What is it? Read the following quote: + + The Visual Studio Code Remote - Containers extension lets you use a Docker container + as a fully-featured development environment. It allows you to open any folder inside + (or mounted into) a container and take advantage of Visual Studio Code's full + feature set. A ``devcontainer.json`` file in your project tells VS Code how to + access (or create) a development container with a well-defined tool and runtime + stack. This container can be used to run an application or to separate tools, + libraries, or runtimes needed for working with a codebase. + + Workspace files are mounted from the local file system or copied or cloned into the + container. Extensions are installed and run inside the container, where they have + full access to the tools, platform, and file system. This means that you can + seamlessly switch your entire development environment just by connecting to a + different container. + +Source: `Official VSCode documentation +`_ Getting started --------------- -Configuring and setting up the :code:`Dockerfile` as well the configuration for the devcontainer can be a bit more involved. The good thing is you don't have to do it. Usually it should be enough to install `Docker `_ on your system and ensure its available on your command line. Additionally, install the `VSCode Containers Extension `_. - -Now you should be good to go. When starting VSCode, it will ask you to run in the container environment and - if you confirm - automatically build the container and use it. 
To manually instruct VSCode to use the devcontainer, you can, after installing the extension, click the green area in the bottom left corner of your VSCode window and select the option *(Re)Open Folder in Container*. - -In some cases your setup might be more involved. For those cases consult the following sources: - -* `Developing inside a Container `_ -* `Remote development in Containers `_ +Configuring and setting up the ``Dockerfile`` as well as the configuration for the +devcontainer can be a bit more involved. The good thing is you don't have to do it. +Usually it should be enough to install `Docker +`_ on your system and ensure it's available on +your command line. Additionally, install the `VSCode Containers Extension +`_. + +Now you should be good to go. When starting VSCode, it will ask you to run in the +container environment and - if you confirm - automatically build the container and use +it. To manually instruct VSCode to use the devcontainer, you can, after installing the +extension, click the green area in the bottom left corner of your VSCode window and +select the option *(Re)Open Folder in Container*. + +In some cases your setup might be more involved. For those cases consult the following +sources: + +- `Developing inside a Container + `_ +- `Remote development in Containers + `_ diff --git a/doc/source/contributor-how-to-install-development-versions.rst b/doc/source/contributor-how-to-install-development-versions.rst index 0f0773c85e73..61c123a24309 100644 --- a/doc/source/contributor-how-to-install-development-versions.rst +++ b/doc/source/contributor-how-to-install-development-versions.rst @@ -7,10 +7,13 @@ Install development versions of Flower Using Poetry (recommended) ~~~~~~~~~~~~~~~~~~~~~~~~~~ -Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``).
+Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency in +``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` (``rm +poetry.lock``) before running ``poetry install``). - ``flwr = { version = "1.0.0a0", allow-prereleases = true }`` (without extras) -- ``flwr = { version = "1.0.0a0", allow-prereleases = true, extras = ["simulation"] }`` (with extras) +- ``flwr = { version = "1.0.0a0", allow-prereleases = true, extras = ["simulation"] }`` + (with extras) Install ``flwr`` from a local copy of the Flower source code via ``pyproject.toml``: @@ -20,9 +23,11 @@ Install ``flwr`` from a local copy of the Flower source code via ``pyproject.tom Install ``flwr`` from a local wheel file via ``pyproject.toml``: - ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl" }`` (without extras) -- ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl", extras = ["simulation"] }`` (with extras) +- ``flwr = { path = "../../dist/flwr-1.8.0-py3-none-any.whl", extras = ["simulation"] + }`` (with extras) -Please refer to the Poetry documentation for further details: `Poetry Dependency Specification `_ +Please refer to the Poetry documentation for further details: `Poetry Dependency +Specification `_ Using pip (recommended on Colab) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -32,18 +37,21 @@ Install a ``flwr`` pre-release from PyPI: - ``pip install -U --pre flwr`` (without extras) - ``pip install -U --pre 'flwr[simulation]'`` (with extras) -Python packages can be installed from git repositories. Use one of the following commands to install the Flower directly from GitHub. +Python packages can be installed from git repositories. Use one of the following +commands to install the Flower directly from GitHub. 
Install ``flwr`` from the default GitHub branch (``main``): - ``pip install flwr@git+https://github.com/adap/flower.git`` (without extras) -- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` (with extras) +- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git'`` (with + extras) Install ``flwr`` from a specific GitHub branch (``branch-name``): -- ``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` (without extras) -- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@branch-name'`` (with extras) - +- ``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` (without + extras) +- ``pip install 'flwr[simulation]@git+https://github.com/adap/flower.git@branch-name'`` + (with extras) Open Jupyter Notebooks on Google Colab -------------------------------------- @@ -52,12 +60,15 @@ Open the notebook ``doc/source/tutorial-series-get-started-with-flower-pytorch.i - https://colab.research.google.com/github/adap/flower/blob/main/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb -Open a development version of the same notebook from branch `branch-name` by changing ``main`` to ``branch-name`` (right after ``blob``): +Open a development version of the same notebook from branch `branch-name` by changing +``main`` to ``branch-name`` (right after ``blob``): - https://colab.research.google.com/github/adap/flower/blob/branch-name/doc/source/tutorial-series-get-started-with-flower-pytorch.ipynb Install a `whl` on Google Colab: -1. In the vertical icon grid on the left hand side, select ``Files`` > ``Upload to session storage`` +1. In the vertical icon grid on the left hand side, select ``Files`` > ``Upload to + session storage`` 2. Upload the whl (e.g., ``flwr-1.8.0-py3-none-any.whl``) -3. Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` to ``!pip install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch torchvision matplotlib`` +3. 
Change ``!pip install -q 'flwr[simulation]' torch torchvision matplotlib`` to ``!pip + install -q 'flwr-1.8.0-py3-none-any.whl[simulation]' torch torchvision matplotlib`` diff --git a/doc/source/contributor-how-to-release-flower.rst b/doc/source/contributor-how-to-release-flower.rst index 4853d87bc4c1..fafc02cab64c 100644 --- a/doc/source/contributor-how-to-release-flower.rst +++ b/doc/source/contributor-how-to-release-flower.rst @@ -1,16 +1,28 @@ Release Flower ============== -This document describes the current release process. It may or may not change in the future. +This document describes the current release process. It may or may not change in the +future. During the release ------------------ -The version number of a release is stated in ``pyproject.toml``. To release a new version of Flower, the following things need to happen (in that order): - -1. Run ``python3 src/py/flwr_tool/update_changelog.py `` in order to add every new change to the changelog (feel free to make manual changes to the changelog afterwards until it looks good). -2. Once the changelog has been updated with all the changes, run ``./dev/prepare-release-changelog.sh v``, where ```` is the version stated in ``pyproject.toml`` (notice the ``v`` added before it). This will replace the ``Unreleased`` header of the changelog by the version and current date, and it will add a thanking message for the contributors. Open a pull request with those changes. -3. Once the pull request is merged, tag the release commit with the version number as soon as the PR is merged: ``git tag v`` (notice the ``v`` added before the version number), then ``git push --tags``. This will create a draft release on GitHub containing the correct artifacts and the relevant part of the changelog. +The version number of a release is stated in ``pyproject.toml``. To release a new +version of Flower, the following things need to happen (in that order): + +1. 
Run ``python3 src/py/flwr_tool/update_changelog.py `` in order to add + every new change to the changelog (feel free to make manual changes to the changelog + afterwards until it looks good). +2. Once the changelog has been updated with all the changes, run + ``./dev/prepare-release-changelog.sh v``, where ```` is the + version stated in ``pyproject.toml`` (notice the ``v`` added before it). This will + replace the ``Unreleased`` header of the changelog by the version and current date, + and it will add a thanking message for the contributors. Open a pull request with + those changes. +3. Once the pull request is merged, tag the release commit with the version number as + soon as the PR is merged: ``git tag v`` (notice the ``v`` added before + the version number), then ``git push --tags``. This will create a draft release on + GitHub containing the correct artifacts and the relevant part of the changelog. 4. Check the draft release on GitHub, and if everything is good, publish it. After the release @@ -22,7 +34,8 @@ Create a pull request which contains the following changes: 2. Update all files which contain the current version number if necessary. 3. Add a new ``Unreleased`` section in ``changelog.md``. -Merge the pull request on the same day (i.e., before a new nightly release gets published to PyPI). +Merge the pull request on the same day (i.e., before a new nightly release gets +published to PyPI). Publishing a pre-release ------------------------ @@ -30,7 +43,8 @@ Publishing a pre-release Pre-release naming ~~~~~~~~~~~~~~~~~~ -PyPI supports pre-releases (alpha, beta, release candidate). Pre-releases MUST use one of the following naming patterns: +PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases MUST use one +of the following naming patterns: - Alpha: ``MAJOR.MINOR.PATCHaN`` - Beta: ``MAJOR.MINOR.PATCHbN`` @@ -43,19 +57,25 @@ Examples include: - ``1.0.0rc0`` - ``1.0.0rc1`` -This is in line with PEP-440 and the recommendations from the Python Packaging -Authority (PyPA): +This is in line with PEP-440 and the recommendations from the Python Packaging Authority +(PyPA): - `PEP-440 `_ -- `PyPA Choosing a versioning scheme `_ +- `PyPA Choosing a versioning scheme + `_ -Note that the approach defined by PyPA is not compatible with SemVer 2.0.0 spec, for details consult the `Semantic Versioning Specification `_ (specifically item 11 on precedence). +Note that the approach defined by PyPA is not compatible with SemVer 2.0.0 spec, for +details consult the `Semantic Versioning Specification +`_ (specifically item 11 on +precedence). Pre-release classification ~~~~~~~~~~~~~~~~~~~~~~~~~~ Should the next pre-release be called alpha, beta, or release candidate? -- RC: feature complete, no known issues (apart from issues that are classified as "won't fix" for the next stable release) - if no issues surface this will become the next stable release +- RC: feature complete, no known issues (apart from issues that are classified as "won't + fix" for the next stable release) - if no issues surface this will become the next + stable release - Beta: feature complete, allowed to have known issues - Alpha: not feature complete, allowed to have known issues diff --git a/doc/source/contributor-how-to-set-up-a-virtual-env.rst b/doc/source/contributor-how-to-set-up-a-virtual-env.rst index 8b684e24c658..7e54ed64c9c9 100644 --- a/doc/source/contributor-how-to-set-up-a-virtual-env.rst +++ b/doc/source/contributor-how-to-set-up-a-virtual-env.rst @@ -1,26 +1,33 @@ Set up a virtual env ==================== -It is recommended to run your Python setup within a virtual environment. 
-This guide shows three different examples how to create a virtual environment with pyenv virtualenv, poetry, or Anaconda. -You can follow the instructions or choose your preferred setup. +It is recommended to run your Python setup within a virtual environment. This guide +shows three different examples how to create a virtual environment with pyenv +virtualenv, poetry, or Anaconda. You can follow the instructions or choose your +preferred setup. Python Version -------------- -Flower requires at least `Python 3.8 `_, but `Python 3.10 `_ or above is recommended. +Flower requires at least `Python 3.9 `_, but `Python 3.10 +`_ or above is recommended. .. note:: - Due to a known incompatibility with `ray `_, - we currently recommend utilizing at most `Python 3.11 `_ for - running Flower simulations. + + Due to a known incompatibility with `ray `_, we + currently recommend utilizing at most `Python 3.11 `_ + for running Flower simulations. Virtualenv with Pyenv/Virtualenv -------------------------------- -One of the recommended virtual environment is `pyenv `_/`virtualenv `_. Please see `Flower examples `_ for details. +One of the recommended virtual environments is `pyenv +`_/`virtualenv +`_. Please see `Flower examples +`_ for details. -Once Pyenv is set up, you can use it to install `Python Version 3.10 `_ or above: +Once Pyenv is set up, you can use it to install `Python Version 3.10 +`_ or above: .. code-block:: shell @@ -32,34 +39,35 @@ Create the virtualenv with: pyenv virtualenv 3.10.12 flower-3.10.12 - Activate the virtualenv by running the following command: .. code-block:: shell echo flower-3.10.12 > .python-version - Virtualenv with Poetry ---------------------- -The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you simply create a virtual environment with: +The Flower examples are based on `Poetry `_ to manage +dependencies. After installing Poetry you simply create a virtual environment with: ..
code-block:: shell poetry shell -If you open a new terminal you can activate the previously created virtual environment with the following command: +If you open a new terminal you can activate the previously created virtual environment +with the following command: .. code-block:: shell source $(poetry env info --path)/bin/activate - Virtualenv with Anaconda ------------------------ -If you prefer to use Anaconda for your virtual environment then install and setup the `conda `_ package. After setting it up you can create a virtual environment with: +If you prefer to use Anaconda for your virtual environment then install and setup the +`conda `_ +package. After setting it up you can create a virtual environment with: .. code-block:: shell @@ -71,8 +79,8 @@ and activate the virtual environment with: conda activate flower-3.10.12 - And then? --------- -As soon as you created your virtual environment you clone one of the `Flower examples `_. +As soon as you created your virtual environment you clone one of the `Flower examples +`_. diff --git a/doc/source/contributor-how-to-write-documentation.rst b/doc/source/contributor-how-to-write-documentation.rst index fcd8c5bb18c6..6209530b71e0 100644 --- a/doc/source/contributor-how-to-write-documentation.rst +++ b/doc/source/contributor-how-to-write-documentation.rst @@ -1,14 +1,15 @@ Write documentation =================== - Project layout -------------- -The Flower documentation lives in the ``doc`` directory. The Sphinx-based documentation system supports both reStructuredText (``.rst`` files) and Markdown (``.md`` files). - -Note that, in order to build the documentation locally (with ``poetry run make html``, like described below), `Pandoc `_ needs to be installed on the system. +The Flower documentation lives in the ``doc`` directory. The Sphinx-based documentation +system supports both reStructuredText (``.rst`` files) and Markdown (``.md`` files). 
+Note that, in order to build the documentation locally (with ``poetry run make html``, +like described below), `Pandoc `_ needs to be +installed on the system. Edit an existing page --------------------- @@ -17,7 +18,6 @@ Edit an existing page 2. Compile the docs: ``cd doc``, then ``poetry run make html`` 3. Open ``doc/build/html/index.html`` in the browser to check the result - Create a new page ----------------- diff --git a/doc/source/contributor-ref-good-first-contributions.rst b/doc/source/contributor-ref-good-first-contributions.rst index 2b8ce88413f5..a715e006f905 100644 --- a/doc/source/contributor-ref-good-first-contributions.rst +++ b/doc/source/contributor-ref-good-first-contributions.rst @@ -1,41 +1,41 @@ Good first contributions ======================== -We welcome contributions to Flower! However, it is not always easy to know -where to start. We therefore put together a few recommendations on where to -start to increase your chances of getting your PR accepted into the Flower -codebase. - +We welcome contributions to Flower! However, it is not always easy to know where to +start. We therefore put together a few recommendations on where to start to increase +your chances of getting your PR accepted into the Flower codebase. Where to start -------------- -Until the Flower core library matures it will be easier to get PR's accepted if -they only touch non-core areas of the codebase. Good candidates to get started -are: +Until the Flower core library matures it will be easier to get PR's accepted if they +only touch non-core areas of the codebase. Good candidates to get started are: - Documentation: What's missing? What could be expressed more clearly? - Baselines: See below. - Examples: See below. - Request for Flower Baselines ---------------------------- -If you are not familiar with Flower Baselines, you should probably check-out our `contributing guide for baselines `_. 
+If you are not familiar with Flower Baselines, you should probably check-out our +`contributing guide for baselines +`_. -You should then check out the open -`issues `_ for baseline requests. -If you find a baseline that you'd like to work on and that has no assignees, feel free to assign it to yourself and start working on it! +You should then check out the open `issues +`_ +for baseline requests. If you find a baseline that you'd like to work on and that has no +assignees, feel free to assign it to yourself and start working on it! -Otherwise, if you don't find a baseline you'd like to work on, be sure to open a new issue with the baseline request template! +Otherwise, if you don't find a baseline you'd like to work on, be sure to open a new +issue with the baseline request template! Request for examples -------------------- -We wish we had more time to write usage examples because we believe they help -users to get started with building what they want to build. Here are a few -ideas where we'd be happy to accept a PR: +We wish we had more time to write usage examples because we believe they help users to +get started with building what they want to build. Here are a few ideas where we'd be +happy to accept a PR: - Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch - XGBoost diff --git a/doc/source/contributor-ref-secure-aggregation-protocols.rst b/doc/source/contributor-ref-secure-aggregation-protocols.rst index 7107d04b8cd0..347cb2724424 100644 --- a/doc/source/contributor-ref-secure-aggregation-protocols.rst +++ b/doc/source/contributor-ref-secure-aggregation-protocols.rst @@ -1,13 +1,16 @@ Secure Aggregation Protocols ============================ -Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg protocol has not been implemented yet, so its diagram and abstraction may not be accurate in practice. -The SecAgg protocol can be considered as a special case of the SecAgg+ protocol. +Include SecAgg, SecAgg+, and LightSecAgg protocol. 
The LightSecAgg protocol has not been +implemented yet, so its diagram and abstraction may not be accurate in practice. The +SecAgg protocol can be considered as a special case of the SecAgg+ protocol. -The :code:`SecAgg+` abstraction -------------------------------- +The ``SecAgg+`` abstraction +--------------------------- -In this implementation, each client will be assigned with a unique index (int) for secure aggregation, and thus many python dictionaries used have keys of int type rather than ClientProxy type. +In this implementation, each client will be assigned with a unique index (int) for +secure aggregation, and thus many python dictionaries used have keys of int type rather +than ClientProxy type. .. code-block:: python @@ -15,9 +18,7 @@ In this implementation, each client will be assigned with a unique index (int) f """Abstract base class for the SecAgg+ protocol implementations.""" @abstractmethod - def generate_graph( - self, clients: List[ClientProxy], k: int - ) -> ClientGraph: + def generate_graph(self, clients: List[ClientProxy], k: int) -> ClientGraph: """Build a k-degree undirected graph of clients. Each client will only generate pair-wise masks with its k neighbours. k is equal to the number of clients in SecAgg, i.e., a complete graph. @@ -31,16 +32,16 @@ In this implementation, each client will be assigned with a unique index (int) f @abstractmethod def ask_keys( - self, - clients: List[ClientProxy], ask_keys_ins_list: List[AskKeysIns] + self, clients: List[ClientProxy], ask_keys_ins_list: List[AskKeysIns] ) -> AskKeysResultsAndFailures: """Ask public keys. 
(AskKeysIns is an empty class, and hence ask_keys_ins_list can be omitted.)""" @abstractmethod def share_keys( self, - clients: List[ClientProxy], public_keys_dict: Dict[int, AskKeysRes], - graph: ClientGraph + clients: List[ClientProxy], + public_keys_dict: Dict[int, AskKeysRes], + graph: ClientGraph, ) -> ShareKeysResultsAndFailures: """Send public keys.""" @@ -48,17 +49,18 @@ In this implementation, each client will be assigned with a unique index (int) f def ask_vectors( clients: List[ClientProxy], forward_packet_list_dict: Dict[int, List[ShareKeysPacket]], - client_instructions=None: Dict[int, FitIns] + client_instructions: Dict[int, FitIns] = None, ) -> AskVectorsResultsAndFailures: """Ask vectors of local model parameters. (If client_instructions is not None, local models will be trained in the ask vectors stage, - rather than trained parallelly as the protocol goes through the previous stages.)""" + rather than trained parallelly as the protocol goes through the previous stages.) + """ @abstractmethod def unmask_vectors( clients: List[ClientProxy], dropout_clients: List[ClientProxy], - graph: ClientGraph + graph: ClientGraph, ) -> UnmaskVectorsResultsAndFailures: """Unmask and compute the aggregated model. UnmaskVectorRes contains shares of keys needed to generate masks.""" @@ -155,10 +157,12 @@ The Flower server will execute and process received results in the following ord deactivate P end -The :code:`LightSecAgg` abstraction ------------------------------------ +The ``LightSecAgg`` abstraction +------------------------------- -In this implementation, each client will be assigned with a unique index (int) for secure aggregation, and thus many python dictionaries used have keys of int type rather than ClientProxy type. +In this implementation, each client will be assigned with a unique index (int) for +secure aggregation, and thus many python dictionaries used have keys of int type rather +than ClientProxy type. .. 
code-block:: python @@ -174,7 +178,8 @@ In this implementation, each client will be assigned with a unique index (int) f @abstractmethod def ask_encrypted_encoded_masks( self, - clients: List[ClientProxy], public_keys_dict: Dict[int, LightSecAggSetupConfigRes] + clients: List[ClientProxy], + public_keys_dict: Dict[int, LightSecAggSetupConfigRes], ) -> AskEncryptedEncodedMasksResultsAndFailures: """Ask encrypted encoded masks. The protocol adopts Diffie-Hellman keys to build pair-wise secured channels to transfer encoded mask.""" @@ -183,15 +188,16 @@ In this implementation, each client will be assigned with a unique index (int) f self, clients: List[ClientProxy], forward_packet_list_dict: Dict[int, List[EncryptedEncodedMasksPacket]], - client_instructions=None: Dict[int, FitIns] + client_instructions: Dict[int, FitIns] = None, ) -> AskMaskedModelsResultsAndFailures: """Ask the masked local models. (If client_instructions is not None, local models will be trained in the ask vectors stage, - rather than trained parallelly as the protocol goes through the previous stages.)""" + rather than trained parallelly as the protocol goes through the previous stages.) + """ @abstractmethod def ask_aggregated_encoded_masks( - clients: List[ClientProxy] + clients: List[ClientProxy], ) -> AskAggregatedEncodedMasksResultsAndFailures: """Ask aggregated encoded masks""" @@ -272,158 +278,157 @@ Types .. 
code-block:: python - # the SecAgg+ protocol + # the SecAgg+ protocol + + ClientGraph = Dict[int, List[int]] - ClientGraph = Dict[int, List[int]] + SetupConfigResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, SetupConfigRes]], List[BaseException] + ] - SetupConfigResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, SetupConfigRes]], List[BaseException] - ] + AskKeysResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskKeysRes]], List[BaseException] + ] - AskKeysResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskKeysRes]], List[BaseException] - ] + ShareKeysResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, ShareKeysRes]], List[BaseException] + ] - ShareKeysResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, ShareKeysRes]], List[BaseException] - ] + AskVectorsResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskVectorsRes]], List[BaseException] + ] - AskVectorsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskVectorsRes]], List[BaseException] - ] + UnmaskVectorsResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, UnmaskVectorsRes]], List[BaseException] + ] - UnmaskVectorsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, UnmaskVectorsRes]], List[BaseException] - ] + FitResultsAndFailures = Tuple[List[Tuple[ClientProxy, FitRes]], List[BaseException]] - FitResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, FitRes]], List[BaseException] - ] + @dataclass + class SetupConfigIns: + sec_agg_cfg_dict: Dict[str, Scalar] - @dataclass - class SetupConfigIns: - sec_agg_cfg_dict: Dict[str, Scalar] + @dataclass + class SetupConfigRes: + pass - @dataclass - class SetupConfigRes: - pass + @dataclass + class AskKeysIns: + pass - @dataclass - class AskKeysIns: - pass + @dataclass + class AskKeysRes: + """Ask Keys Stage Response from client to server""" - @dataclass - class AskKeysRes: - """Ask Keys Stage Response from client to server""" - pk1: bytes - pk2: bytes + pk1: bytes + pk2: bytes - @dataclass - class ShareKeysIns: - public_keys_dict: 
Dict[int, AskKeysRes] + @dataclass + class ShareKeysIns: + public_keys_dict: Dict[int, AskKeysRes] - @dataclass - class ShareKeysPacket: - source: int - destination: int - ciphertext: bytes + @dataclass + class ShareKeysPacket: + source: int + destination: int + ciphertext: bytes - @dataclass - class ShareKeysRes: - share_keys_res_list: List[ShareKeysPacket] + @dataclass + class ShareKeysRes: + share_keys_res_list: List[ShareKeysPacket] - @dataclass - class AskVectorsIns: - ask_vectors_in_list: List[ShareKeysPacket] - fit_ins: FitIns + @dataclass + class AskVectorsIns: + ask_vectors_in_list: List[ShareKeysPacket] + fit_ins: FitIns - @dataclass - class AskVectorsRes: - parameters: Parameters + @dataclass + class AskVectorsRes: + parameters: Parameters - @dataclass - class UnmaskVectorsIns: - available_clients: List[int] - dropout_clients: List[int] + @dataclass + class UnmaskVectorsIns: + available_clients: List[int] + dropout_clients: List[int] - @dataclass - class UnmaskVectorsRes: - share_dict: Dict[int, bytes] + @dataclass + class UnmaskVectorsRes: + share_dict: Dict[int, bytes] - # the LightSecAgg protocol + # the LightSecAgg protocol - LightSecAggSetupConfigResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, LightSecAggSetupConfigRes]], List[BaseException] - ] + LightSecAggSetupConfigResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, LightSecAggSetupConfigRes]], List[BaseException] + ] - AskEncryptedEncodedMasksResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskEncryptedEncodedMasksRes]], List[BaseException] - ] + AskEncryptedEncodedMasksResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskEncryptedEncodedMasksRes]], List[BaseException] + ] - AskMaskedModelsResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, AskMaskedModelsRes]], List[BaseException] - ] + AskMaskedModelsResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskMaskedModelsRes]], List[BaseException] + ] - AskAggregatedEncodedMasksResultsAndFailures = Tuple[ - 
List[Tuple[ClientProxy, AskAggregatedEncodedMasksRes]], List[BaseException] - ] + AskAggregatedEncodedMasksResultsAndFailures = Tuple[ + List[Tuple[ClientProxy, AskAggregatedEncodedMasksRes]], List[BaseException] + ] - @dataclass - class LightSecAggSetupConfigIns: - sec_agg_cfg_dict: Dict[str, Scalar] + @dataclass + class LightSecAggSetupConfigIns: + sec_agg_cfg_dict: Dict[str, Scalar] - @dataclass - class LightSecAggSetupConfigRes: - pk: bytes + @dataclass + class LightSecAggSetupConfigRes: + pk: bytes - @dataclass - class AskEncryptedEncodedMasksIns: - public_keys_dict: Dict[int, LightSecAggSetupConfigRes] + @dataclass + class AskEncryptedEncodedMasksIns: + public_keys_dict: Dict[int, LightSecAggSetupConfigRes] - @dataclass - class EncryptedEncodedMasksPacket: - source: int - destination: int - ciphertext: bytes + @dataclass + class EncryptedEncodedMasksPacket: + source: int + destination: int + ciphertext: bytes - @dataclass - class AskEncryptedEncodedMasksRes: - packet_list: List[EncryptedEncodedMasksPacket] + @dataclass + class AskEncryptedEncodedMasksRes: + packet_list: List[EncryptedEncodedMasksPacket] - @dataclass - class AskMaskedModelsIns: - packet_list: List[EncryptedEncodedMasksPacket] - fit_ins: FitIns + @dataclass + class AskMaskedModelsIns: + packet_list: List[EncryptedEncodedMasksPacket] + fit_ins: FitIns - @dataclass - class AskMaskedModelsRes: - parameters: Parameters + @dataclass + class AskMaskedModelsRes: + parameters: Parameters - @dataclass - class AskAggregatedEncodedMasksIns: - surviving_clients: List[int] + @dataclass + class AskAggregatedEncodedMasksIns: + surviving_clients: List[int] - @dataclass - class AskAggregatedEncodedMasksRes: - aggregated_encoded_mask: Parameters + @dataclass + class AskAggregatedEncodedMasksRes: + aggregated_encoded_mask: Parameters diff --git a/doc/source/contributor-tutorial-contribute-on-github.rst b/doc/source/contributor-tutorial-contribute-on-github.rst index 6970e7e8a580..22c6c6ef86b0 100644 --- 
a/doc/source/contributor-tutorial-contribute-on-github.rst +++ b/doc/source/contributor-tutorial-contribute-on-github.rst @@ -1,100 +1,113 @@ Contribute on GitHub ==================== -This guide is for people who want to get involved with Flower, but who are not used to contributing to GitHub projects. - -If you're familiar with how contributing on GitHub works, you can directly checkout our :doc:`getting started guide for contributors `. +This guide is for people who want to get involved with Flower, but who are not used to +contributing to GitHub projects. +If you're familiar with how contributing on GitHub works, you can directly checkout our +:doc:`getting started guide for contributors +`. Setting up the repository ------------------------- 1. **Create a GitHub account and setup Git** - Git is a distributed version control tool. This allows for an entire codebase's history to be stored and every developer's machine. - It is a software that will need to be installed on your local machine, you can follow this `guide `_ to set it up. - - GitHub, itself, is a code hosting platform for version control and collaboration. It allows for everyone to collaborate and work from anywhere on remote repositories. - - If you haven't already, you will need to create an account on `GitHub `_. - - The idea behind the generic Git and GitHub workflow boils down to this: - you download code from a remote repository on GitHub, make changes locally and keep track of them using Git and then you upload your new history back to GitHub. - + Git is a distributed version control tool. This allows for an entire codebase's + history to be stored and every developer's machine. It is a software that will + need to be installed on your local machine, you can follow this `guide + `_ to + set it up. + + GitHub, itself, is a code hosting platform for version control and collaboration. + It allows for everyone to collaborate and work from anywhere on remote + repositories. 
+ + If you haven't already, you will need to create an account on `GitHub + `_. + + The idea behind the generic Git and GitHub workflow boils down to this: you + download code from a remote repository on GitHub, make changes locally and keep + track of them using Git and then you upload your new history back to GitHub. 2. **Forking the Flower repository** - A fork is a personal copy of a GitHub repository. To create one for Flower, you must navigate to ``_ (while connected to your GitHub account) - and click the ``Fork`` button situated on the top right of the page. - - .. image:: _static/fork_button.png + A fork is a personal copy of a GitHub repository. To create one for Flower, you + must navigate to https://github.com/adap/flower (while connected to your GitHub + account) and click the ``Fork`` button situated on the top right of the page. - You can change the name if you want, but this is not necessary as this version of Flower will be yours and will sit inside your own account (i.e., in your own list of repositories). - Once created, you should see on the top left corner that you are looking at your own version of Flower. + .. image:: _static/fork_button.png - .. image:: _static/fork_link.png + You can change the name if you want, but this is not necessary as this version of + Flower will be yours and will sit inside your own account (i.e., in your own list + of repositories). Once created, you should see on the top left corner that you + are looking at your own version of Flower. + .. image:: _static/fork_link.png 3. **Cloning your forked repository** - The next step is to download the forked repository on your machine to be able to make changes to it. - On your forked repository page, you should first click on the ``Code`` button on the right, - this will give you the ability to copy the HTTPS link of the repository. + The next step is to download the forked repository on your machine to be able to + make changes to it. 
On your forked repository page, you should first click on the + ``Code`` button on the right, this will give you the ability to copy the HTTPS + link of the repository. - .. image:: _static/cloning_fork.png + .. image:: _static/cloning_fork.png - Once you copied the \, you can open a terminal on your machine, navigate to the place you want to download the repository to and type: + Once you copied the \, you can open a terminal on your machine, navigate to + the place you want to download the repository to and type: - .. code-block:: shell + .. code-block:: shell - $ git clone - - This will create a ``flower/`` (or the name of your fork if you renamed it) folder in the current working directory. + $ git clone + This will create a ``flower/`` (or the name of your fork if you renamed it) + folder in the current working directory. 4. **Add origin** - You can then go into the repository folder: - - .. code-block:: shell - - $ cd flower + You can then go into the repository folder: - And here we will need to add an origin to our repository. The origin is the \ of the remote fork repository. - To obtain it, we can do as previously mentioned by going to our fork repository on our GitHub account and copying the link. + .. code-block:: shell - .. image:: _static/cloning_fork.png + $ cd flower - Once the \ is copied, we can type the following command in our terminal: + And here we will need to add an origin to our repository. The origin is the + \ of the remote fork repository. To obtain it, we can do as previously + mentioned by going to our fork repository on our GitHub account and copying the + link. - .. code-block:: shell + .. image:: _static/cloning_fork.png - $ git remote add origin + Once the \ is copied, we can type the following command in our terminal: + .. code-block:: shell + $ git remote add origin 5. **Add upstream** - Now we will add an upstream address to our repository. 
- Still in the same directory, we must run the following command: + Now we will add an upstream address to our repository. Still in the same + directory, we must run the following command: - .. code-block:: shell + .. code-block:: shell - $ git remote add upstream https://github.com/adap/flower.git + $ git remote add upstream https://github.com/adap/flower.git - The following diagram visually explains what we did in the previous steps: + The following diagram visually explains what we did in the previous steps: - .. image:: _static/github_schema.png + .. image:: _static/github_schema.png - The upstream is the GitHub remote address of the parent repository (in this case Flower), - i.e. the one we eventually want to contribute to and therefore need an up-to-date history of. - The origin is just the GitHub remote address of the forked repository we created, i.e. the copy (fork) in our own account. + The upstream is the GitHub remote address of the parent repository (in this case + Flower), i.e. the one we eventually want to contribute to and therefore need an + up-to-date history of. The origin is just the GitHub remote address of the forked + repository we created, i.e. the copy (fork) in our own account. - To make sure our local version of the fork is up-to-date with the latest changes from the Flower repository, - we can execute the following command: + To make sure our local version of the fork is up-to-date with the latest changes + from the Flower repository, we can execute the following command: - .. code-block:: shell - - $ git pull upstream main + .. code-block:: shell + $ git pull upstream main Setting up the coding environment --------------------------------- -This can be achieved by following this :doc:`getting started guide for contributors ` (note that you won't need to clone the repository). -Once you are able to write code and test it, you can finally start making changes! 
- +This can be achieved by following this :doc:`getting started guide for contributors +` (note that you won't need to clone +the repository). Once you are able to write code and test it, you can finally start +making changes! Making changes -------------- @@ -112,211 +125,233 @@ And with Flower's repository: $ git pull upstream main 1. **Create a new branch** - To make the history cleaner and easier to work with, it is good practice to - create a new branch for each feature/project that needs to be implemented. - - To do so, just run the following command inside the repository's directory: + To make the history cleaner and easier to work with, it is good practice to + create a new branch for each feature/project that needs to be implemented. - .. code-block:: shell + To do so, just run the following command inside the repository's directory: - $ git switch -c + .. code-block:: shell + $ git switch -c 2. **Make changes** - Write great code and create wonderful changes using your favorite editor! - + Write great code and create wonderful changes using your favorite editor! 3. **Test and format your code** - Don't forget to test and format your code! Otherwise your code won't be able to be merged into the Flower repository. - This is done so the codebase stays consistent and easy to understand. - - To do so, we have written a few scripts that you can execute: + Don't forget to test and format your code! Otherwise your code won't be able to + be merged into the Flower repository. This is done so the codebase stays + consistent and easy to understand. - .. code-block:: shell + To do so, we have written a few scripts that you can execute: - $ ./dev/format.sh # to format your code - $ ./dev/test.sh # to test that your code can be accepted - $ ./baselines/dev/format.sh # same as above but for code added to baselines - $ ./baselines/dev/test.sh # same as above but for code added to baselines + .. 
code-block:: shell + $ ./dev/format.sh # to format your code + $ ./dev/test.sh # to test that your code can be accepted + $ ./baselines/dev/format.sh # same as above but for code added to baselines + $ ./baselines/dev/test.sh # same as above but for code added to baselines 4. **Stage changes** - Before creating a commit that will update your history, you must specify to Git which files it needs to take into account. - - This can be done with: + Before creating a commit that will update your history, you must specify to Git + which files it needs to take into account. - .. code-block:: shell + This can be done with: - $ git add + .. code-block:: shell - To check which files have been modified compared to the last version (last commit) and to see which files are staged for commit, - you can use the :code:`git status` command. + $ git add + To check which files have been modified compared to the last version (last + commit) and to see which files are staged for commit, you can use the ``git + status`` command. 5. **Commit changes** - Once you have added all the files you wanted to commit using :code:`git add`, you can finally create your commit using this command: + Once you have added all the files you wanted to commit using ``git add``, you can + finally create your commit using this command: - .. code-block:: shell + .. code-block:: shell - $ git commit -m "" - - The \ is there to explain to others what the commit does. It should be written in an imperative style and be concise. - An example would be :code:`git commit -m "Add images to README"`. + $ git commit -m "" + The \ is there to explain to others what the commit does. It + should be written in an imperative style and be concise. An example would be + ``git commit -m "Add images to README"``. 6. 
**Push the changes to the fork** - Once we have committed our changes, we have effectively updated our local history, but GitHub has no way of knowing this unless we push - our changes to our origin's remote address: - - .. code-block:: shell + Once we have committed our changes, we have effectively updated our local + history, but GitHub has no way of knowing this unless we push our changes to our + origin's remote address: - $ git push -u origin + .. code-block:: shell - Once this is done, you will see on the GitHub that your forked repo was updated with the changes you have made. + $ git push -u origin + Once this is done, you will see on the GitHub that your forked repo was updated + with the changes you have made. Creating and merging a pull request (PR) ---------------------------------------- 1. **Create the PR** - Once you have pushed changes, on the GitHub webpage of your repository you should see the following message: - - .. image:: _static/compare_and_pr.png + Once you have pushed changes, on the GitHub webpage of your repository you should + see the following message: - Otherwise you can always find this option in the ``Branches`` page. + .. image:: _static/compare_and_pr.png - Once you click the ``Compare & pull request`` button, you should see something similar to this: + Otherwise you can always find this option in the ``Branches`` page. - .. image:: _static/creating_pr.png + Once you click the ``Compare & pull request`` button, you should see something + similar to this: - At the top you have an explanation of which branch will be merged where: + .. image:: _static/creating_pr.png - .. image:: _static/merging_branch.png + At the top you have an explanation of which branch will be merged where: - In this example you can see that the request is to merge the branch ``doc-fixes`` from my forked repository to branch ``main`` from the Flower repository. + .. 
image:: _static/merging_branch.png - The title should be changed to adhere to the :ref:`pr_title_format` guidelines, otherwise it won't be possible to merge the PR. So in this case, - a correct title might be ``docs(framework:skip) Fix typos``. + In this example you can see that the request is to merge the branch ``doc-fixes`` + from my forked repository to branch ``main`` from the Flower repository. - The input box in the middle is there for you to describe what your PR does and to link it to existing issues. - We have placed comments (that won't be rendered once the PR is opened) to guide you through the process. + The title should be changed to adhere to the :ref:`pr_title_format` guidelines, + otherwise it won't be possible to merge the PR. So in this case, a correct title + might be ``docs(framework:skip) Fix typos``. - It is important to follow the instructions described in comments. + The input box in the middle is there for you to describe what your PR does and to + link it to existing issues. We have placed comments (that won't be rendered once + the PR is opened) to guide you through the process. - At the bottom you will find the button to open the PR. This will notify reviewers that a new PR has been opened and - that they should look over it to merge or to request changes. + It is important to follow the instructions described in comments. - If your PR is not yet ready for review, and you don't want to notify anyone, you have the option to create a draft pull request: + At the bottom you will find the button to open the PR. This will notify reviewers + that a new PR has been opened and that they should look over it to merge or to + request changes. - .. image:: _static/draft_pr.png + If your PR is not yet ready for review, and you don't want to notify anyone, you + have the option to create a draft pull request: + .. image:: _static/draft_pr.png 2. 
**Making new changes** - Once the PR has been opened (as draft or not), you can still push new commits to it the same way we did before, by making changes to the branch associated with the PR. - + Once the PR has been opened (as draft or not), you can still push new commits to + it the same way we did before, by making changes to the branch associated with + the PR. 3. **Review the PR** - Once the PR has been opened or once the draft PR has been marked as ready, a review from code owners will be automatically requested: - - .. image:: _static/opened_pr.png - - Code owners will then look into the code, ask questions, request changes or validate the PR. + Once the PR has been opened or once the draft PR has been marked as ready, a + review from code owners will be automatically requested: - Merging will be blocked if there are ongoing requested changes. + .. image:: _static/opened_pr.png - .. image:: _static/changes_requested.png + Code owners will then look into the code, ask questions, request changes or + validate the PR. - To resolve them, just push the necessary changes to the branch associated with the PR: + Merging will be blocked if there are ongoing requested changes. - .. image:: _static/make_changes.png + .. image:: _static/changes_requested.png - And resolve the conversation: + To resolve them, just push the necessary changes to the branch associated with + the PR: - .. image:: _static/resolve_conv.png + .. image:: _static/make_changes.png - Once all the conversations have been resolved, you can re-request a review. + And resolve the conversation: + .. image:: _static/resolve_conv.png + Once all the conversations have been resolved, you can re-request a review. 4. **Once the PR is merged** - If all the automatic tests have passed and reviewers have no more changes to request, they can approve the PR and merge it. + If all the automatic tests have passed and reviewers have no more changes to + request, they can approve the PR and merge it. - .. 
image:: _static/merging_pr.png + .. image:: _static/merging_pr.png - Once it is merged, you can delete the branch on GitHub (a button should appear to do so) and also delete it locally by doing: + Once it is merged, you can delete the branch on GitHub (a button should appear to + do so) and also delete it locally by doing: - .. code-block:: shell + .. code-block:: shell - $ git switch main - $ git branch -D + $ git switch main + $ git branch -D - Then you should update your forked repository by doing: + Then you should update your forked repository by doing: - .. code-block:: shell - - $ git pull upstream main # to update the local repository - $ git push origin main # to push the changes to the remote repository + .. code-block:: shell + $ git pull upstream main # to update the local repository + $ git push origin main # to push the changes to the remote repository Example of first contribution ----------------------------- Problem -******* +~~~~~~~ -For our documentation, we've started to use the `Diàtaxis framework `_. +For our documentation, we've started to use the `Diàtaxis framework +`_. -Our "How to" guides should have titles that continue the sentence "How to …", for example, "How to upgrade to Flower 1.0". +Our "How to" guides should have titles that continue the sentence "How to …", for +example, "How to upgrade to Flower 1.0". -Most of our guides do not follow this new format yet, and changing their title is (unfortunately) more involved than one might think. +Most of our guides do not follow this new format yet, and changing their title is +(unfortunately) more involved than one might think. -This issue is about changing the title of a doc from present continuous to present simple. +This issue is about changing the title of a doc from present continuous to present +simple. -Let's take the example of "Saving Progress" which we changed to "Save Progress". Does this pass our check? 
+Let's take the example of "Saving Progress" which we changed to "Save Progress". Does +this pass our check? Before: "How to saving progress" ❌ After: "How to save progress" ✅ Solution -******** +~~~~~~~~ -This is a tiny change, but it'll allow us to test your end-to-end setup. After cloning and setting up the Flower repo, here's what you should do: +This is a tiny change, but it'll allow us to test your end-to-end setup. After cloning +and setting up the Flower repo, here's what you should do: - Find the source file in ``doc/source`` -- Make the change in the ``.rst`` file (beware, the dashes under the title should be the same length as the title itself) -- Build the docs and `check the result `_ +- Make the change in the ``.rst`` file (beware, the dashes under the title should be the + same length as the title itself) +- Build the docs and `check the result + `_ Rename file -::::::::::: ++++++++++++ -You might have noticed that the file name still reflects the old wording. -If we just change the file, then we break all existing links to it - it is **very important** to avoid that, breaking links can harm our search engine ranking. +You might have noticed that the file name still reflects the old wording. If we just +change the file, then we break all existing links to it - it is **very important** to +avoid that, breaking links can harm our search engine ranking. Here's how to change the file name: - Change the file name to ``save-progress.rst`` - Add a redirect rule to ``doc/source/conf.py`` -This will cause a redirect from ``saving-progress.html`` to ``save-progress.html``, old links will continue to work. +This will cause a redirect from ``saving-progress.html`` to ``save-progress.html``, old +links will continue to work. Apply changes in the index file -::::::::::::::::::::::::::::::: ++++++++++++++++++++++++++++++++ -For the lateral navigation bar to work properly, it is very important to update the ``index.rst`` file as well. 
-This is where we define the whole arborescence of the navbar. +For the lateral navigation bar to work properly, it is very important to update the +``index.rst`` file as well. This is where we define the whole arborescence of the +navbar. - Find and modify the file name in ``index.rst`` Open PR -::::::: ++++++++ -- Commit the changes (commit messages are always imperative: "Do something", in this case "Change …") +- Commit the changes (commit messages are always imperative: "Do something", in this + case "Change …") - Push the changes to your fork - Open a PR (as shown above) with title ``docs(framework) Update how-to guide title`` - Wait for it to be approved! - Congrats! 🥳 You're now officially a Flower contributor! - Next steps ---------- -Once you have made your first PR, and want to contribute more, be sure to check out the following : - -- :doc:`Good first contributions `, where you should particularly look into the :code:`baselines` contributions. +Once you have made your first PR, and want to contribute more, be sure to check out the +following : +- :doc:`Good first contributions `, where you + should particularly look into the ``baselines`` contributions. Appendix -------- @@ -324,7 +359,7 @@ Appendix .. _pr_title_format: PR title format -*************** +~~~~~~~~~~~~~~~ We enforce the following PR title format: @@ -334,9 +369,10 @@ We enforce the following PR title format: (or ``(:skip) `` to ignore the PR in the changelog) -Where ```` needs to be in ``{ci, fix, feat, docs, refactor, break}``, ```` -should be in ``{framework, baselines, datasets, examples, or '*' when modifying multiple projects which requires the ':skip' flag to be used}``, -and ```` starts with a capitalised verb in the imperative mood. 
+Where ```` needs to be in ``{ci, fix, feat, docs, refactor, break}``, +```` should be in ``{framework, baselines, datasets, examples, or '*' when +modifying multiple projects which requires the ':skip' flag to be used}``, and +```` starts with a capitalised verb in the imperative mood. Valid examples: diff --git a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst index d7d647996a3d..11b0d3760d4a 100644 --- a/doc/source/contributor-tutorial-get-started-as-a-contributor.rst +++ b/doc/source/contributor-tutorial-get-started-as-a-contributor.rst @@ -4,168 +4,195 @@ Get started as a contributor Prerequisites ------------- -- `Python 3.8 `_ or above +- `Python 3.9 `_ or above - `Poetry 1.3 `_ or above - (Optional) `pyenv `_ - (Optional) `pyenv-virtualenv `_ -Flower uses :code:`pyproject.toml` to manage dependencies and configure -development tools (the ones which support it). Poetry is a build tool which -supports `PEP 517 `_. - +Flower uses ``pyproject.toml`` to manage dependencies and configure development tools +(the ones which support it). Poetry is a build tool which supports `PEP 517 +`_. Developer Machine Setup ----------------------- Preliminaries ~~~~~~~~~~~~~ + Some system-wide dependencies are needed. For macOS -^^^^^^^^^ ++++++++++ + +- Install `homebrew `_. Don't forget the post-installation actions to + add `brew` to your PATH. +- Install `xz` (to install different Python versions) and `pandoc` to build the docs: -* Install `homebrew `_. Don't forget the post-installation actions to add `brew` to your PATH. 
-* Install `xz` (to install different Python versions) and `pandoc` to build the - docs:: + :: - $ brew install xz pandoc + $ brew install xz pandoc For Ubuntu -^^^^^^^^^^ -Ensure you system (Ubuntu 22.04+) is up-to-date, and you have all necessary -packages:: +++++++++++ - $ apt update - $ apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ - libreadline-dev libbz2-dev libffi-dev liblzma-dev pandoc +Ensure your system (Ubuntu 22.04+) is up-to-date, and you have all necessary packages: +:: + + $ apt update + $ apt install build-essential zlib1g-dev libssl-dev libsqlite3-dev \ + libreadline-dev libbz2-dev libffi-dev liblzma-dev pandoc Create Flower Dev Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -1. Clone the `Flower repository `_ from -GitHub:: +1. Clone the `Flower repository `_ from GitHub: - $ git clone git@github.com:adap/flower.git - $ cd flower +:: -2. Let's create the Python environment for all-things Flower. If you wish to use :code:`pyenv`, we provide two convenience scripts that you can use. If you prefer using something else than :code:`pyenv`, create a new environment, activate and skip to the last point where all packages are installed. + $ git clone git@github.com:adap/flower.git + $ cd flower -* If you don't have :code:`pyenv` installed, the following script that will install it, set it up, and create the virtual environment (with :code:`Python 3.8.17` by default):: +2. Let's create the Python environment for all-things Flower. If you wish to use + ``pyenv``, we provide two convenience scripts that you can use. If you prefer using + something else than ``pyenv``, create a new environment, activate and skip to the + last point where all packages are installed.
- $ ./dev/setup-defaults.sh # once completed, run the bootstrap script +- If you don't have ``pyenv`` installed, the following script will install it, set + it up, and create the virtual environment (with :substitution-code:`Python + |python_full_version|` by default): -* If you already have :code:`pyenv` installed (along with the :code:`pyenv-virtualenv` plugin), you can use the following convenience script (with :code:`Python 3.8.17` by default):: + :: - $ ./dev/venv-create.sh # once completed, run the `bootstrap.sh` script + $ ./dev/setup-defaults.sh # once completed, run the bootstrap script -3. Install the Flower package in development mode (think -:code:`pip install -e`) along with all necessary dependencies:: +- If you already have ``pyenv`` installed (along with the ``pyenv-virtualenv`` plugin), + you can use the following convenience script (with :substitution-code:`Python + |python_full_version|` by default): - (flower-) $ ./dev/bootstrap.sh + :: + $ ./dev/venv-create.sh # once completed, run the `bootstrap.sh` script + +3. Install the Flower package in development mode (think ``pip install -e``) along with +all necessary dependencies: + +:: + + (flower-) $ ./dev/bootstrap.sh Convenience Scripts ------------------- -The Flower repository contains a number of convenience scripts to make -recurring development tasks easier and less error-prone. See the :code:`/dev` -subdirectory for a full list. The following scripts are amongst the most -important ones: +The Flower repository contains a number of convenience scripts to make recurring +development tasks easier and less error-prone. See the ``/dev`` subdirectory for a full +list. The following scripts are amongst the most important ones: Create/Delete Virtual Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -:: +..
code-block:: shell + :substitutions: - $ ./dev/venv-create.sh # Default is 3.8.17 - $ ./dev/venv-delete.sh # Default is 3.8.17 + $ ./dev/venv-create.sh # Default is |python_full_version| + $ ./dev/venv-delete.sh # Default is |python_full_version| Compile ProtoBuf Definitions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :: - $ python -m flwr_tool.protoc + $ python -m flwr_tool.protoc Auto-Format Code ~~~~~~~~~~~~~~~~ :: - $ ./dev/format.sh + $ ./dev/format.sh Run Linters and Tests ~~~~~~~~~~~~~~~~~~~~~ :: - $ ./dev/test.sh + $ ./dev/test.sh Add a pre-commit hook ~~~~~~~~~~~~~~~~~~~~~ -Developers may integrate a pre-commit hook into their workflow utilizing the `pre-commit `_ library. The pre-commit hook is configured to execute two primary operations: ``./dev/format.sh`` and ``./dev/test.sh`` scripts. +Developers may integrate a pre-commit hook into their workflow utilizing the `pre-commit +`_ library. The pre-commit hook is configured to +execute two primary operations: ``./dev/format.sh`` and ``./dev/test.sh`` scripts. There are multiple ways developers can use this: 1. Install the pre-commit hook to your local git directory by simply running: :: - - $ pre-commit install - - Each ``git commit`` will trigger the execution of formatting and linting/test scripts. - - If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` command. + $ pre-commit install + + - Each ``git commit`` will trigger the execution of formatting and linting/test + scripts. + - If in a hurry, bypass the hook using ``--no-verify`` with the ``git commit`` + command. + :: - - $ git commit --no-verify -m "Add new feature" - -2. For developers who prefer not to install the hook permanently, it is possible to execute a one-time check prior to committing changes by using the following command: - + + $ git commit --no-verify -m "Add new feature" + +2. 
For developers who prefer not to install the hook permanently, it is possible to + execute a one-time check prior to committing changes by using the following command: + :: - $ pre-commit run --all-files - - This executes the formatting and linting checks/tests on all the files without modifying the default behavior of ``git commit``. + $ pre-commit run --all-files + + This executes the formatting and linting checks/tests on all the files without + modifying the default behavior of ``git commit``. Run Github Actions (CI) locally ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Developers could run the full set of Github Actions workflows under their local -environment by using `Act `_. Please refer to -the installation instructions under the linked repository and run the next -command under Flower main cloned repository folder:: +environment by using `Act `_. Please refer to the +installation instructions under the linked repository and run the next command under +Flower main cloned repository folder: - $ act +:: -The Flower default workflow would run by setting up the required Docker -machines underneath. + $ act +The Flower default workflow would run by setting up the required Docker machines +underneath. Build Release ------------- -Flower uses Poetry to build releases. The necessary command is wrapped in a -simple script:: +Flower uses Poetry to build releases. The necessary command is wrapped in a simple +script: - $ ./dev/build.sh +:: -The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in the -:code:`/dist` subdirectory. + $ ./dev/build.sh +The resulting ``.whl`` and ``.tar.gz`` releases will be stored in the ``/dist`` +subdirectory. Build Documentation ------------------- Flower's documentation uses `Sphinx `_. 
There's no -convenience script to re-build the documentation yet, but it's pretty easy:: +convenience script to re-build the documentation yet, but it's pretty easy: + +:: - $ cd doc - $ make html + $ cd doc + $ make html This will generate HTML documentation in ``doc/build/html``. -Note that, in order to build the documentation locally -(with ``poetry run make html``, like described below), -`Pandoc `_ needs to be installed on the system. +Note that, in order to build the documentation locally (with ``poetry run make html``, +like described below), `Pandoc `_ needs to be +installed on the system. diff --git a/doc/source/docker/enable-tls.rst b/doc/source/docker/enable-tls.rst index ac604b708f88..7225f86a5ddb 100644 --- a/doc/source/docker/enable-tls.rst +++ b/doc/source/docker/enable-tls.rst @@ -1,152 +1,152 @@ Enable TLS for Secure Connections ================================= -When operating in a production environment, it is strongly recommended to enable Transport Layer -Security (TLS) for each Flower Component to ensure secure communication. +When operating in a production environment, it is strongly recommended to enable +Transport Layer Security (TLS) for each Flower Component to ensure secure communication. -To enable TLS, you will need a PEM-encoded root certificate, a PEM-encoded private key and a -PEM-encoded certificate chain. +To enable TLS, you will need a PEM-encoded root certificate, a PEM-encoded private key +and a PEM-encoded certificate chain. .. note:: - For testing purposes, you can generate your own self-signed certificates. The - `Enable SSL connections `__ - page contains a section that will guide you through the process. + For testing purposes, you can generate your own self-signed certificates. The + `Enable SSL connections + `__ + page contains a section that will guide you through the process. 
+Because Flower containers, by default, run with a non-root user ``app``, the mounted +files and directories must have the proper permissions for the user ID ``49999``. -Because Flower containers, by default, run with a non-root user ``app``, the mounted files and -directories must have the proper permissions for the user ID ``49999``. +For example, to change the user ID of all files in the ``certificates/`` directory, you +can run ``sudo chown -R 49999:49999 certificates/*``. -For example, to change the user ID of all files in the ``certificates/`` directory, you can run -``sudo chown -R 49999:49999 certificates/*``. - -If you later want to delete the directory, you can change the user ID back to the current user -ID by running ``sudo chown -R $USER:$(id -gn) state``. +If you later want to delete the directory, you can change the user ID back to the +current user ID by running ``sudo chown -R $USER:$(id -gn) certificates``. SuperLink --------- -Assuming all files we need are in the local ``certificates`` directory, we can use the flag -``--volume`` to mount the local directory into the ``/app/certificates/`` directory of the container: +Assuming all files we need are in the local ``certificates`` directory, we can use the +flag ``--volume`` to mount the local directory into the ``/app/certificates/`` directory +of the container: .. code-block:: bash - :substitutions: + :substitutions: - $ docker run --rm \ - --volume ./certificates/:/app/certificates/:ro \ - flwr/superlink:|stable_flwr_version| \ - --ssl-ca-certfile certificates/ca.crt \ - --ssl-certfile certificates/server.pem \ - --ssl-keyfile certificates/server.key + $ docker run --rm \ + --volume ./certificates/:/app/certificates/:ro \ + flwr/superlink:|stable_flwr_version| \ + --ssl-ca-certfile certificates/ca.crt \ + --ssl-certfile certificates/server.pem \ + --ssl-keyfile certificates/server.key .. dropdown:: Understanding the command - * ``docker run``: This tells Docker to run a container from an image. 
- * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in - | the current working directory of the host machine as a read-only volume at the - | ``/app/certificates`` directory inside the container. - | - | This allows the container to access the TLS certificates that are stored in the certificates - | directory. - * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. - * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file - | inside the container. - | - | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the - | SuperLink. - * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperLink's - | TLS certificate file inside the container. - | - | The ``certificates/server.pem`` file is used to identify the SuperLink and to encrypt the - | data that is transmitted over the network. - * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperLink's - | TLS private key file inside the container. - | - | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over - | the network. + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in + | the current working directory of the host machine as a read-only volume at the + | ``/app/certificates`` directory inside the container. + | + | This allows the container to access the TLS certificates that are stored in the certificates + | directory. 
+ * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file + | inside the container. + | + | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the + | SuperLink. + * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperLink's + | TLS certificate file inside the container. + | + | The ``certificates/server.pem`` file is used to identify the SuperLink and to encrypt the + | data that is transmitted over the network. + * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperLink's + | TLS private key file inside the container. + | + | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over + | the network. SuperNode --------- -Assuming that the ``ca.crt`` certificate already exists locally, we can use the flag ``--volume`` to mount the local -certificate into the container's ``/app/`` directory. +Assuming that the ``ca.crt`` certificate already exists locally, we can use the flag +``--volume`` to mount the local certificate into the container's ``/app/`` directory. .. note:: - If you're generating self-signed certificates and the ``ca.crt`` certificate doesn't exist - on the SuperNode, you can copy it over after the generation step. + If you're generating self-signed certificates and the ``ca.crt`` certificate doesn't + exist on the SuperNode, you can copy it over after the generation step. .. 
code-block:: bash - :substitutions: + :substitutions: - $ docker run --rm \ - --volume ./ca.crt:/app/ca.crt/:ro \ - flwr/supernode:|stable_flwr_version| \ - --root-certificates ca.crt + $ docker run --rm \ + --volume ./ca.crt:/app/ca.crt/:ro \ + flwr/supernode:|stable_flwr_version| \ + --root-certificates ca.crt .. dropdown:: Understanding the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the - | current working directory of the host machine as a read-only volume at the ``/app/ca.crt`` - | directory inside the container. - * | :substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. - * | ``--root-certificates ca.crt``: This specifies the location of the CA certificate file - | inside the container. - | - | The ``ca.crt`` file is used to verify the identity of the SuperLink. - + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./ca.crt:/app/ca.crt/:ro``: Mount the ``ca.crt`` file from the + | current working directory of the host machine as a read-only volume at the ``/app/ca.crt`` + | directory inside the container. + * | :substitution-code:`flwr/supernode:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--root-certificates ca.crt``: This specifies the location of the CA certificate file + | inside the container. + | + | The ``ca.crt`` file is used to verify the identity of the SuperLink. 
SuperExec --------- -Assuming all files we need are in the local ``certificates`` directory where the SuperExec will be executed from, we can use the flag -``--volume`` to mount the local directory into the ``/app/certificates/`` directory of the container: +Assuming all files we need are in the local ``certificates`` directory where the +SuperExec will be executed from, we can use the flag ``--volume`` to mount the local +directory into the ``/app/certificates/`` directory of the container: .. code-block:: bash - :substitutions: - - $ docker run --rm \ - --volume ./certificates/:/app/certificates/:ro \ - flwr/superexec:|stable_flwr_version| \ - --ssl-ca-certfile certificates/ca.crt \ - --ssl-certfile certificates/server.pem \ - --ssl-keyfile certificates/server.key \ - --executor-config \ - root-certificates=\"certificates/superlink_ca.crt\" + :substitutions: + $ docker run --rm \ + --volume ./certificates/:/app/certificates/:ro \ + flwr/superexec:|stable_flwr_version| \ + --ssl-ca-certfile certificates/ca.crt \ + --ssl-certfile certificates/server.pem \ + --ssl-keyfile certificates/server.key \ + --executor-config \ + root-certificates=\"certificates/superlink_ca.crt\" .. dropdown:: Understanding the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in - | the current working directory of the host machine as a read-only volume at the - | ``/app/certificates`` directory inside the container. - | - | This allows the container to access the TLS certificates that are stored in the certificates - | directory. - * | :substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. 
- * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file - | inside the container. - | - | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the - | SuperExec. - * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperExec's - | TLS certificate file inside the container. - | - | The ``certificates/server.pem`` file is used to identify the SuperExec and to encrypt the - | data that is transmitted over the network. - * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperExec's - | TLS private key file inside the container. - | - | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over - | the network. - * | ``--executor-config root-certificates=\"certificates/superlink_ca.crt\"``: Specify the - | location of the CA certificate file inside the container that the SuperExec executor - | should use to verify the SuperLink's identity. + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``--volume ./certificates/:/app/certificates/:ro``: Mount the ``certificates`` directory in + | the current working directory of the host machine as a read-only volume at the + | ``/app/certificates`` directory inside the container. + | + | This allows the container to access the TLS certificates that are stored in the certificates + | directory. + * | :substitution-code:`flwr/superexec:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a specific version of the image. + * | ``--ssl-ca-certfile certificates/ca.crt``: Specify the location of the CA certificate file + | inside the container. + | + | The ``certificates/ca.crt`` file is a certificate that is used to verify the identity of the + | SuperExec. 
+ * | ``--ssl-certfile certificates/server.pem``: Specify the location of the SuperExec's + | TLS certificate file inside the container. + | + | The ``certificates/server.pem`` file is used to identify the SuperExec and to encrypt the + | data that is transmitted over the network. + * | ``--ssl-keyfile certificates/server.key``: Specify the location of the SuperExec's + | TLS private key file inside the container. + | + | The ``certificates/server.key`` file is used to decrypt the data that is transmitted over + | the network. + * | ``--executor-config root-certificates=\"certificates/superlink_ca.crt\"``: Specify the + | location of the CA certificate file inside the container that the SuperExec executor + | should use to verify the SuperLink's identity. diff --git a/doc/source/docker/index.rst b/doc/source/docker/index.rst index a070a47cb853..6449317ef19a 100644 --- a/doc/source/docker/index.rst +++ b/doc/source/docker/index.rst @@ -1,45 +1,47 @@ Run Flower using Docker ======================= -Start your Flower journey with our pre-made Docker images on Docker Hub, supporting ``amd64`` -and ``arm64v8`` architectures. +Start your Flower journey with our pre-made Docker images on Docker Hub, supporting +``amd64`` and ``arm64v8`` architectures. -Our Quickstart guide walks you through containerizing a Flower project and running it end to -end using Docker. +Our Quickstart guide walks you through containerizing a Flower project and running it +end to end using Docker. Getting Started --------------- .. toctree:: - :maxdepth: 1 - - tutorial-quickstart-docker + :maxdepth: 1 + tutorial-quickstart-docker Running in Production --------------------- .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - enable-tls - persist-superlink-state + enable-tls + persist-superlink-state Advanced Options ---------------- .. 
toctree:: - :maxdepth: 1 + :maxdepth: 1 - set-environment-variables - run-as-root-user - pin-version - use-a-different-version + set-environment-variables + run-as-root-user + run-as-subprocess + pin-version + use-a-different-version -Run Flower Docker Compose -------------------------- +Run Flower using Docker Compose +------------------------------- .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - tutorial-quickstart-docker-compose + tutorial-quickstart-docker-compose + run-quickstart-examples-docker-compose + tutorial-deploy-on-multiple-machines diff --git a/doc/source/docker/persist-superlink-state.rst b/doc/source/docker/persist-superlink-state.rst index 68e04ed33762..214e408c44c3 100644 --- a/doc/source/docker/persist-superlink-state.rst +++ b/doc/source/docker/persist-superlink-state.rst @@ -1,39 +1,40 @@ Persist the State of the SuperLink ================================== -By default, the Flower SuperLink keeps its state in-memory. When using the Docker flag ``--rm``, the -state is not persisted between container starts. +By default, the Flower SuperLink keeps its state in-memory. When using the Docker flag +``--rm``, the state is not persisted between container starts. -If you want to persist the state of the SuperLink on your host system, all you need to do is specify -a directory where you want to save the file on your host system and a name for the database file. +If you want to persist the state of the SuperLink on your host system, all you need to +do is specify a directory where you want to save the file on your host system and a name +for the database file. -By default, the SuperLink container runs with a non-root user called ``app`` with the user ID -``49999``. It is recommended to create a new directory and change the user ID of the directory to -``49999`` to ensure the mounted directory has the proper permissions. +By default, the SuperLink container runs with a non-root user called ``app`` with the +user ID ``49999``. 
It is recommended to create a new directory and change the user ID of +the directory to ``49999`` to ensure the mounted directory has the proper permissions. -If you later want to delete the directory, you can change the user ID back to the current user -ID by running ``sudo chown -R $USER:$(id -gn) state``. +If you later want to delete the directory, you can change the user ID back to the +current user ID by running ``sudo chown -R $USER:$(id -gn) state``. Example ------- -In the example below, we create a new directory called ``state``, change the user ID and tell -Docker via the flag ``--volume`` to mount the local ``state`` directory into the ``/app/state`` -directory of the container. Lastly, we use the flag ``--database`` to specify the name of the -database file. +In the example below, we create a new directory called ``state``, change the user ID and +tell Docker via the flag ``--volume`` to mount the local ``state`` directory into the +``/app/state`` directory of the container. Lastly, we use the flag ``--database`` to +specify the name of the database file. .. code-block:: bash - :substitutions: - - $ mkdir state - $ sudo chown -R 49999:49999 state - $ docker run --rm \ - --volume ./state/:/app/state flwr/superlink:|stable_flwr_version| \ - --database state.db \ - ... - -As soon as the SuperLink starts, the file ``state.db`` is created in the ``state`` directory on -your host system. If the file already exists, the SuperLink tries to restore the state from the -file. To start the SuperLink with an empty database, ensure that there is no database -called ``state.db`` in the ``state`` directory (``rm state.db``) before you execute the -``docker run`` command above. + :substitutions: + + $ mkdir state + $ sudo chown -R 49999:49999 state + $ docker run --rm \ + --volume ./state/:/app/state flwr/superlink:|stable_flwr_version| \ + --database state.db \ + ... 
+ +As soon as the SuperLink starts, the file ``state.db`` is created in the ``state`` +directory on your host system. If the file already exists, the SuperLink tries to +restore the state from the file. To start the SuperLink with an empty database, ensure +that there is no database called ``state.db`` in the ``state`` directory (``rm +state.db``) before you execute the ``docker run`` command above. diff --git a/doc/source/docker/pin-version.rst b/doc/source/docker/pin-version.rst index 800e3ed95423..4a69860aa428 100644 --- a/doc/source/docker/pin-version.rst +++ b/doc/source/docker/pin-version.rst @@ -1,10 +1,11 @@ Pin a Docker Image to a Specific Version ======================================== -It may happen that we update the images behind the tags. Such updates usually include security -updates of system dependencies that should not change the functionality of Flower. However, if -you want to ensure that you use a fixed version of the Docker image in your deployments, you can -`specify the digest `_ +It may happen that we update the images behind the tags. Such updates usually include +security updates of system dependencies that should not change the functionality of +Flower. However, if you want to ensure that you use a fixed version of the Docker image +in your deployments, you can `specify the digest +`_ of the image instead of the tag. Example @@ -14,23 +15,23 @@ The following command returns the current image digest referenced by the :substitution-code:`superlink:|stable_flwr_version|` tag: .. code-block:: bash - :substitutions: + :substitutions: - $ docker pull flwr/superlink:|stable_flwr_version| - $ docker inspect --format='{{index .RepoDigests 0}}' flwr/superlink:|stable_flwr_version| + $ docker pull flwr/superlink:|stable_flwr_version| + $ docker inspect --format='{{index .RepoDigests 0}}' flwr/superlink:|stable_flwr_version| This will output .. 
code-block:: bash - :substitutions: + :substitutions: - flwr/superlink@sha256:|stable__flwr_superlink_docker_digest| + flwr/superlink@sha256:|stable__flwr_superlink_docker_digest| Next, we can pin the digest when running a new SuperLink container: .. code-block:: bash - :substitutions: + :substitutions: - $ docker run \ - --rm flwr/superlink@sha256:|latest_version_docker_sha| \ - [OPTIONS] + $ docker run \ + --rm flwr/superlink@sha256:|latest_version_docker_sha| \ + [OPTIONS] diff --git a/doc/source/docker/run-as-root-user.rst b/doc/source/docker/run-as-root-user.rst index d1b41a9b6168..5f8e5eae43af 100644 --- a/doc/source/docker/run-as-root-user.rst +++ b/doc/source/docker/run-as-root-user.rst @@ -2,11 +2,11 @@ Run with Root User Privileges ============================= Flower Docker images, by default, run with a non-root user (username/groupname: ``app``, -UID/GID: ``49999``). Using root user is **not recommended** unless it is necessary for specific -tasks during the build process. +UID/GID: ``49999``). Using root user is **not recommended** unless it is necessary for +specific tasks during the build process. -Always make sure to run the container as a non-root user in production to maintain security -best practices. +Always make sure to run the container as a non-root user in production to maintain +security best practices. Run a Container with Root User Privileges ----------------------------------------- @@ -14,32 +14,33 @@ Run a Container with Root User Privileges Run the Docker image with the ``-u`` flag and specify ``root`` as the username: .. code-block:: bash - :substitutions: + :substitutions: - $ docker run --rm -u root flwr/superlink:|stable_flwr_version| + $ docker run --rm -u root flwr/superlink:|stable_flwr_version| This command will run the Docker container with root user privileges. 
Run the Build Process with Root User Privileges ----------------------------------------------- -If you want to switch to the root user during the build process of the Docker image to install -missing system dependencies, you can use the ``USER root`` directive within your Dockerfile. +If you want to switch to the root user during the build process of the Docker image to +install missing system dependencies, you can use the ``USER root`` directive within your +Dockerfile. .. code-block:: dockerfile - :caption: SuperNode Dockerfile - :substitutions: + :caption: SuperNode Dockerfile + :substitutions: - FROM flwr/supernode:|stable_flwr_version| + FROM flwr/supernode:|stable_flwr_version| - # Switch to root user - USER root + # Switch to root user + USER root - # Install missing dependencies (requires root access) - RUN apt-get update && apt-get install -y + # Install missing dependencies (requires root access) + RUN apt-get update && apt-get install -y - # Switch back to non-root user app - USER app + # Switch back to non-root user app + USER app - # Continue with your Docker image build process - # ... + # Continue with your Docker image build process + # ... diff --git a/doc/source/docker/run-as-subprocess.rst b/doc/source/docker/run-as-subprocess.rst new file mode 100644 index 000000000000..d97319ff52af --- /dev/null +++ b/doc/source/docker/run-as-subprocess.rst @@ -0,0 +1,53 @@ +Run ClientApp as a Subprocess +============================= + +In this mode, the ClientApp is executed as a subprocess within the SuperNode Docker +container, rather than running in a separate container. This approach reduces the number +of running containers, which can be beneficial for environments with limited resources. +However, it also means that the ClientApp is no longer isolated from the SuperNode, +which may introduce additional security concerns. + +Prerequisites +------------- + +1. 
Before running the ClientApp as a subprocess, ensure that the FAB dependencies have + been installed in the SuperNode images. This can be done by extending the SuperNode + image: + + .. code-block:: dockerfile + :caption: Dockerfile.supernode + :linenos: + :substitutions: + + FROM flwr/supernode:|stable_flwr_version| + + WORKDIR /app + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flower-supernode"] + +2. Next, build the SuperNode Docker image by running the following command in the + directory where Dockerfile is located: + + .. code-block:: shell + + $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . + +Run the ClientApp as a Subprocess +--------------------------------- + +Start the SuperNode with the flag ``--isolation subprocess``, which tells the SuperNode +to execute the ClientApp as a subprocess: + +.. code-block:: shell + + $ docker run --rm \ + --detach \ + flwr_supernode:0.0.1 \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=1 num-partitions=2" \ + --supernode-address localhost:9094 \ + --isolation subprocess diff --git a/doc/source/docker/run-quickstart-examples-docker-compose.rst b/doc/source/docker/run-quickstart-examples-docker-compose.rst new file mode 100644 index 000000000000..b31f0035e143 --- /dev/null +++ b/doc/source/docker/run-quickstart-examples-docker-compose.rst @@ -0,0 +1,127 @@ +Run Flower Quickstart Examples with Docker Compose +================================================== + +Flower provides a set of `quickstart examples +`_ to help you get started with the +framework. These examples are designed to demonstrate the capabilities of Flower and by +default run using the Simulation Engine. This guide demonstrates how to run them using +Flower's Deployment Engine via Docker Compose. + +.. 
important:: + + Some quickstart examples may have limitations or requirements that prevent them from + running on every environment. For more information, please see Limitations_. + +Prerequisites +------------- + +Before you start, make sure that: + +- The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. +- The Docker daemon is running. +- Docker Compose is `installed `_. + +Run the Quickstart Example +-------------------------- + +1. Clone the quickstart example you like to run. For example, ``quickstart-pytorch``: + + .. code-block:: bash + + $ git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/quickstart-pytorch . \ + && rm -rf flower && cd quickstart-pytorch + +2. Download the `compose.yml + `_ file + into the example directory: + + .. code-block:: bash + + $ curl https://raw.githubusercontent.com/adap/flower/refs/heads/main/src/docker/complete/compose.yml \ + -o compose.yml + +3. Build and start the services using the following command: + + .. code-block:: bash + + $ docker compose up --build -d + +4. Append the following lines to the end of the ``pyproject.toml`` file and save it: + + .. code-block:: toml + :caption: pyproject.toml + + [tool.flwr.federations.local-deployment] + address = "127.0.0.1:9093" + insecure = true + + .. note:: + + You can customize the string that follows ``tool.flwr.federations.`` to fit your + needs. However, please note that the string cannot contain a dot (``.``). + + In this example, ``local-deployment`` has been used. Just remember to replace + ``local-deployment`` with your chosen name in both the ``tool.flwr.federations.`` + string and the corresponding ``flwr run .`` command. + +5. Run the example: + + .. code-block:: bash + + $ flwr run . local-deployment + +6. Follow the logs of the SuperExec service: + + .. code-block:: bash + + $ docker compose logs superexec -f + +That is all it takes! You can monitor the progress of the run through the logs of the +SuperExec. 
+ +Run a Different Quickstart Example +---------------------------------- + +To run a different quickstart example, such as ``quickstart-tensorflow``, first, shut +down the Docker Compose services of the current example: + +.. code-block:: bash + + $ docker compose down + +After that, you can repeat the steps above. + +Limitations +----------- + +.. list-table:: + :header-rows: 1 + + - - Quickstart Example + - Limitations + - - quickstart-fastai + - None + - - quickstart-huggingface + - None + - - quickstart-jax + - The example has not yet been updated to work with the latest ``flwr`` version. + - - quickstart-mlcube + - The example has not yet been updated to work with the latest ``flwr`` version. + - - quickstart-mlx + - `Requires to run on macOS with Apple Silicon + `_. + - - quickstart-monai + - None + - - quickstart-pandas + - None + - - quickstart-pytorch-lightning + - Requires an older pip version that is not supported by the Flower Docker images. + - - quickstart-pytorch + - None + - - quickstart-sklearn-tabular + - None + - - quickstart-tabnet + - The example has not yet been updated to work with the latest ``flwr`` version. + - - quickstart-tensorflow + - Only runs on AMD64. diff --git a/doc/source/docker/set-environment-variables.rst b/doc/source/docker/set-environment-variables.rst index ff8d6dde0a29..f5d860812bab 100644 --- a/doc/source/docker/set-environment-variables.rst +++ b/doc/source/docker/set-environment-variables.rst @@ -8,7 +8,7 @@ Example ------- .. 
code-block:: bash - :substitutions: + :substitutions: - $ docker run -e FLWR_TELEMETRY_ENABLED=0 -e FLWR_TELEMETRY_LOGGING=0 \ - --rm flwr/superlink:|stable_flwr_version| + $ docker run -e FLWR_TELEMETRY_ENABLED=0 -e FLWR_TELEMETRY_LOGGING=0 \ + --rm flwr/superlink:|stable_flwr_version| diff --git a/doc/source/docker/tutorial-deploy-on-multiple-machines.rst b/doc/source/docker/tutorial-deploy-on-multiple-machines.rst new file mode 100644 index 000000000000..72958c926ba9 --- /dev/null +++ b/doc/source/docker/tutorial-deploy-on-multiple-machines.rst @@ -0,0 +1,171 @@ +Deploy Flower on Multiple Machines with Docker Compose +====================================================== + +This guide will help you set up a Flower project on multiple machines using Docker +Compose. + +You will learn how to run the Flower client and server components on two separate +machines, with Flower configured to use TLS encryption and persist SuperLink state +across restarts. A server consists of a SuperLink and ``SuperExec``. For more details +about the Flower architecture, refer to the :doc:`../explanation-flower-architecture` +explainer page. + +This guide assumes you have completed the :doc:`tutorial-quickstart-docker-compose` +tutorial. It is highly recommended that you follow and understand the contents of that +tutorial before proceeding with this guide. + +Prerequisites +------------- + +Before you begin, make sure you have the following prerequisites: + +- The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally. +- The Docker daemon is running on your local machine and the remote machine. +- Docker Compose V2 is installed on both your local machine and the remote machine. +- You can connect to the remote machine from your local machine. +- Ports ``9091`` and ``9093`` are accessible on the remote machine. + +.. note:: + + The guide uses the |quickstart_sklearn_tabular|_ example as an example project. 
+ + If your project has a different name or location, please remember to adjust the + commands/paths accordingly. + +Step 1: Set Up +-------------- + +1. Clone the Flower repository and change to the ``distributed`` directory: + + .. code-block:: bash + + $ git clone --depth=1 https://github.com/adap/flower.git + $ cd flower/src/docker/distributed + +2. Get the IP address from the remote machine and save it for later. +3. Use the ``certs.yml`` Compose file to generate your own self-signed certificates. If + you have certificates, you can continue with Step 2. + + .. important:: + + These certificates should be used only for development purposes. + + For production environments, you may have to use dedicated services to obtain + your certificates. + + First, set the environment variables ``SUPERLINK_IP`` and ``SUPEREXEC_IP`` with the + IP address from the remote machine. For example, if the IP is ``192.168.2.33``, + execute: + + .. code-block:: bash + + $ export SUPERLINK_IP=192.168.2.33 + $ export SUPEREXEC_IP=192.168.2.33 + + Next, generate the self-signed certificates: + + .. code-block:: bash + + $ docker compose -f certs.yml -f ../complete/certs.yml up --build + +Step 2: Copy the Server Compose Files +------------------------------------- + +Use the method that works best for you to copy the ``server`` directory, the +certificates, and your Flower project to the remote machine. + +For example, you can use ``scp`` to copy the directories: + +.. code-block:: bash + + $ scp -r ./server \ + ./superexec-certificates \ + ./superlink-certificates \ + ../../../examples/quickstart-sklearn-tabular remote:~/distributed + +Step 3: Start the Flower Server Components +------------------------------------------ + +Log into the remote machine using ``ssh`` and run the following command to start the +SuperLink and SuperExec services: + +.. 
code-block:: bash + + $ ssh <username>@<remote-machine-ip> + # In your remote machine + $ cd <path-to-the-distributed-directory> + $ export PROJECT_DIR=../quickstart-sklearn-tabular + $ docker compose -f server/compose.yml up --build -d + +.. note:: + + The path of the ``PROJECT_DIR`` should be relative to the location of the ``server`` + Docker Compose files. + +Go back to your terminal on your local machine. + +Step 4: Start the Flower Client Components +------------------------------------------ + +On your local machine, run the following command to start the client components: + +.. code-block:: bash + + # In the `docker/distributed` directory + $ export PROJECT_DIR=../../../../examples/quickstart-sklearn-tabular + $ docker compose -f client/compose.yml up --build -d + +.. note:: + + The path of the ``PROJECT_DIR`` should be relative to the location of the ``client`` + Docker Compose files. + +Step 5: Run Your Flower Project +------------------------------- + +Specify the remote SuperExec IP address and the path to the root certificate in the +``[tool.flwr.federations.remote-superexec]`` table in the ``pyproject.toml`` file. Here, +we have named our remote federation ``remote-superexec``: + +.. code-block:: toml + :caption: examples/quickstart-sklearn-tabular/pyproject.toml + + [tool.flwr.federations.remote-superexec] + address = "192.168.2.33:9093" + root-certificates = "../../src/docker/distributed/superexec-certificates/ca.crt" + +.. note:: + + The path of the ``root-certificates`` should be relative to the location of the + ``pyproject.toml`` file. + +To run the project, execute: + +.. code-block:: bash + + $ flwr run ../../../examples/quickstart-sklearn-tabular remote-superexec + +That's it! With these steps, you've set up Flower on two separate machines and are ready +to start using it. + +Step 6: Clean Up +---------------- + +Shut down the Flower client components: + +..
code-block:: bash + + # In the `docker/distributed` directory + $ docker compose -f client/compose.yml down + +Shut down the Flower server components and delete the SuperLink state: + +.. code-block:: bash + + $ ssh + $ cd + $ docker compose -f server/compose.yml down -v + +.. |quickstart_sklearn_tabular| replace:: ``examples/quickstart-sklearn-tabular`` + +.. _quickstart_sklearn_tabular: https://github.com/adap/flower/tree/main/examples/quickstart-sklearn-tabular diff --git a/doc/source/docker/tutorial-quickstart-docker-compose.rst b/doc/source/docker/tutorial-quickstart-docker-compose.rst index 93a000295951..bff3125c1b16 100644 --- a/doc/source/docker/tutorial-quickstart-docker-compose.rst +++ b/doc/source/docker/tutorial-quickstart-docker-compose.rst @@ -2,11 +2,12 @@ Quickstart with Docker Compose ============================== This quickstart shows you how to set up Flower using Docker Compose in a single command, -allowing you to focus on developing your application without worrying about the underlying -infrastructure. +allowing you to focus on developing your application without worrying about the +underlying infrastructure. -You will also learn how to easily enable TLS encryption and persist application state locally, -giving you the freedom to choose the configuration that best suits your project's needs. +You will also learn how to easily enable TLS encryption and persist application state +locally, giving you the freedom to choose the configuration that best suits your +project's needs. Prerequisites ------------- @@ -20,55 +21,56 @@ Before you start, make sure that: Step 1: Set Up -------------- -#. Clone the Docker Compose ``complete`` directory: +1. Clone the Docker Compose ``complete`` directory: .. code-block:: bash - $ git clone --depth=1 https://github.com/adap/flower.git _tmp \ - && mv _tmp/src/docker/complete . \ - && rm -rf _tmp && cd complete + $ git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/src/docker/complete . 
\ + && rm -rf _tmp && cd complete -#. Create a new Flower project (PyTorch): +2. Create a new Flower project (PyTorch): .. code-block:: bash - $ flwr new quickstart-compose --framework PyTorch --username flower + $ flwr new quickstart-compose --framework PyTorch --username flower -#. Export the path of the newly created project. The path should be relative to the location of the - Docker Compose files: +3. Export the path of the newly created project. The path should be relative to the + location of the Docker Compose files: .. code-block:: bash - $ export PROJECT_DIR=quickstart-compose + $ export PROJECT_DIR=quickstart-compose - Setting the ``PROJECT_DIR`` helps Docker Compose locate the ``pyproject.toml`` file, allowing - it to install dependencies in the SuperExec and SuperNode images correctly. + Setting the ``PROJECT_DIR`` helps Docker Compose locate the ``pyproject.toml`` file, + allowing it to install dependencies in the SuperExec and SuperNode images correctly. -Step 2: Run Flower in insecure mode +Step 2: Run Flower in Insecure Mode ----------------------------------- -To begin, start Flower with the most basic configuration. In this setup, Flower -will run without TLS and without persisting the state. +To begin, start Flower with the most basic configuration. In this setup, Flower will run +without TLS and without persisting the state. .. note:: - Without TLS, the data sent between the services remains **unencrypted**. Use it only for development - purposes. + Without TLS, the data sent between the services remains **unencrypted**. Use it only + for development purposes. - For production-oriented use cases, :ref:`enable TLS` for secure data transmission. + For production-oriented use cases, :ref:`enable TLS` for secure data + transmission. Open your terminal and run: .. code-block:: bash - $ docker compose -f compose.yml up --build -d + $ docker compose -f compose.yml up --build -d .. 
dropdown:: Understand the command - * ``docker compose``: The Docker command to run the Docker Compose tool. - * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. - * ``--build``: Rebuild the images for each service if they don't already exist. - * ``-d``: Detach the containers from the terminal and run them in the background. + * ``docker compose``: The Docker command to run the Docker Compose tool. + * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. + * ``--build``: Rebuild the images for each service if they don't already exist. + * ``-d``: Detach the containers from the terminal and run them in the background. Step 3: Run the Quickstart Project ---------------------------------- @@ -76,291 +78,321 @@ Step 3: Run the Quickstart Project Now that the Flower services have been started via Docker Compose, it is time to run the quickstart example. -To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify the SuperExec addresses -in the ``pyproject.toml`` file. +To ensure the ``flwr`` CLI connects to the SuperExec, you need to specify the SuperExec +addresses in the ``pyproject.toml`` file. -#. Add the following lines to the ``quickstart-compose/pyproject.toml``: +1. Add the following lines to the ``quickstart-compose/pyproject.toml``: .. code-block:: toml - :caption: quickstart-compose/pyproject.toml + :caption: quickstart-compose/pyproject.toml - [tool.flwr.federations.docker-compose] - address = "127.0.0.1:9093" - insecure = true + [tool.flwr.federations.docker-compose] + address = "127.0.0.1:9093" + insecure = true -#. Execute the command to run the quickstart example: +2. Execute the command to run the quickstart example: .. code-block:: bash - $ flwr run quickstart-compose docker-compose + $ flwr run quickstart-compose docker-compose -#. Monitor the SuperExec logs and wait for the summary to appear: +3. 
Monitor the SuperExec logs and wait for the summary to appear: .. code-block:: bash - $ docker compose logs superexec -f + $ docker compose logs superexec -f Step 4: Update the Application ------------------------------ In the next step, change the application code. -#. For example, go to the ``task.py`` file in the ``quickstart-compose/quickstart_compose/`` - directory and add a ``print`` call in the ``get_weights`` function: +1. For example, go to the ``task.py`` file in the + ``quickstart-compose/quickstart_compose/`` directory and add a ``print`` call in the + ``get_weights`` function: .. code-block:: python - :caption: quickstart-compose/quickstart_compose/task.py + :caption: quickstart-compose/quickstart_compose/task.py - # ... - def get_weights(net): - print("Get weights") - return [val.cpu().numpy() for _, val in net.state_dict().items()] - # ... + # ... + def get_weights(net): + print("Get weights") + return [val.cpu().numpy() for _, val in net.state_dict().items()] -#. Rebuild and restart the services. + + # ... + +2. Rebuild and restart the services. .. note:: - If you have modified the dependencies listed in your ``pyproject.toml`` file, it is essential - to rebuild images. + If you have modified the dependencies listed in your ``pyproject.toml`` file, it + is essential to rebuild images. - If you haven't made any changes, you can skip this step. + If you haven't made any changes, you can skip this step. Run the following command to rebuild and restart the services: .. code-block:: bash - $ docker compose -f compose.yml up --build -d + $ docker compose -f compose.yml up --build -d -#. Run the updated quickstart example: +3. Run the updated quickstart example: .. code-block:: bash - $ flwr run quickstart-compose docker-compose - $ docker compose logs superexec -f + $ flwr run quickstart-compose docker-compose + $ docker compose logs superexec -f In the SuperExec logs, you should find the ``Get weights`` line: .. 
code-block:: - :emphasize-lines: 9 - - superexec-1 | INFO : Starting Flower SuperExec - superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP server. - superexec-1 | INFO : Starting Flower SuperExec gRPC server on 0.0.0.0:9093 - superexec-1 | INFO : ExecServicer.StartRun - superexec-1 | 🎊 Successfully installed quickstart-compose to /app/.flwr/apps/flower/quickstart-compose/1.0.0. - superexec-1 | INFO : Created run -6767165609169293507 - superexec-1 | INFO : Started run -6767165609169293507 - superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP client connected to superlink:9091. - superexec-1 | Get weights - superexec-1 | INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + :emphasize-lines: 9 + + superexec-1 | INFO : Starting Flower SuperExec + superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP server. + superexec-1 | INFO : Starting Flower SuperExec gRPC server on 0.0.0.0:9093 + superexec-1 | INFO : ExecServicer.StartRun + superexec-1 | 🎊 Successfully installed quickstart-compose to /app/.flwr/apps/flower/quickstart-compose/1.0.0. + superexec-1 | INFO : Created run -6767165609169293507 + superexec-1 | INFO : Started run -6767165609169293507 + superexec-1 | WARNING : Option `--insecure` was set. Starting insecure HTTP client connected to superlink:9091. + superexec-1 | Get weights + superexec-1 | INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout Step 5: Persisting the SuperLink State -------------------------------------- -In this step, Flower services are configured to persist the state of the SuperLink service, -ensuring that it maintains its state even after a restart. +In this step, Flower services are configured to persist the state of the SuperLink +service, ensuring that it maintains its state even after a restart. .. 
note:: - When working with Docker Compose on Linux, you may need to create the ``state`` directory first - and change its ownership to ensure proper access and permissions. + When working with Docker Compose on Linux, you may need to create the ``state`` + directory first and change its ownership to ensure proper access and permissions. For more information, consult the following page: :doc:`persist-superlink-state`. -#. Run the command: +1. Run the command: .. code-block:: bash - $ docker compose -f compose.yml -f with-state.yml up --build -d + $ docker compose -f compose.yml -f with-state.yml up --build -d .. dropdown:: Understand the command - * ``docker compose``: The Docker command to run the Docker Compose tool. - * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. - * | ``-f with-state.yml``: Specifies the path to an additional Docker Compose file that - | contains the configuration for persisting the SuperLink state. - | - | Docker merges Compose files according to `merging rules `_. - * ``--build``: Rebuild the images for each service if they don't already exist. - * ``-d``: Detach the containers from the terminal and run them in the background. + * ``docker compose``: The Docker command to run the Docker Compose tool. + * ``-f compose.yml``: Specify the YAML file that contains the basic Flower service definitions. + * | ``-f with-state.yml``: Specifies the path to an additional Docker Compose file that + | contains the configuration for persisting the SuperLink state. + | + | Docker merges Compose files according to `merging rules `_. + * ``--build``: Rebuild the images for each service if they don't already exist. + * ``-d``: Detach the containers from the terminal and run them in the background. -#. Rerun the ``quickstart-compose`` project: +2. Rerun the ``quickstart-compose`` project: .. code-block:: bash - $ flwr run quickstart-compose docker-compose + $ flwr run quickstart-compose docker-compose -#. 
Check the content of the ``state`` directory: +3. Check the content of the ``state`` directory: .. code-block:: bash - $ ls state/ - state.db + $ ls state/ + state.db - You should see a ``state.db`` file in the ``state`` directory. If you restart the service, the - state file will be used to restore the state from the previously saved data. This ensures that - the data persists even if the containers are stopped and started again. + You should see a ``state.db`` file in the ``state`` directory. If you restart the + service, the state file will be used to restore the state from the previously saved + data. This ensures that the data persists even if the containers are stopped and + started again. -.. _TLS: +.. _tls: Step 6: Run Flower with TLS --------------------------- -#. To demonstrate how to enable TLS, generate self-signed certificates using the ``certs.yml`` - Compose file. +1. To demonstrate how to enable TLS, generate self-signed certificates using the + ``certs.yml`` Compose file. .. important:: - These certificates should be used only for development purposes. + These certificates should be used only for development purposes. - For production environments, use a service like `Let's Encrypt `_ - to obtain your certificates. + For production environments, use a service like `Let's Encrypt + `_ to obtain your certificates. Run the command: .. code-block:: bash - $ docker compose -f certs.yml up --build + $ docker compose -f certs.yml up --build -#. Add the following lines to the ``quickstart-compose/pyproject.toml``: +2. Add the following lines to the ``quickstart-compose/pyproject.toml``: .. code-block:: toml - :caption: quickstart-compose/pyproject.toml + :caption: quickstart-compose/pyproject.toml - [tool.flwr.federations.docker-compose-tls] - address = "127.0.0.1:9093" - root-certificates = "superexec-certificates/ca.crt" + [tool.flwr.federations.docker-compose-tls] + address = "127.0.0.1:9093" + root-certificates = "../superexec-certificates/ca.crt" -#. 
Restart the services with TLS enabled: +3. Restart the services with TLS enabled: .. code-block:: bash - $ docker compose -f compose.yml -f with-tls.yml up --build -d + $ docker compose -f compose.yml -f with-tls.yml up --build -d -#. Rerun the ``quickstart-compose`` project: +4. Rerun the ``quickstart-compose`` project: .. code-block:: bash - $ flwr run quickstart-compose docker-compose-tls - $ docker compose logs superexec -f + $ flwr run quickstart-compose docker-compose-tls + $ docker compose logs superexec -f Step 7: Add another SuperNode ----------------------------- -You can add more SuperNodes by duplicating the SuperNode definition in the ``compose.yml`` file. +You can add more SuperNodes and ClientApps by duplicating their definitions in the +``compose.yml`` file. -Just make sure to give each new SuperNode service a unique service name like ``supernode-3``, ``supernode-4``, etc. +Just give each new SuperNode and ClientApp service a unique service name like +``supernode-3``, ``clientapp-3``, etc. In ``compose.yml``, add the following: .. code-block:: yaml - :caption: compose.yml - - services: - # other service definitions - - supernode-3: - user: root - deploy: - resources: - limits: - cpus: "2" - command: - - --superlink - - superlink:9092 - - --insecure - depends_on: - - superlink - volumes: - - apps-volume:/app/.flwr/apps/:ro - build: - context: ${PROJECT_DIR:-.} - dockerfile_inline: | - FROM flwr/supernode:${FLWR_VERSION:-1.10.0} - - WORKDIR /app - COPY --chown=app:app pyproject.toml . - RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . - - ENTRYPOINT ["flower-supernode", "--node-config", "partition-id=0,num-partitions=2"] - -If you also want to enable TLS for the new SuperNodes, duplicate the SuperNode definition for -each new SuperNode service in the ``with-tls.yml`` file. 
+ :caption: compose.yml + :substitutions: + + # other service definitions + + supernode-3: + image: flwr/supernode:${FLWR_VERSION:-|stable_flwr_version|} + command: + - --insecure + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9096 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + depends_on: + - superlink + + clientapp-3: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-|stable_flwr_version|} + + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-3:9096 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-3 + +If you also want to enable TLS for the new SuperNodes, duplicate the SuperNode +definition for each new SuperNode service in the ``with-tls.yml`` file. Make sure that the names of the services match with the one in the ``compose.yml`` file. In ``with-tls.yml``, add the following: .. 
code-block:: yaml - :caption: with-tls.yml - - services: - # other service definitions - - supernode-3: - command: - - --superlink - - superlink:9092 - - --root-certificates - - certificates/ca.crt - secrets: - - source: superlink-ca-certfile - target: /app/certificates/ca.crt + :caption: with-tls.yml + + # other service definitions + + supernode-3: + command: + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9096 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt Step 8: Persisting the SuperLink State and Enabling TLS ------------------------------------------------------- -To run Flower with persisted SuperLink state and enabled TLS, a slight change in the ``with-state.yml`` -file is required: +To run Flower with persisted SuperLink state and enabled TLS, a slight change in the +``with-state.yml`` file is required: -#. Comment out the lines 3-5 and uncomment the lines 6-10: +1. Comment out the lines 2-4 and uncomment the lines 5-9: .. code-block:: yaml - :caption: with-state.yml - :linenos: - :emphasize-lines: 3-10 - - services: - superlink: - # command: - # - --insecure - # - --database=state/state.db - command: - - --ssl-ca-certfile=certificates/ca.crt - - --ssl-certfile=certificates/server.pem - - --ssl-keyfile=certificates/server.key - - --database=state/state.db - volumes: - - ./state/:/app/state/:rw - -#. Restart the services: + :caption: with-state.yml + :linenos: + :emphasize-lines: 2-9 + + superlink: + # command: + # - --insecure + # - --database=state/state.db + command: + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + - --database=state/state.db + volumes: + - ./state/:/app/state/:rw + +2. Restart the services: .. 
code-block:: bash - $ docker compose -f compose.yml -f with-tls.yml -f with-state.yml up --build -d + $ docker compose -f compose.yml -f with-tls.yml -f with-state.yml up --build -d -#. Rerun the ``quickstart-compose`` project: +3. Rerun the ``quickstart-compose`` project: .. code-block:: bash - $ flwr run quickstart-compose docker-compose-tls - $ docker compose logs superexec -f + $ flwr run quickstart-compose docker-compose-tls + $ docker compose logs superexec -f Step 9: Merge Multiple Compose Files ------------------------------------ -You can merge multiple Compose files into a single file. For instance, if you wish to combine -the basic configuration with the TLS configuration, execute the following command: +You can merge multiple Compose files into a single file. For instance, if you wish to +combine the basic configuration with the TLS configuration, execute the following +command: .. code-block:: bash - $ docker compose -f compose.yml \ - -f with-tls.yml config --no-path-resolution > my_compose.yml + $ docker compose -f compose.yml \ + -f with-tls.yml config --no-path-resolution > my_compose.yml -This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into a new file called -``my_compose.yml``. +This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into a new file +called ``my_compose.yml``. Step 10: Clean Up ----------------- @@ -369,5 +401,10 @@ Remove all services and volumes: .. 
code-block:: bash - $ docker compose down -v - $ docker compose -f certs.yml down -v + $ docker compose down -v + $ docker compose -f certs.yml down -v + +Where to Go Next +---------------- + +- :doc:`run-quickstart-examples-docker-compose` diff --git a/doc/source/docker/tutorial-quickstart-docker.rst b/doc/source/docker/tutorial-quickstart-docker.rst index 29ae6d5f6a43..993754dcf109 100644 --- a/doc/source/docker/tutorial-quickstart-docker.rst +++ b/doc/source/docker/tutorial-quickstart-docker.rst @@ -1,11 +1,11 @@ Quickstart with Docker ====================== -This quickstart aims to guide you through the process of containerizing a Flower project and -running it end to end using Docker on your local machine. +This quickstart aims to guide you through the process of containerizing a Flower project +and running it end to end using Docker on your local machine. -This tutorial does not use production-ready settings, so you can focus on understanding the basic -workflow that uses the minimum configurations. +This tutorial does not use production-ready settings, so you can focus on understanding +the basic workflow that uses the minimum configurations. Prerequisites ------------- @@ -18,33 +18,33 @@ Before you start, make sure that: Step 1: Set Up -------------- -#. Create a new Flower project (PyTorch): +1. Create a new Flower project (PyTorch): .. code-block:: bash - $ flwr new quickstart-docker --framework PyTorch --username flower + $ flwr new quickstart-docker --framework PyTorch --username flower - 🔨 Creating Flower project quickstart-docker... - 🎊 Project creation successful. + 🔨 Creating Flower project quickstart-docker... + 🎊 Project creation successful. - Use the following command to run your project: + Use the following command to run your project: - cd quickstart-docker - pip install -e . - flwr run + cd quickstart-docker + pip install -e . + flwr run - $ cd quickstart-docker - $ pip install -e . + $ cd quickstart-docker + $ pip install -e . -#. 
Create a new Docker bridge network called ``flwr-network``: +2. Create a new Docker bridge network called ``flwr-network``: .. code-block:: bash - $ docker network create --driver bridge flwr-network + $ docker network create --driver bridge flwr-network - User-defined networks, such as ``flwr-network``, enable IP resolution of container names, a feature - absent in the default bridge network. This simplifies quickstart example by avoiding the need to - determine host IP first. + User-defined networks, such as ``flwr-network``, enable IP resolution of container + names, a feature absent in the default bridge network. This simplifies quickstart + example by avoiding the need to determine host IP first. Step 2: Start the SuperLink --------------------------- @@ -52,319 +52,357 @@ Step 2: Start the SuperLink Open your terminal and run: .. code-block:: bash - :substitutions: + :substitutions: - $ docker run --rm \ - -p 9091:9091 -p 9092:9092 \ - --network flwr-network \ - --name superlink \ - --detach \ - flwr/superlink:|stable_flwr_version| --insecure + $ docker run --rm \ + -p 9091:9091 -p 9092:9092 \ + --network flwr-network \ + --name superlink \ + --detach \ + flwr/superlink:|stable_flwr_version| --insecure .. dropdown:: Understand the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the container to the same port of - | the host machine, allowing you to access the Driver API on ``http://localhost:9091`` and - | the Fleet API on ``http://localhost:9092``. - * ``--network flwr-network``: Make the container join the network named ``flwr-network``. - * ``--name superlink``: Assign the name ``superlink`` to the container. - * ``--detach``: Run the container in the background, freeing up the terminal. 
- * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific - | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a :doc:`specific version ` of the image. - * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing - | unencrypted communication. + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``-p 9091:9091 -p 9092:9092``: Map port ``9091`` and ``9092`` of the container to the same port of + | the host machine, allowing other services to access the Driver API on + | ``http://localhost:9091`` and the Fleet API on ``http://localhost:9092``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name superlink``: Assign the name ``superlink`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | :substitution-code:`flwr/superlink:|stable_flwr_version|`: The name of the image to be run and the specific + | tag of the image. The tag :substitution-code:`|stable_flwr_version|` represents a :doc:`specific version ` of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. Step 3: Start the SuperNode --------------------------- -The SuperNode Docker image comes with a pre-installed version of Flower and serves as a base for -building your own SuperNode image. +Start two SuperNode containers. -#. Create a SuperNode Dockerfile called ``Dockerfile.supernode`` and paste the following code into it: +1. Start the first container: - .. code-block:: dockerfile - :caption: Dockerfile.supernode - :linenos: - :substitutions: + .. 
code-block:: bash + :substitutions: + + $ docker run --rm \ + -p 9094:9094 \ + --network flwr-network \ + --name supernode-1 \ + --detach \ + flwr/supernode:|stable_flwr_version| \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=0 num-partitions=2" \ + --supernode-address 0.0.0.0:9094 \ + --isolation process - FROM flwr/supernode:|stable_flwr_version| + .. dropdown:: Understand the command - WORKDIR /app - COPY pyproject.toml . - RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``-p 9094:9094``: Map port ``9094`` of the container to the same port of + | the host machine, allowing other services to access the SuperNode API on + | ``http://localhost:9094``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name supernode-1``: Assign the name ``supernode-1`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr/supernode:|stable_flwr_version|``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. + * | ``--superlink superlink:9092``: Connect to the SuperLink's Fleet API at the address + | ``superlink:9092``. + * | ``--node-config "partition-id=0 num-partitions=2"``: Set the partition ID to ``0`` and the + | number of partitions to ``2`` for the SuperNode configuration. + * | ``--supernode-address 0.0.0.0:9094``: Set the address and port number that the SuperNode + | is listening on. + * | ``--isolation process``: Tells the SuperNode that the ClientApp is created by separate + | independent process. The SuperNode does not attempt to create it. + +2. 
Start the second container: - COPY flower.quickstart-docker.1-0-0.fab . - RUN flwr install flower.quickstart-docker.1-0-0.fab + .. code-block:: shell + :substitutions: + + $ docker run --rm \ + -p 9095:9095 \ + --network flwr-network \ + --name supernode-2 \ + --detach \ + flwr/supernode:|stable_flwr_version| \ + --insecure \ + --superlink superlink:9092 \ + --node-config "partition-id=1 num-partitions=2" \ + --supernode-address 0.0.0.0:9095 \ + --isolation process + +Step 4: Start the ClientApp +--------------------------- - ENTRYPOINT ["flower-supernode"] +The ClientApp Docker image comes with a pre-installed version of Flower and serves as a +base for building your own ClientApp image. In order to install the FAB dependencies, +you will need to create a Dockerfile that extends the ClientApp image and installs the +required dependencies. - .. dropdown:: Understand the Dockerfile +1. Create a ClientApp Dockerfile called ``Dockerfile.clientapp`` and paste the following + code into it: - * | :substitution-code:`FROM flwr/supernode:|stable_flwr_version|`: This line specifies that the Docker image - | to be built from is the ``flwr/supernode image``, version :substitution-code:`|stable_flwr_version|`. - * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. - | Any subsequent commands that reference a directory will be relative to this directory. - * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file - | from the current working directory into the container's ``/app`` directory. - * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency - | from the ``pyproject.toml``. - * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to - | install the dependencies defined in the ``pyproject.toml`` file - | - | The ``-U`` flag indicates that any existing packages should be upgraded, and - | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. 
- * | ``COPY flower.quickstart-docker.1-0-0.fab .``: Copy the - | ``flower.quickstart-docker.1-0-0.fab`` file from the current working directory into - | the container's ``/app`` directory. - * | ``RUN flwr install flower.quickstart-docker.1-0-0.fab``: Run the ``flwr`` install command - | to install the Flower App Bundle locally. - * | ``ENTRYPOINT ["flower-supernode"]``: Set the command ``flower-supernode`` to be - | the default command run when the container is started. + .. code-block:: dockerfile + :caption: Dockerfile.clientapp + :linenos: + :substitutions: - .. important:: + FROM flwr/clientapp:|stable_flwr_version| - Note that `flwr `__ is already installed in the ``flwr/supernode`` - base image, so only other package dependencies such as ``flwr-datasets``, ``torch``, etc., - need to be installed. As a result, the ``flwr`` dependency is removed from the - ``pyproject.toml`` after it has been copied into the Docker image (see line 5). + WORKDIR /app + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . -#. Build the Flower App Bundle (FAB): + ENTRYPOINT ["flwr-clientapp"] - .. code-block:: bash + .. dropdown:: Understand the Dockerfile + + * | :substitution-code:`FROM flwr/clientapp:|stable_flwr_version|`: This line specifies that the Docker image + | to be built from is the ``flwr/clientapp image``, version :substitution-code:`|stable_flwr_version|`. + * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. + | Any subsequent commands that reference a directory will be relative to this directory. + * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file + | from the current working directory into the container's ``/app`` directory. + * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency + | from the ``pyproject.toml``. 
+ * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to + | install the dependencies defined in the ``pyproject.toml`` file + | + | The ``-U`` flag indicates that any existing packages should be upgraded, and + | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. + * | ``ENTRYPOINT ["flwr-clientapp"]``: Set the command ``flwr-clientapp`` to be + | the default command run when the container is started. - $ flwr build + .. important:: -#. Next, build the SuperNode Docker image by running the following command in the directory where - Dockerfile is located: + Note that `flwr `__ is already installed in the + ``flwr/clientapp`` base image, so only other package dependencies such as + ``flwr-datasets``, ``torch``, etc., need to be installed. As a result, the + ``flwr`` dependency is removed from the ``pyproject.toml`` after it has been + copied into the Docker image (see line 5). + +2. Next, build the ClientApp Docker image by running the following command in the + directory where the Dockerfile is located: .. code-block:: bash - $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . + $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . .. note:: - The image name was set as ``flwr_supernode`` with the tag ``0.0.1``. Remember that - these values are merely examples, and you can customize them according to your requirements. + The image name was set as ``flwr_clientapp`` with the tag ``0.0.1``. Remember + that these values are merely examples, and you can customize them according to + your requirements. -#. Start the first SuperNode container: +3. Start the first ClientApp container: .. 
code-block:: bash - $ docker run --rm \ - --network flwr-network \ - --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config \ - partition-id=0,num-partitions=2 + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-1:9094 .. dropdown:: Understand the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * ``--network flwr-network``: Make the container join the network named ``flwr-network``. - * ``--detach``: Run the container in the background, freeing up the terminal. - * | ``flwr_supernode:0.0.1``: This is the name of the image to be run and the specific tag - | of the image. - * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing - | unencrypted communication. - * | ``--superlink superlink:9092``: Connect to the SuperLinks Fleet API on the address - | ``superlink:9092``. - * | ``--node-config partition-id=0,num-partitions=2``: Set the partition ID to ``0`` and the - | number of partitions to ``2`` for the SuperNode configuration. - -#. Start the second SuperNode container: + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr_clientapp:0.0.1``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--supernode supernode-1:9094``: Connect to the SuperNode's Fleet API at the address + | ``supernode-1:9094``. + +4. Start the second ClientApp container: .. 
code-block:: shell - $ docker run --rm \ - --network flwr-network \ - --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config \ - partition-id=1,num-partitions=2 + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-2:9095 -Step 4: Start the SuperExec +Step 5: Start the SuperExec --------------------------- -The procedure for building and running a SuperExec image is almost identical to the SuperNode image. +The procedure for building and running a SuperExec image is almost identical to the +ClientApp image. -Similar to the SuperNode image, the SuperExec Docker image comes with a pre-installed version of -Flower and serves as a base for building your own SuperExec image. +Similar to the ClientApp image, you will need to create a Dockerfile that extends the +SuperExec image and installs the required FAB dependencies. -#. Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste the following code in: +1. Create a SuperExec Dockerfile called ``Dockerfile.superexec`` and paste the following + code in: .. code-block:: dockerfile - :caption: Dockerfile.superexec - :substitutions: + :caption: Dockerfile.superexec + :substitutions: - FROM flwr/superexec:|stable_flwr_version| + FROM flwr/superexec:|stable_flwr_version| - WORKDIR /app + WORKDIR /app - COPY pyproject.toml . - RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ - && python -m pip install -U --no-cache-dir . + COPY pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . - ENTRYPOINT ["flower-superexec", "--executor", "flwr.superexec.deployment:executor"] + ENTRYPOINT ["flower-superexec", "--executor", "flwr.superexec.deployment:executor"] .. 
dropdown:: Understand the Dockerfile - * | :substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line specifies that the Docker image - | to be built from is the ``flwr/superexec image``, version :substitution-code:`|stable_flwr_version|`. - * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. - | Any subsequent commands that reference a directory will be relative to this directory. - * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file - | from the current working directory into the container's ``/app`` directory. - * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency - | from the ``pyproject.toml``. - * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to - | install the dependencies defined in the ``pyproject.toml`` file - | - | The ``-U`` flag indicates that any existing packages should be upgraded, and - | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. - * | ``ENTRYPOINT ["flower-superexec"``: Set the command ``flower-superexec`` to be - | the default command run when the container is started. - | - | ``"--executor", "flwr.superexec.deployment:executor"]`` Use the - | ``flwr.superexec.deployment:executor`` executor to run the ServerApps. - -#. Afterward, in the directory that holds the Dockerfile, execute this Docker command to + * | :substitution-code:`FROM flwr/superexec:|stable_flwr_version|`: This line specifies that the Docker image + | to be built from is the ``flwr/superexec image``, version :substitution-code:`|stable_flwr_version|`. + * | ``WORKDIR /app``: Set the working directory for the container to ``/app``. + | Any subsequent commands that reference a directory will be relative to this directory. + * | ``COPY pyproject.toml .``: Copy the ``pyproject.toml`` file + | from the current working directory into the container's ``/app`` directory. 
+ * | ``RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml``: Remove the ``flwr`` dependency + | from the ``pyproject.toml``. + * | ``python -m pip install -U --no-cache-dir .``: Run the ``pip`` install command to + | install the dependencies defined in the ``pyproject.toml`` file + | + | The ``-U`` flag indicates that any existing packages should be upgraded, and + | ``--no-cache-dir`` prevents pip from using the cache to speed up the installation. + * | ``ENTRYPOINT ["flower-superexec"``: Set the command ``flower-superexec`` to be + | the default command run when the container is started. + | + | ``"--executor", "flwr.superexec.deployment:executor"]`` Use the + | ``flwr.superexec.deployment:executor`` executor to run the ServerApps. + +2. Afterward, in the directory that holds the Dockerfile, execute this Docker command to build the SuperExec image: .. code-block:: bash - $ docker build -f Dockerfile.superexec -t flwr_superexec:0.0.1 . + $ docker build -f Dockerfile.superexec -t flwr_superexec:0.0.1 . - -#. Start the SuperExec container: +3. Start the SuperExec container: .. code-block:: bash - $ docker run --rm \ - -p 9093:9093 \ - --network flwr-network \ - --name superexec \ - --detach \ - flwr_superexec:0.0.1 \ - --insecure \ - --executor-config \ - superlink=\"superlink:9091\" + $ docker run --rm \ + -p 9093:9093 \ + --network flwr-network \ + --name superexec \ + --detach \ + flwr_superexec:0.0.1 \ + --insecure \ + --executor-config superlink=\"superlink:9091\" .. dropdown:: Understand the command - * ``docker run``: This tells Docker to run a container from an image. - * ``--rm``: Remove the container once it is stopped or the command exits. - * | ``-p 9093:9093``: Map port ``9093`` of the container to the same port of - | the host machine, allowing you to access the SuperExec API on ``http://localhost:9093``. - * ``--network flwr-network``: Make the container join the network named ``flwr-network``. 
- * ``--name superexec``: Assign the name ``superexec`` to the container. - * ``--detach``: Run the container in the background, freeing up the terminal. - * | ``flwr_superexec:0.0.1``: This is the name of the image to be run and the specific tag - | of the image. - * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing - | unencrypted communication. - * | ``--executor-config superlink=\"superlink:9091\"``: Configure the SuperExec executor to - | connect to the SuperLink running on port ``9091``. - -Step 5: Run the Quickstart Project + * ``docker run``: This tells Docker to run a container from an image. + * ``--rm``: Remove the container once it is stopped or the command exits. + * | ``-p 9093:9093``: Map port ``9093`` of the container to the same port of + | the host machine, allowing you to access the SuperExec API on ``http://localhost:9093``. + * ``--network flwr-network``: Make the container join the network named ``flwr-network``. + * ``--name superexec``: Assign the name ``superexec`` to the container. + * ``--detach``: Run the container in the background, freeing up the terminal. + * | ``flwr_superexec:0.0.1``: This is the name of the image to be run and the specific tag + | of the image. + * | ``--insecure``: This flag tells the container to operate in an insecure mode, allowing + | unencrypted communication. + * | ``--executor-config superlink=\"superlink:9091\"``: Configure the SuperExec executor to + | connect to the SuperLink running on port ``9091``. + +Step 6: Run the Quickstart Project ---------------------------------- -#. Add the following lines to the ``pyproject.toml``: +1. Add the following lines to the ``pyproject.toml``: .. code-block:: toml - :caption: pyproject.toml + :caption: pyproject.toml - [tool.flwr.federations.docker] - address = "127.0.0.1:9093" - insecure = true + [tool.flwr.federations.docker] + address = "127.0.0.1:9093" + insecure = true -#. 
Run the ``quickstart-docker`` project by executing the command: +2. Run the ``quickstart-docker`` project by executing the command: .. code-block:: bash - $ flwr run . docker + $ flwr run . docker -#. Follow the SuperExec logs to track the execution of the run: +3. Follow the SuperExec logs to track the execution of the run: .. code-block:: bash - $ docker logs -f superexec + $ docker logs -f superexec -Step 6: Update the Application +Step 7: Update the Application ------------------------------ -#. Change the application code. For example, change the ``seed`` in ``quickstart_docker/task.py`` - to ``43`` and save it: +1. Change the application code. For example, change the ``seed`` in + ``quickstart_docker/task.py`` to ``43`` and save it: .. code-block:: python - :caption: quickstart_docker/task.py + :caption: quickstart_docker/task.py - # ... - partition_train_test = partition.train_test_split(test_size=0.2, seed=43) - # ... + # ... + partition_train_test = partition.train_test_split(test_size=0.2, seed=43) + # ... -#. Stop the current SuperNode containers: +2. Stop the current ClientApp containers: .. code-block:: bash - $ docker stop $(docker ps -a -q --filter ancestor=flwr_supernode:0.0.1) + $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) -#. Rebuild the FAB and SuperNode image: +3. Rebuild the FAB and ClientApp image: .. code-block:: bash - $ flwr build - $ docker build -f Dockerfile.supernode -t flwr_supernode:0.0.1 . + $ docker build -f Dockerfile.clientapp -t flwr_clientapp:0.0.1 . -#. Launch two new SuperNode containers based on the newly built image: +4. Launch two new ClientApp containers based on the newly built image: .. 
code-block:: bash - $ docker run --rm \ - --network flwr-network \ - --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config \ - partition-id=0,num-partitions=2 - $ docker run --rm \ - --network flwr-network \ - --detach \ - flwr_supernode:0.0.1 \ - --insecure \ - --superlink superlink:9092 \ - --node-config \ - partition-id=1,num-partitions=2 + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-1:9094 + $ docker run --rm \ + --network flwr-network \ + --detach \ + flwr_clientapp:0.0.1 \ + --supernode supernode-2:9095 -#. Run the updated project: +5. Run the updated project: .. code-block:: bash - $ flwr run . docker + $ flwr run . docker -Step 7: Clean Up +Step 8: Clean Up ---------------- Remove the containers and the bridge network: .. code-block:: bash - $ docker stop $(docker ps -a -q --filter ancestor=flwr_supernode:0.0.1) \ - superexec \ - superlink - $ docker network rm flwr-network + $ docker stop $(docker ps -a -q --filter ancestor=flwr_clientapp:0.0.1) \ + supernode-1 \ + supernode-2 \ + superexec \ + superlink + $ docker network rm flwr-network Where to Go Next ---------------- -* :doc:`enable-tls` -* :doc:`persist-superlink-state` -* :doc:`tutorial-quickstart-docker-compose` +- :doc:`enable-tls` +- :doc:`persist-superlink-state` +- :doc:`tutorial-quickstart-docker-compose` diff --git a/doc/source/docker/use-a-different-version.rst b/doc/source/docker/use-a-different-version.rst index 73e5f4218663..9108f5157dcd 100644 --- a/doc/source/docker/use-a-different-version.rst +++ b/doc/source/docker/use-a-different-version.rst @@ -1,12 +1,13 @@ Use a Different Flower Version ============================== -If you want to use a different version of Flower, for example Flower nightly, you can do so by -changing the tag. All available versions are on `Docker Hub `__. 
+If you want to use a different version of Flower, for example Flower nightly, you can do +so by changing the tag. All available versions are on `Docker Hub +`__. .. important:: - When using Flower nightly, the SuperLink nightly image must be paired with the corresponding - SuperNode and ServerApp nightly images released on the same day. To ensure the versions are - in sync, using the concrete tag, e.g., ``1.10.0.dev20240610`` instead of ``nightly`` is - recommended. + When using Flower nightly, the SuperLink nightly image must be paired with the + corresponding SuperNode and ServerApp nightly images released on the same day. To + ensure the versions are in sync, using the concrete tag, e.g., + ``1.10.0.dev20240610`` instead of ``nightly`` is recommended. diff --git a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst index 0139f3b8dc31..4a9d4607d9a5 100644 --- a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst +++ b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst @@ -1,16 +1,22 @@ Example: FedBN in PyTorch - From Centralized To Federated ========================================================= -This tutorial will show you how to use Flower to build a federated version of an existing machine learning workload with `FedBN `_, a federated training strategy designed for non-iid data. -We are using PyTorch to train a Convolutional Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. -When applying FedBN, only few changes needed compared to :doc:`Example: PyTorch - From Centralized To Federated `. +This tutorial will show you how to use Flower to build a federated version of an +existing machine learning workload with `FedBN `_, a +federated training strategy designed for non-iid data. We are using PyTorch to train a +Convolutional Neural Network (with Batch Normalization layers) on the CIFAR-10 dataset.
+When applying FedBN, only a few changes are needed compared to :doc:`Example: PyTorch - From +Centralized To Federated `. Centralized Training -------------------- -All files are revised based on :doc:`Example: PyTorch - From Centralized To Federated `. -The only thing to do is modifying the file called :code:`cifar.py`, revised part is shown below: -The model architecture defined in class Net() is added with Batch Normalization layers accordingly. +All files are revised based on :doc:`Example: PyTorch - From Centralized To Federated +`. The only thing to do is to modify the +file called ``cifar.py``; the revised part is shown below: + +The model architecture defined in class Net() is added with Batch Normalization layers +accordingly. .. code-block:: python @@ -40,26 +46,33 @@ The model architecture defined in class Net() is added with Batch Normalization You can now run your machine learning workload: -.. code-block:: python +.. code-block:: bash python3 cifar.py -So far this should all look fairly familiar if you've used PyTorch before. -Let's take the next step and use what we've built to create a federated learning system within FedBN, the system consists of one server and two clients. +So far this should all look fairly familiar if you've used PyTorch before. Let's take +the next step and use what we've built to create a federated learning system within +FedBN; the system consists of one server and two clients. Federated Training ------------------ -If you have read :doc:`Example: PyTorch - From Centralized To Federated `, the following parts are easy to follow, only :code:`get_parameters` and :code:`set_parameters` function in :code:`client.py` needed to revise. -If not, please read the :doc:`Example: PyTorch - From Centralized To Federated `. first. +If you have read :doc:`Example: PyTorch - From Centralized To Federated +`, the following parts are easy to +follow, only the ``get_parameters`` and ``set_parameters`` functions in ``client.py`` need +to be revised.
If not, please read the :doc:`Example: PyTorch - From +Centralized To Federated ` first. -Our example consists of one *server* and two *clients*. In FedBN, :code:`server.py` keeps unchanged, we can start the server directly. +Our example consists of one *server* and two *clients*. In FedBN, ``server.py`` remains +unchanged; we can start the server directly. -.. code-block:: python +.. code-block:: bash python3 server.py -Finally, we will revise our *client* logic by changing :code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, we will exclude batch normalization parameters from model parameter list when sending to or receiving from the server. +Finally, we will revise our *client* logic by changing ``get_parameters`` and +``set_parameters`` in ``client.py``; we will exclude batch normalization parameters from +the model parameter list when sending to or receiving from the server. .. code-block:: python @@ -71,11 +84,15 @@ Finally, we will revise our *client* logic by changing :code:`get_parameters` an def get_parameters(self, config) -> List[np.ndarray]: # Return model parameters as a list of NumPy ndarrays, excluding parameters of BN layers when using FedBN - return [val.cpu().numpy() for name, val in self.model.state_dict().items() if 'bn' not in name] + return [ + val.cpu().numpy() + for name, val in self.model.state_dict().items() + if "bn" not in name + ] def set_parameters(self, parameters: List[np.ndarray]) -> None: # Set model parameters from a list of NumPy ndarrays - keys = [k for k in self.model.state_dict().keys() if 'bn' not in k] + keys = [k for k in self.model.state_dict().keys() if "bn" not in k] params_dict = zip(keys, parameters) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) self.model.load_state_dict(state_dict, strict=False) @@ -84,15 +101,20 @@ Finally, we will revise our *client* logic by changing :code:`get_parameters` an Now, you can now open two additional terminal windows and run -..
code-block:: python +.. code-block:: bash python3 client.py -in each window (make sure that the server is still running before you do so) and see your (previously centralized) PyTorch project run federated learning with FedBN strategy across two clients. Congratulations! +in each window (make sure that the server is still running before you do so) and see +your (previously centralized) PyTorch project run federated learning with FedBN strategy +across two clients. Congratulations! Next Steps ---------- -The full source code for this example can be found `here `_. -Our example is of course somewhat over-simplified because both clients load the exact same dataset, which isn't realistic. -You're now prepared to explore this topic further. How about using different subsets of CIFAR-10 on each client? How about adding more clients? +The full source code for this example can be found `here +`_. +Our example is of course somewhat over-simplified because both clients load the exact +same dataset, which isn't realistic. You're now prepared to explore this topic further. +How about using different subsets of CIFAR-10 on each client? How about adding more +clients? diff --git a/doc/source/example-jax-from-centralized-to-federated.rst b/doc/source/example-jax-from-centralized-to-federated.rst deleted file mode 100644 index 6b06a288a67a..000000000000 --- a/doc/source/example-jax-from-centralized-to-federated.rst +++ /dev/null @@ -1,282 +0,0 @@ -Example: JAX - Run JAX Federated -================================ - -This tutorial will show you how to use Flower to build a federated version of an existing JAX workload. -We are using JAX to train a linear regression model on a scikit-learn dataset. -We will structure the example similar to our `PyTorch - From Centralized To Federated `_ walkthrough. -First, we build a centralized training approach based on the `Linear Regression with JAX `_ tutorial`. 
-Then, we build upon the centralized training code to run the training in a federated fashion. - -Before we start building our JAX example, we need install the packages :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`: - -.. code-block:: shell - - $ pip install jax jaxlib scikit-learn flwr - - -Linear Regression with JAX --------------------------- - -We begin with a brief description of the centralized training code based on a :code:`Linear Regression` model. -If you want a more in-depth explanation of what's going on then have a look at the official `JAX documentation `_. - -Let's create a new file called :code:`jax_training.py` with all the components required for a traditional (centralized) linear regression training. -First, the JAX packages :code:`jax` and :code:`jaxlib` need to be imported. In addition, we need to import :code:`sklearn` since we use :code:`make_regression` for the dataset and :code:`train_test_split` to split the dataset into a training and test set. -You can see that we do not yet import the :code:`flwr` package for federated learning. This will be done later. - -.. code-block:: python - - from typing import Dict, List, Tuple, Callable - import jax - import jax.numpy as jnp - from sklearn.datasets import make_regression - from sklearn.model_selection import train_test_split - - key = jax.random.PRNGKey(0) - -The :code:`load_data()` function loads the mentioned training and test sets. - -.. code-block:: python - - def load_data() -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]: - # create our dataset and start with similar datasets for different clients - X, y = make_regression(n_features=3, random_state=0) - X, X_test, y, y_test = train_test_split(X, y) - return X, y, X_test, y_test - -The model architecture (a very simple :code:`Linear Regression` model) is defined in :code:`load_model()`. - -.. 
code-block:: python - - def load_model(model_shape) -> Dict: - # model weights - params = { - 'b' : jax.random.uniform(key), - 'w' : jax.random.uniform(key, model_shape) - } - return params - -We now need to define the training (function :code:`train()`), which loops over the training set and measures the loss (function :code:`loss_fn()`) for each batch of training examples. The loss function is separate since JAX takes derivatives with a :code:`grad()` function (defined in the :code:`main()` function and called in :code:`train()`). - -.. code-block:: python - - def loss_fn(params, X, y) -> Callable: - err = jnp.dot(X, params['w']) + params['b'] - y - return jnp.mean(jnp.square(err)) # mse - - def train(params, grad_fn, X, y) -> Tuple[np.array, float, int]: - num_examples = X.shape[0] - for epochs in range(10): - grads = grad_fn(params, X, y) - params = jax.tree_multimap(lambda p, g: p - 0.05 * g, params, grads) - loss = loss_fn(params,X, y) - # if epochs % 10 == 9: - # print(f'For Epoch {epochs} loss {loss}') - return params, loss, num_examples - -The evaluation of the model is defined in the function :code:`evaluation()`. The function takes all test examples and measures the loss of the linear regression model. - -.. code-block:: python - - def evaluation(params, grad_fn, X_test, y_test) -> Tuple[float, int]: - num_examples = X_test.shape[0] - err_test = loss_fn(params, X_test, y_test) - loss_test = jnp.mean(jnp.square(err_test)) - # print(f'Test loss {loss_test}') - return loss_test, num_examples - -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our model using JAX. As already mentioned, the :code:`jax.grad()` function is defined in :code:`main()` and passed to :code:`train()`. - -.. 
code-block:: python - - def main(): - X, y, X_test, y_test = load_data() - model_shape = X.shape[1:] - grad_fn = jax.grad(loss_fn) - print("Model Shape", model_shape) - params = load_model(model_shape) - params, loss, num_examples = train(params, grad_fn, X, y) - evaluation(params, grad_fn, X_test, y_test) - - - if __name__ == "__main__": - main() - -You can now run your (centralized) JAX linear regression workload: - -.. code-block:: python - - python3 jax_training.py - -So far this should all look fairly familiar if you've used JAX before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. - -JAX meets Flower ----------------- - -The concept of federating an existing workload is always the same and easy to understand. -We have to start a *server* and then use the code in :code:`jax_training.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server*, which averages all received parameter updates. -This describes one round of the federated learning process, and we repeat this for multiple rounds. - -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. - -.. code-block:: python - - import flwr as fl - - if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) - -We can already start the *server*: - -.. code-block:: python - - python3 server.py - -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined JAX training in :code:`jax_training.py`. 
-Our *client* needs to import :code:`flwr`, but also :code:`jax` and :code:`jaxlib` to update the parameters on our JAX model: - -.. code-block:: python - - from typing import Dict, List, Callable, Tuple - - import flwr as fl - import numpy as np - import jax - import jax.numpy as jnp - - import jax_training - - -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. -Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`FlowerClient`. -:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like JAX) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`FlowerClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. :code:`set_parameters (optional)` - * set the model parameters on the local model that are received from the server - * transform parameters to NumPy :code:`ndarray`'s - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model parameters and return them to the server -#. 
:code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss to the server - -The challenging part is to transform the JAX model parameters from :code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with `NumPyClient`. - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`evaluate()` previously defined in :code:`jax_training.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. -We included type annotations to give you a better understanding of the data types that get passed around. - -.. code-block:: python - - - class FlowerClient(fl.client.NumPyClient): - """Flower client implementing using linear regression and JAX.""" - - def __init__( - self, - params: Dict, - grad_fn: Callable, - train_x: List[np.ndarray], - train_y: List[np.ndarray], - test_x: List[np.ndarray], - test_y: List[np.ndarray], - ) -> None: - self.params= params - self.grad_fn = grad_fn - self.train_x = train_x - self.train_y = train_y - self.test_x = test_x - self.test_y = test_y - - def get_parameters(self, config) -> Dict: - # Return model parameters as a list of NumPy ndarrays - parameter_value = [] - for _, val in self.params.items(): - parameter_value.append(np.array(val)) - return parameter_value - - def set_parameters(self, parameters: List[np.ndarray]) -> Dict: - # Collect model parameters and update the parameters of the local model - value=jnp.ndarray - params_item = list(zip(self.params.keys(),parameters)) - for item in params_item: - key = item[0] - value = item[1] - self.params[key] = value - return self.params - - - def fit( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[List[np.ndarray], int, Dict]: - # Set model parameters, train model, return 
updated model parameters - print("Start local training") - self.params = self.set_parameters(parameters) - self.params, loss, num_examples = jax_training.train(self.params, self.grad_fn, self.train_x, self.train_y) - results = {"loss": float(loss)} - print("Training results", results) - return self.get_parameters(config={}), num_examples, results - - def evaluate( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[float, int, Dict]: - # Set model parameters, evaluate the model on a local test dataset, return result - print("Start evaluation") - self.params = self.set_parameters(parameters) - loss, num_examples = jax_training.evaluation(self.params,self.grad_fn, self.test_x, self.test_y) - print("Evaluation accuracy & loss", loss) - return ( - float(loss), - num_examples, - {"loss": float(loss)}, - ) - -Having defined the federation process, we can run it. - -.. code-block:: python - - def main() -> None: - """Load data, start MNISTClient.""" - - # Load data - train_x, train_y, test_x, test_y = jax_training.load_data() - grad_fn = jax.grad(jax_training.loss_fn) - - # Load model (from centralized training) and initialize parameters - model_shape = train_x.shape[1:] - params = jax_training.load_model(model_shape) - - # Start Flower client - client = FlowerClient(params, grad_fn, train_x, train_y, test_x, test_y) - fl.client.start_client(server_address="0.0.0.0:8080", client.to_client()) - - if __name__ == "__main__": - main() - - -And that's it. You can now open two additional terminal windows and run - -.. code-block:: python - - python3 client.py - -in each window (make sure that the server is still running before you do so) and see your JAX project run federated learning across two clients. Congratulations! - -Next Steps ----------- - -The source code of this example was improved over time and can be found here: `Quickstart JAX `_. -Our example is somewhat over-simplified because both clients load the same dataset. 
- -You're now prepared to explore this topic further. How about using a more sophisticated model or using a different dataset? How about adding more clients? diff --git a/doc/source/example-pytorch-from-centralized-to-federated.rst b/doc/source/example-pytorch-from-centralized-to-federated.rst index 0c458a136a81..9629a7fed6e8 100644 --- a/doc/source/example-pytorch-from-centralized-to-federated.rst +++ b/doc/source/example-pytorch-from-centralized-to-federated.rst @@ -1,21 +1,25 @@ Example: PyTorch - From Centralized To Federated ================================================ -This tutorial will show you how to use Flower to build a federated version of an existing machine learning workload. -We are using PyTorch to train a Convolutional Neural Network on the CIFAR-10 dataset. -First, we introduce this machine learning task with a centralized training approach based on the `Deep Learning with PyTorch `_ tutorial. -Then, we build upon the centralized training code to run the training in a federated fashion. +This tutorial will show you how to use Flower to build a federated version of an +existing machine learning workload. We are using PyTorch to train a Convolutional Neural +Network on the CIFAR-10 dataset. First, we introduce this machine learning task with a +centralized training approach based on the `Deep Learning with PyTorch +`_ tutorial. Then, +we build upon the centralized training code to run the training in a federated fashion. Centralized Training -------------------- -We begin with a brief description of the centralized CNN training code. -If you want a more in-depth explanation of what's going on then have a look at the official `PyTorch tutorial `_. +We begin with a brief description of the centralized CNN training code. If you want a +more in-depth explanation of what's going on then have a look at the official `PyTorch +tutorial `_. 
-Let's create a new file called :code:`cifar.py` with all the components required for a traditional (centralized) training on CIFAR-10. -First, all required packages (such as :code:`torch` and :code:`torchvision`) need to be imported. -You can see that we do not import any package for federated learning. -You can keep all these imports as they are even when we add the federated learning components at a later point. +Let's create a new file called ``cifar.py`` with all the components required for a +traditional (centralized) training on CIFAR-10. First, all required packages (such as +``torch`` and ``torchvision``) need to be imported. You can see that we do not import +any package for federated learning. You can keep all these imports as they are even when +we add the federated learning components at a later point. .. code-block:: python @@ -29,7 +33,9 @@ You can keep all these imports as they are even when we add the federated learni from torch import Tensor from torchvision.datasets import CIFAR10 -As already mentioned we will use the CIFAR-10 dataset for this machine learning workload. The model architecture (a very simple Convolutional Neural Network) is defined in :code:`class Net()`. +As already mentioned we will use the CIFAR-10 dataset for this machine learning +workload. The model architecture (a very simple Convolutional Neural Network) is defined +in ``class Net()``. .. code-block:: python @@ -53,13 +59,17 @@ As already mentioned we will use the CIFAR-10 dataset for this machine learning x = self.fc3(x) return x -The :code:`load_data()` function loads the CIFAR-10 training and test sets. The :code:`transform` normalized the data after loading. +The ``load_data()`` function loads the CIFAR-10 training and test sets. The +``transform`` normalized the data after loading. .. 
code-block:: python DATA_ROOT = "~/data/cifar-10" - def load_data() -> Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, Dict]: + + def load_data() -> ( + Tuple[torch.utils.data.DataLoader, torch.utils.data.DataLoader, Dict] + ): """Load CIFAR-10 (training and test set).""" transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] @@ -68,12 +78,15 @@ The :code:`load_data()` function loads the CIFAR-10 training and test sets. The trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True) testset = CIFAR10(DATA_ROOT, train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False) - num_examples = {"trainset" : len(trainset), "testset" : len(testset)} + num_examples = {"trainset": len(trainset), "testset": len(testset)} return trainloader, testloader, num_examples -We now need to define the training (function :code:`train()`) which loops over the training set, measures the loss, backpropagates it, and then takes one optimizer step for each batch of training examples. +We now need to define the training (function ``train()``) which loops over the training +set, measures the loss, backpropagates it, and then takes one optimizer step for each +batch of training examples. -The evaluation of the model is defined in the function :code:`test()`. The function loops over all test samples and measures the loss of the model based on the test dataset. +The evaluation of the model is defined in the function ``test()``. The function loops +over all test samples and measures the loss of the model based on the test dataset. .. code-block:: python @@ -133,7 +146,8 @@ The evaluation of the model is defined in the function :code:`test()`. The funct accuracy = correct / total return loss, accuracy -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our CNN on CIFAR-10. 
+Having defined the data loading, model architecture, training, and evaluation we can put +everything together and train our CNN on CIFAR-10. .. code-block:: python @@ -143,7 +157,7 @@ Having defined the data loading, model architecture, training, and evaluation we print("Load data") trainloader, testloader, _ = load_data() print("Start training") - net=Net().to(DEVICE) + net = Net().to(DEVICE) train(net=net, trainloader=trainloader, epochs=2, device=DEVICE) print("Evaluate model") loss, accuracy = test(net=net, testloader=testloader, device=DEVICE) @@ -156,46 +170,57 @@ Having defined the data loading, model architecture, training, and evaluation we You can now run your machine learning workload: -.. code-block:: python +.. code-block:: bash python3 cifar.py -So far, this should all look fairly familiar if you've used PyTorch before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. +So far, this should all look fairly familiar if you've used PyTorch before. Let's take +the next step and use what we've built to create a simple federated learning system +consisting of one server and two clients. Federated Training ------------------ -The simple machine learning project discussed in the previous section trains the model on a single dataset (CIFAR-10), we call this centralized learning. -This concept of centralized learning, as shown in the previous section, is probably known to most of you, and many of you have used it previously. -Normally, if you'd want to run machine learning workloads in a federated fashion, then you'd have to change most of your code and set everything up from scratch. This can be a considerable effort. +The simple machine learning project discussed in the previous section trains the model +on a single dataset (CIFAR-10), we call this centralized learning. 
This concept of +centralized learning, as shown in the previous section, is probably known to most of +you, and many of you have used it previously. Normally, if you'd want to run machine +learning workloads in a federated fashion, then you'd have to change most of your code +and set everything up from scratch. This can be a considerable effort. -However, with Flower you can evolve your pre-existing code into a federated learning setup without the need for a major rewrite. +However, with Flower you can evolve your pre-existing code into a federated learning +setup without the need for a major rewrite. -The concept is easy to understand. -We have to start a *server* and then use the code in :code:`cifar.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. -The updated parameters are sent back to the *server* which averages all received parameter updates. -This describes one round of the federated learning process and we repeat this for multiple rounds. +The concept is easy to understand. We have to start a *server* and then use the code in +``cifar.py`` for the *clients* that are connected to the *server*. The *server* sends +model parameters to the clients. The *clients* run the training and update the +parameters. The updated parameters are sent back to the *server* which averages all +received parameter updates. This describes one round of the federated learning process +and we repeat this for multiple rounds. -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. +Our example consists of one *server* and two *clients*. Let's set up ``server.py`` +first. The *server* needs to import the Flower package ``flwr``. 
Next, we use the +``start_server`` function to start a server and tell it to perform three rounds of +federated learning. .. code-block:: python import flwr as fl if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) + fl.server.start_server( + server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3) + ) We can already start the *server*: -.. code-block:: python +.. code-block:: bash python3 server.py -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined centralized training in :code:`cifar.py`. -Our *client* needs to import :code:`flwr`, but also :code:`torch` to update the parameters on our PyTorch model: +Finally, we will define our *client* logic in ``client.py`` and build upon the +previously defined centralized training in ``cifar.py``. Our *client* needs to import +``flwr``, but also ``torch`` to update the parameters on our PyTorch model: .. code-block:: python @@ -210,28 +235,38 @@ Our *client* needs to import :code:`flwr`, but also :code:`torch` to update the DEVICE: str = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. -Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`CifarClient`. -:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like PyTorch or TensorFlow/Keras) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`CifarClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: - -#. 
:code:`set_parameters` - * set the model parameters on the local model that are received from the server - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model weights and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss and accuracy to the server - -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`test()` previously defined in :code:`cifar.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. -We included type annotations to give you a better understanding of the data types that get passed around. +Implementing a Flower *client* basically means implementing a subclass of either +``flwr.client.Client`` or ``flwr.client.NumPyClient``. Our implementation will be based +on ``flwr.client.NumPyClient`` and we'll call it ``CifarClient``. ``NumPyClient`` is +slightly easier to implement than ``Client`` if you use a framework with good NumPy +interoperability (like PyTorch or TensorFlow/Keras) because it avoids some of the +boilerplate that would otherwise be necessary. ``CifarClient`` needs to implement four +methods, two methods for getting/setting model parameters, one method for training the +model, and one method for testing the model: + +1. 
``set_parameters`` + - set the model parameters on the local model that are received from the server + - loop over the list of model parameters received as NumPy ``ndarray``'s (think + list of neural network layers) +2. ``get_parameters`` + - get the model parameters and return them as a list of NumPy ``ndarray``'s + (which is what ``flwr.client.NumPyClient`` expects) +3. ``fit`` + - update the parameters of the local model with the parameters received from the + server + - train the model on the local training set + - get the updated local model weights and return them to the server +4. ``evaluate`` + - update the parameters of the local model with the parameters received from the + server + - evaluate the updated model on the local test set + - return the local loss and accuracy to the server + +The two ``NumPyClient`` methods ``fit`` and ``evaluate`` make use of the functions +``train()`` and ``test()`` previously defined in ``cifar.py``. So what we really do here +is we tell Flower through our ``NumPyClient`` subclass which of our already defined +functions to call for training and evaluation. We included type annotations to give you +a better understanding of the data types that get passed around. .. code-block:: python @@ -277,8 +312,10 @@ We included type annotations to give you a better understanding of the data type loss, accuracy = cifar.test(self.model, self.testloader, device=DEVICE) return float(loss), self.num_examples["testset"], {"accuracy": float(accuracy)} -All that's left to do it to define a function that loads both model and data, creates a :code:`CifarClient`, and starts this client. -You load your data and model by using :code:`cifar.py`. Start :code:`CifarClient` with the function :code:`fl.client.start_client()` by pointing it at the same IP address we used in :code:`server.py`: +All that's left to do is to define a function that loads both model and data, creates a 
You load your data and model by using +``cifar.py``. Start ``CifarClient`` with the function ``fl.client.start_client()`` by +pointing it at the same IP address we used in ``server.py``: .. code-block:: python @@ -300,15 +337,20 @@ You load your data and model by using :code:`cifar.py`. Start :code:`CifarClient And that's it. You can now open two additional terminal windows and run -.. code-block:: python +.. code-block:: bash python3 client.py -in each window (make sure that the server is running before you do so) and see your (previously centralized) PyTorch project run federated learning across two clients. Congratulations! +in each window (make sure that the server is running before you do so) and see your +(previously centralized) PyTorch project run federated learning across two clients. +Congratulations! Next Steps ---------- -The full source code for this example: `PyTorch: From Centralized To Federated (Code) `_. -Our example is, of course, somewhat over-simplified because both clients load the exact same dataset, which isn't realistic. -You're now prepared to explore this topic further. How about using different subsets of CIFAR-10 on each client? How about adding more clients? +The full source code for this example: `PyTorch: From Centralized To Federated (Code) +`_. +Our example is, of course, somewhat over-simplified because both clients load the exact +same dataset, which isn't realistic. You're now prepared to explore this topic further. +How about using different subsets of CIFAR-10 on each client? How about adding more +clients? 
diff --git a/doc/source/explanation-differential-privacy.rst b/doc/source/explanation-differential-privacy.rst index e488f5ccbd57..06e9dbdedd39 100644 --- a/doc/source/explanation-differential-privacy.rst +++ b/doc/source/explanation-differential-privacy.rst @@ -1,133 +1,171 @@ Differential Privacy ==================== -The information in datasets like healthcare, financial transactions, user preferences, etc., is valuable and has the potential for scientific breakthroughs and provides important business insights. -However, such data is also sensitive and there is a risk of compromising individual privacy. -Traditional methods like anonymization alone would not work because of attacks like Re-identification and Data Linkage. -That's where differential privacy comes in. It provides the possibility of analyzing data while ensuring the privacy of individuals. +The information in datasets like healthcare, financial transactions, user preferences, +etc., is valuable and has the potential for scientific breakthroughs and provides +important business insights. However, such data is also sensitive and there is a risk of +compromising individual privacy. +Traditional methods like anonymization alone would not work because of attacks like +Re-identification and Data Linkage. That's where differential privacy comes in. It +provides the possibility of analyzing data while ensuring the privacy of individuals. Differential Privacy -------------------- -Imagine two datasets that are identical except for a single record (for instance, Alice's data). -Differential Privacy (DP) guarantees that any analysis (M), like calculating the average income, will produce nearly identical results for both datasets (O and O' would be similar). -This preserves group patterns while obscuring individual details, ensuring the individual's information remains hidden in the crowd. -.. 
image:: ./_static/DP/dp-intro.png - :align: center - :width: 400 - :alt: DP Intro +Imagine two datasets that are identical except for a single record (for instance, +Alice's data). Differential Privacy (DP) guarantees that any analysis (M), like +calculating the average income, will produce nearly identical results for both datasets +(O and O' would be similar). This preserves group patterns while obscuring individual +details, ensuring the individual's information remains hidden in the crowd. +.. image:: ./_static/DP/dp-intro.png + :align: center + :width: 400 + :alt: DP Intro -One of the most commonly used mechanisms to achieve DP is adding enough noise to the output of the analysis to mask the contribution of each individual in the data while preserving the overall accuracy of the analysis. +One of the most commonly used mechanisms to achieve DP is adding enough noise to the +output of the analysis to mask the contribution of each individual in the data while +preserving the overall accuracy of the analysis. Formal Definition ~~~~~~~~~~~~~~~~~ -Differential Privacy (DP) provides statistical guarantees against the information an adversary can infer through the output of a randomized algorithm. -It provides an unconditional upper bound on the influence of a single individual on the output of the algorithm by adding noise [1]. -A randomized mechanism -M provides (:math:`\epsilon`, :math:`\delta`)-differential privacy if for any two neighboring databases, D :sub:`1` and D :sub:`2`, that differ in only a single record, -and for all possible outputs S ⊆ Range(A): - -.. math:: - \small - P[M(D_{1} \in A)] \leq e^{\epsilon} P[M(D_{2} \in A)] + \delta +Differential Privacy (DP) provides statistical guarantees against the information an +adversary can infer through the output of a randomized algorithm. It provides an +unconditional upper bound on the influence of a single individual on the output of the +algorithm by adding noise [1]. 
A randomized mechanism M provides (:math:`\epsilon`, +:math:`\delta`)-differential privacy if for any two neighboring databases, D :sub:`1` +and D :sub:`2`, that differ in only a single record, and for all possible outputs S ⊆ +Range(A): +.. math:: -The :math:`\epsilon` parameter, also known as the privacy budget, is a metric of privacy loss. -It also controls the privacy-utility trade-off; lower :math:`\epsilon` values indicate higher levels of privacy but are likely to reduce utility as well. -The :math:`\delta` parameter accounts for a small probability on which the upper bound :math:`\epsilon` does not hold. -The amount of noise needed to achieve differential privacy is proportional to the sensitivity of the output, which measures the maximum change in the output due to the inclusion or removal of a single record. + \small + P[M(D_{1} \in A)] \leq e^{\epsilon} P[M(D_{2} \in A)] + \delta +The :math:`\epsilon` parameter, also known as the privacy budget, is a metric of privacy +loss. It also controls the privacy-utility trade-off; lower :math:`\epsilon` values +indicate higher levels of privacy but are likely to reduce utility as well. The +:math:`\delta` parameter accounts for a small probability on which the upper bound +:math:`\epsilon` does not hold. The amount of noise needed to achieve differential +privacy is proportional to the sensitivity of the output, which measures the maximum +change in the output due to the inclusion or removal of a single record. Differential Privacy in Machine Learning ---------------------------------------- + DP can be utilized in machine learning to preserve the privacy of the training data. -Differentially private machine learning algorithms are designed in a way to prevent the algorithm to learn any specific information about any individual data points and subsequently prevent the model from revealing sensitive information. 
-Depending on the stage at which noise is introduced, various methods exist for applying DP to machine learning algorithms. -One approach involves adding noise to the training data (either to the features or labels), while another method entails injecting noise into the gradients of the loss function during model training. -Additionally, such noise can be incorporated into the model's output. +Differentially private machine learning algorithms are designed in a way to prevent the +algorithm from learning any specific information about any individual data points and +subsequently prevent the model from revealing sensitive information. Depending on the +stage at which noise is introduced, various methods exist for applying DP to machine +learning algorithms. One approach involves adding noise to the training data (either to +the features or labels), while another method entails injecting noise into the gradients +of the loss function during model training. Additionally, such noise can be incorporated +into the model's output. Differential Privacy in Federated Learning ------------------------------------------ -Federated learning is a data minimization approach that allows multiple parties to collaboratively train a model without sharing their raw data. -However, federated learning also introduces new privacy challenges. The model updates between parties and the central server can leak information about the local data. 
These leaks can be +exploited by attacks such as membership inference and property inference attacks, or +model inversion attacks. -Depending on the granularity of privacy provision or the location of noise addition, different forms of DP exist in federated learning. -In this explainer, we focus on two approaches of DP utilization in federated learning based on where the noise is added: at the server (also known as the center) or at the client (also known as the local). +DP can play a crucial role in federated learning to provide privacy for the clients' +data. -- **Central Differential Privacy**: DP is applied by the server and the goal is to prevent the aggregated model from leaking information about each client's data. +Depending on the granularity of privacy provision or the location of noise addition, +different forms of DP exist in federated learning. In this explainer, we focus on two +approaches of DP utilization in federated learning based on where the noise is added: at +the server (also known as the center) or at the client (also known as the local). -- **Local Differential Privacy**: DP is applied on the client side before sending any information to the server and the goal is to prevent the updates that are sent to the server from leaking any information about the client's data. +- **Central Differential Privacy**: DP is applied by the server and the goal is to + prevent the aggregated model from leaking information about each client's data. +- **Local Differential Privacy**: DP is applied on the client side before sending any + information to the server and the goal is to prevent the updates that are sent to the + server from leaking any information about the client's data. Central Differential Privacy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this approach, which is also known as user-level DP, the central server is responsible for adding noise to the globally aggregated parameters. It should be noted that trust in the server is required. 
+ +In this approach, which is also known as user-level DP, the central server is +responsible for adding noise to the globally aggregated parameters. It should be noted +that trust in the server is required. .. image:: ./_static/DP/CDP.png - :align: center - :width: 400 - :alt: Central Differential Privacy - -While there are various ways to implement central DP in federated learning, we concentrate on the algorithms proposed by [2] and [3]. -The overall approach is to clip the model updates sent by the clients and add some amount of noise to the aggregated model. -In each iteration, a random set of clients is chosen with a specific probability for training. -Each client performs local training on its own data. -The update of each client is then clipped by some value `S` (sensitivity `S`). -This would limit the impact of any individual client which is crucial for privacy and often beneficial for robustness. -A common approach to achieve this is by restricting the `L2` norm of the clients' model updates, ensuring that larger updates are scaled down to fit within the norm `S`. + :align: center + :width: 400 + :alt: Central Differential Privacy + +While there are various ways to implement central DP in federated learning, we +concentrate on the algorithms proposed by [2] and [3]. The overall approach is to clip +the model updates sent by the clients and add some amount of noise to the aggregated +model. In each iteration, a random set of clients is chosen with a specific probability +for training. Each client performs local training on its own data. The update of each +client is then clipped by some value `S` (sensitivity `S`). This would limit the impact +of any individual client which is crucial for privacy and often beneficial for +robustness. A common approach to achieve this is by restricting the `L2` norm of the +clients' model updates, ensuring that larger updates are scaled down to fit within the +norm `S`. .. 
image:: ./_static/DP/clipping.png - :align: center - :width: 300 - :alt: clipping + :align: center + :width: 300 + :alt: clipping -Afterwards, the Gaussian mechanism is used to add noise in order to distort the sum of all clients' updates. -The amount of noise is scaled to the sensitivity value to obtain a privacy guarantee. -The Gaussian mechanism is used with a noise sampled from `N (0, σ²)` where `σ = ( noise_scale * S ) / (number of sampled clients)`. +Afterwards, the Gaussian mechanism is used to add noise in order to distort the sum of +all clients' updates. The amount of noise is scaled to the sensitivity value to obtain a +privacy guarantee. The Gaussian mechanism is used with a noise sampled from `N (0, σ²)` +where `σ = ( noise_scale * S ) / (number of sampled clients)`. Clipping -^^^^^^^^ - -There are two forms of clipping commonly used in Central DP: Fixed Clipping and Adaptive Clipping. +++++++++ -- **Fixed Clipping** : A predefined fix threshold is set for the magnitude of clients' updates. Any update exceeding this threshold is clipped back to the threshold value. +There are two forms of clipping commonly used in Central DP: Fixed Clipping and Adaptive +Clipping. -- **Adaptive Clipping** : The clipping threshold dynamically adjusts based on the observed update distribution [4]. It means that the clipping value is tuned during the rounds with respect to the quantile of the update norm distribution. +- **Fixed Clipping** : A predefined fix threshold is set for the magnitude of clients' + updates. Any update exceeding this threshold is clipped back to the threshold value. +- **Adaptive Clipping** : The clipping threshold dynamically adjusts based on the + observed update distribution [4]. It means that the clipping value is tuned during the + rounds with respect to the quantile of the update norm distribution. 
-The choice between fixed and adaptive clipping depends on various factors such as privacy requirements, data distribution, model complexity, and others. +The choice between fixed and adaptive clipping depends on various factors such as +privacy requirements, data distribution, model complexity, and others. Local Differential Privacy ~~~~~~~~~~~~~~~~~~~~~~~~~~ -In this approach, each client is responsible for performing DP. -Local DP avoids the need for a fully trusted aggregator, but it should be noted that local DP leads to a decrease in accuracy but better privacy in comparison to central DP. +In this approach, each client is responsible for performing DP. Local DP avoids the need +for a fully trusted aggregator, but it should be noted that local DP leads to a decrease +in accuracy but better privacy in comparison to central DP. .. image:: ./_static/DP/LDP.png - :align: center - :width: 400 - :alt: Local Differential Privacy - + :align: center + :width: 400 + :alt: Local Differential Privacy In this explainer, we focus on two forms of achieving Local DP: -- Each client adds noise to the local updates before sending them to the server. To achieve (:math:`\epsilon`, :math:`\delta`)-DP, considering the sensitivity of the local model to be ∆, Gaussian noise is applied with a noise scale of σ where: +- Each client adds noise to the local updates before sending them to the server. To + achieve (:math:`\epsilon`, :math:`\delta`)-DP, considering the sensitivity of the + local model to be ∆, Gaussian noise is applied with a noise scale of σ where: .. math:: + \small \frac{∆ \times \sqrt{2 \times \log\left(\frac{1.25}{\delta}\right)}}{\epsilon} - -- Each client adds noise to the gradients of the model during the local training (DP-SGD). More specifically, in this approach, gradients are clipped and an amount of calibrated noise is injected into the gradients. - +- Each client adds noise to the gradients of the model during the local training + (DP-SGD). 
More specifically, in this approach, gradients are clipped and an amount of + calibrated noise is injected into the gradients. Please note that these two approaches are providing privacy at different levels. - **References:** [1] Dwork et al. The Algorithmic Foundations of Differential Privacy. diff --git a/doc/source/explanation-federated-evaluation.rst b/doc/source/explanation-federated-evaluation.rst index bcdca9bae700..c56a5d48b2f6 100644 --- a/doc/source/explanation-federated-evaluation.rst +++ b/doc/source/explanation-federated-evaluation.rst @@ -1,8 +1,8 @@ Federated evaluation ==================== -There are two main approaches to evaluating models in federated learning systems: centralized (or server-side) evaluation and federated (or client-side) evaluation. - +There are two main approaches to evaluating models in federated learning systems: +centralized (or server-side) evaluation and federated (or client-side) evaluation. Centralized Evaluation ---------------------- @@ -10,15 +10,17 @@ Centralized Evaluation Built-In Strategies ~~~~~~~~~~~~~~~~~~~ -All built-in strategies support centralized evaluation by providing an evaluation function during initialization. -An evaluation function is any function that can take the current global model parameters as input and return evaluation results: +All built-in strategies support centralized evaluation by providing an evaluation +function during initialization. An evaluation function is any function that can take the +current global model parameters as input and return evaluation results: .. 
code-block:: python - + from flwr.common import NDArrays, Scalar - + from typing import Dict, Optional, Tuple + def get_evaluate_fn(model): """Return an evaluation function for server-side evaluation.""" @@ -38,6 +40,7 @@ An evaluation function is any function that can take the current global model pa return evaluate + # Load and compile model for server-side parameter evaluation model = tf.keras.applications.EfficientNetB0( input_shape=(32, 32, 3), weights=None, classes=10 @@ -47,7 +50,7 @@ An evaluation function is any function that can take the current global model pa # Create strategy strategy = fl.server.strategy.FedAvg( - # ... other FedAvg arguments + # ... other FedAvg arguments evaluate_fn=get_evaluate_fn(model), ) @@ -57,9 +60,10 @@ An evaluation function is any function that can take the current global model pa Custom Strategies ~~~~~~~~~~~~~~~~~ -The :code:`Strategy` abstraction provides a method called :code:`evaluate` that can directly be used to evaluate the current global model parameters. -The current server implementation calls :code:`evaluate` after parameter aggregation and before federated evaluation (see next paragraph). - +The ``Strategy`` abstraction provides a method called ``evaluate`` that can directly be +used to evaluate the current global model parameters. The current server implementation +calls ``evaluate`` after parameter aggregation and before federated evaluation (see next +paragraph). Federated Evaluation -------------------- @@ -67,7 +71,8 @@ Federated Evaluation Implementing Federated Evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Client-side evaluation happens in the :code:`Client.evaluate` method and can be configured from the server side. +Client-side evaluation happens in the ``Client.evaluate`` method and can be configured +from the server side. .. code-block:: python @@ -79,9 +84,11 @@ Client-side evaluation happens in the :code:`Client.evaluate` method and can be def get_parameters(self, config): # ... 
+ pass def fit(self, parameters, config): # ... + pass def evaluate(self, parameters, config): """Evaluate parameters on the locally held test set.""" @@ -100,12 +107,27 @@ Client-side evaluation happens in the :code:`Client.evaluate` method and can be Configuring Federated Evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Federated evaluation can be configured from the server side. Built-in strategies support the following arguments: - -- :code:`fraction_evaluate`: a :code:`float` defining the fraction of clients that will be selected for evaluation. If :code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients are connected to the server, then :code:`10` will be randomly selected for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, federated evaluation will be disabled. -- :code:`min_evaluate_clients`: an :code:`int`: the minimum number of clients to be selected for evaluation. If :code:`fraction_evaluate` is set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and :code:`100` clients are connected to the server, then :code:`20` clients will be selected for evaluation. -- :code:`min_available_clients`: an :code:`int` that defines the minimum number of clients which need to be connected to the server before a round of federated evaluation can start. If fewer than :code:`min_available_clients` are connected to the server, the server will wait until more clients are connected before it continues to sample clients for evaluation. -- :code:`on_evaluate_config_fn`: a function that returns a configuration dictionary which will be sent to the selected clients. The function will be called during each round and provides a convenient way to customize client-side evaluation from the server side, for example, to configure the number of validation steps performed. +Federated evaluation can be configured from the server side. 
Built-in strategies support +the following arguments: + +- ``fraction_evaluate``: a ``float`` defining the fraction of clients that will be + selected for evaluation. If ``fraction_evaluate`` is set to ``0.1`` and ``100`` + clients are connected to the server, then ``10`` will be randomly selected for + evaluation. If ``fraction_evaluate`` is set to ``0.0``, federated evaluation will be + disabled. +- ``min_evaluate_clients``: an ``int``: the minimum number of clients to be selected for + evaluation. If ``fraction_evaluate`` is set to ``0.1``, ``min_evaluate_clients`` is + set to 20, and ``100`` clients are connected to the server, then ``20`` clients will + be selected for evaluation. +- ``min_available_clients``: an ``int`` that defines the minimum number of clients which + need to be connected to the server before a round of federated evaluation can start. + If fewer than ``min_available_clients`` are connected to the server, the server will + wait until more clients are connected before it continues to sample clients for + evaluation. +- ``on_evaluate_config_fn``: a function that returns a configuration dictionary which + will be sent to the selected clients. The function will be called during each round + and provides a convenient way to customize client-side evaluation from the server + side, for example, to configure the number of validation steps performed. .. code-block:: python @@ -118,6 +140,7 @@ Federated evaluation can be configured from the server side. Built-in strategies val_steps = 5 if server_round < 4 else 10 return {"val_steps": val_steps} + # Create strategy strategy = fl.server.strategy.FedAvg( # ... other FedAvg arguments @@ -130,11 +153,11 @@ Federated evaluation can be configured from the server side. 
Built-in strategies # Start Flower server for four rounds of federated learning fl.server.start_server(server_address="[::]:8080", strategy=strategy) - Evaluating Local Model Updates During Training ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Model parameters can also be evaluated during training. :code:`Client.fit` can return arbitrary evaluation results as a dictionary: +Model parameters can also be evaluated during training. ``Client.fit`` can return +arbitrary evaluation results as a dictionary: .. code-block:: python @@ -146,6 +169,7 @@ Model parameters can also be evaluated during training. :code:`Client.fit` can r def get_parameters(self, config): # ... + pass def fit(self, parameters, config): """Train parameters on the locally held training set.""" @@ -171,9 +195,12 @@ Model parameters can also be evaluated during training. :code:`Client.fit` can r def evaluate(self, parameters, config): # ... - + pass Full Code Example ----------------- -For a full code example that uses both centralized and federated evaluation, see the *Advanced TensorFlow Example* (the same approach can be applied to workloads implemented in any other framework): https://github.com/adap/flower/tree/main/examples/advanced-tensorflow +For a full code example that uses both centralized and federated evaluation, see the +*Advanced TensorFlow Example* (the same approach can be applied to workloads implemented +in any other framework): +https://github.com/adap/flower/tree/main/examples/advanced-tensorflow diff --git a/doc/source/explanation-flower-architecture.rst b/doc/source/explanation-flower-architecture.rst new file mode 100644 index 000000000000..e82da56dcefa --- /dev/null +++ b/doc/source/explanation-flower-architecture.rst @@ -0,0 +1,158 @@ +Flower Architecture +=================== + +This page explains the architecture of deployed Flower federated learning system. 
+ +In federated learning (FL), there is typically one server and a number of clients that +are connected to the server. This is often called a federation. + +The role of the server is to coordinate the training process. The role of each client is +to receive tasks from the server, execute those tasks and return the results back to the +server. + +This is sometimes called a hub-and-spoke topology: + +.. figure:: ./_static/flower-architecture-hub-and-spoke.svg + :align: center + :width: 600 + :alt: Hub-and-spoke topology in federated learning + :class: no-scaled-link + + Hub-and-spoke topology in federated learning (one server, multiple clients). + +In a real-world deployment, we typically want to run different projects on such a +federation. Each project could use different hyperparameters, different model +architectures, different aggregation strategies, or even different machine learning +frameworks like PyTorch and TensorFlow. + +This is why, in Flower, both the server side and the client side are split into two +parts. One part is long-lived and responsible for communicating across the network, the +other part is short-lived and executes task-specific code. + +A Flower `server` consists of **SuperLink** and ``ServerApp``: + +- **SuperLink**: a long-running process that forwards task instructions to clients + (SuperNodes) and receives task results back. +- ``ServerApp``: a short-lived process with project-spcific code that customizes all + server-side aspects of federated learning systems (client selection, client + configuration, result aggregation). This is what AI researchers and AI engineers write + when they build Flower apps. + +A Flower `client` consists of **SuperNode** and ``ClientApp``: + +- **SuperNode**: a long-running process that connects to the SuperLink, asks for tasks, + executes tasks (for example, "train this model on your local data") and returns task + results back to the SuperLink. 
+- ``ClientApp``: a short-lived process with project-specific code that customizes all + client-side aspects of federated learning systems (local model training and + evaluation, pre- and post-processing). This is what AI researchers and AI engineers + write when they build Flower apps. + +Why SuperNode and SuperLink? Well, in federated learning, the clients are the actual +stars of the show. They hold the training data and they run the actual training. This is +why Flower decided to name them **SuperNode**. The **SuperLink** is then responsible for +acting as the `missing link` between all those SuperNodes. + +.. figure:: ./_static/flower-architecture-basic-architecture.svg + :align: center + :width: 600 + :alt: Basic Flower architecture + :class: no-scaled-link + + The basic Flower architecture for federated learning. + +In a Flower app project, users will typically develop the ``ServerApp`` and the +``ClientApp``. All the network communication between `server` and `clients` is taken +care of by the SuperLink and SuperNodes. + +.. tip:: + + For more details, please refer to the |serverapp_link|_ and |clientapp_link|_ + documentation. + +With *multi-run*, multiple ``ServerApp``\s and ``ClientApp``\s are now capable of +running on the same federation consisting of a single long-running SuperLink and +multiple long-running SuperNodes. This is sometimes referred to as `multi-tenancy` or +`multi-job`. + +As shown in the figure below, two projects, each consisting of a ``ServerApp`` and a +``ClientApp``, could share the same SuperLink and SuperNodes. + +.. figure:: ./_static/flower-architecture-multi-run.svg + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture + :class: no-scaled-link + + Multi-tenancy federated learning architecture with Flower + +To illustrate how multi-run works, consider one federated learning training run where a +``ServerApp`` and a ``ClientApp`` are participating in ``[run 1]``. 
Note that a +SuperNode will only run a ``ClientApp`` if it is selected to participate in the training +run. + +In ``[run 1]`` below, all the SuperNodes are selected and therefore run their +corresponding ``ClientApp``\s: + +.. figure:: ./_static/flower-architecture-multi-run-1.svg + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture - Run 1 + :class: no-scaled-link + + Run 1 in a multi-run federated learning architecture with Flower. All SuperNodes + participate in the training round. + +However, in ``[run 2]``, only the first and third SuperNodes are selected to participate +in the training: + +.. figure:: ./_static/flower-architecture-multi-run-2.svg + :align: center + :width: 600 + :alt: Multi-tenancy federated learning architecture - Run 2 + :class: no-scaled-link + + Run 2 in a multi-run federated learning architecture with Flower. Only the first and + third SuperNodes are selected to participate in the training round. + +Therefore, with Flower multi-run, different projects (each consisting of a ``ServerApp`` +and ``ClientApp``) can run on different sets of clients. + +To help you start and manage all of the concurrently executing training runs, Flower +offers one additional long-running server-side service called **SuperExec**. When you +type ``flwr run`` to start a new training run, the ``flwr`` CLI bundles your local +project (mainly your ``ServerApp`` and ``ClientApp``) and sends it to the **SuperExec**. +The **SuperExec** will then take care of starting and managing your ``ServerApp``, which +in turn selects SuperNodes to execute your ``ClientApp``. + +This architecture allows many users to (concurrently) run their projects on the same +federation, simply by typing ``flwr run`` on their local developer machine. + +.. 
figure:: ./_static/flower-architecture-deployment-engine.svg + :align: center + :width: 800 + :alt: Flower Deployment Engine with SuperExec + :class: no-scaled-link + + The SuperExec service for managing concurrent training runs in Flower. + +.. note:: + + This explanation covers the Flower Deployment Engine. An explanation covering the + Flower Simulation Engine will follow. + +.. important:: + + As we continue to enhance Flower at a rapid pace, we'll periodically update this + explainer document. Feel free to share any feedback with us. + +.. |clientapp_link| replace:: ``ClientApp`` + +.. |serverapp_link| replace:: ``ServerApp`` + +.. _clientapp_link: ref-api/flwr.client.ClientApp.html + +.. _serverapp_link: ref-api/flwr.server.ServerApp.html + +.. meta:: + :description: Explore the federated learning architecture of the Flower framework, featuring multi-run, concurrent execution, and scalable, secure machine learning while preserving data privacy. diff --git a/doc/source/how-to-aggregate-evaluation-results.rst b/doc/source/how-to-aggregate-evaluation-results.rst index fa4ba88b8ff0..be6e20068c88 100644 --- a/doc/source/how-to-aggregate-evaluation-results.rst +++ b/doc/source/how-to-aggregate-evaluation-results.rst @@ -1,14 +1,15 @@ Aggregate evaluation results ============================ -The Flower server does not prescribe a way to aggregate evaluation results, but it enables the user to fully customize result aggregation. - +The Flower server does not prescribe a way to aggregate evaluation results, but it +enables the user to fully customize result aggregation. Aggregate Custom Evaluation Results ----------------------------------- -The same :code:`Strategy`-customization approach can be used to aggregate custom evaluation results coming from individual clients. 
-Clients can return custom metrics to the server by returning a dictionary: +The same ``Strategy``-customization approach can be used to aggregate custom evaluation +results coming from individual clients. Clients can return custom metrics to the server +by returning a dictionary: .. code-block:: python @@ -16,9 +17,11 @@ Clients can return custom metrics to the server by returning a dictionary: def get_parameters(self, config): # ... + pass def fit(self, parameters, config): # ... + pass def evaluate(self, parameters, config): """Evaluate parameters on the locally held test set.""" @@ -33,7 +36,8 @@ Clients can return custom metrics to the server by returning a dictionary: num_examples_test = len(self.x_test) return loss, num_examples_test, {"accuracy": accuracy} -The server can then use a customized strategy to aggregate the metrics provided in these dictionaries: +The server can then use a customized strategy to aggregate the metrics provided in these +dictionaries: .. code-block:: python @@ -50,7 +54,9 @@ The server can then use a customized strategy to aggregate the metrics provided return None, {} # Call aggregate_evaluate from base class (FedAvg) to aggregate loss and metrics - aggregated_loss, aggregated_metrics = super().aggregate_evaluate(server_round, results, failures) + aggregated_loss, aggregated_metrics = super().aggregate_evaluate( + server_round, results, failures + ) # Weigh accuracy of each client by number of examples used accuracies = [r.metrics["accuracy"] * r.num_examples for _, r in results] @@ -58,11 +64,14 @@ The server can then use a customized strategy to aggregate the metrics provided # Aggregate and print custom metric aggregated_accuracy = sum(accuracies) / sum(examples) - print(f"Round {server_round} accuracy aggregated from client results: {aggregated_accuracy}") + print( + f"Round {server_round} accuracy aggregated from client results: {aggregated_accuracy}" + ) # Return aggregated loss and metrics (i.e., aggregated accuracy) 
return aggregated_loss, {"accuracy": aggregated_accuracy} + # Create strategy and run server strategy = AggregateCustomMetricStrategy( # (same arguments as FedAvg here) diff --git a/doc/source/how-to-authenticate-supernodes.rst b/doc/source/how-to-authenticate-supernodes.rst index 73987261b29f..a2dd499dbc10 100644 --- a/doc/source/how-to-authenticate-supernodes.rst +++ b/doc/source/how-to-authenticate-supernodes.rst @@ -1,79 +1,104 @@ Authenticate SuperNodes ======================= -Flower has built-in support for authenticated SuperNodes that you can use to verify the identities of each SuperNode connecting to a SuperLink. -Flower node authentication works similar to how GitHub SSH authentication works: +Flower has built-in support for authenticated SuperNodes that you can use to verify the +identities of each SuperNode connecting to a SuperLink. Flower node authentication works +similar to how GitHub SSH authentication works: -* SuperLink (server) stores a list of known (client) node public keys -* Using ECDH, both SuperNode and SuperLink independently derive a shared secret -* Shared secret is used to compute the HMAC value of the message sent from SuperNode to SuperLink as a token -* SuperLink verifies the token +- SuperLink (server) stores a list of known (client) node public keys +- Using ECDH, both SuperNode and SuperLink independently derive a shared secret +- Shared secret is used to compute the HMAC value of the message sent from SuperNode to + SuperLink as a token +- SuperLink verifies the token -We recommend you to check out the complete `code example `_ demonstrating federated learning with Flower in an authenticated setting. +We recommend you to check out the complete `code example +`_ +demonstrating federated learning with Flower in an authenticated setting. .. note:: + This guide covers a preview feature that might change in future versions of Flower. .. 
note:: - For increased security, node authentication can only be used when encrypted connections (SSL/TLS) are enabled. -Enable node authentication in :code:`SuperLink` ------------------------------------------------ + For increased security, node authentication can only be used when encrypted + connections (SSL/TLS) are enabled. + +Enable node authentication in ``SuperLink`` +------------------------------------------- -To enable node authentication, first you need to configure SSL/TLS connections to secure the SuperLink<>SuperNode communication. You can find the complete guide -`here `_. -After configuring secure connections, you can enable client authentication in a long-running Flower :code:`SuperLink`. -Use the following terminal command to start a Flower :code:`SuperNode` that has both secure connections and node authentication enabled: +To enable node authentication, first you need to configure SSL/TLS connections to secure +the SuperLink<>SuperNode communication. You can find the complete guide `here +`_. After +configuring secure connections, you can enable client authentication in a long-running +Flower ``SuperLink``. Use the following terminal command to start a Flower ``SuperNode`` +that has both secure connections and node authentication enabled: .. code-block:: bash flower-superlink - --ssl-ca-certfile certificates/ca.crt - --ssl-certfile certificates/server.pem + --ssl-ca-certfile certificates/ca.crt + --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key --auth-list-public-keys keys/client_public_keys.csv --auth-superlink-private-key keys/server_credentials --auth-superlink-public-key keys/server_credentials.pub - + Let's break down the authentication flags: -1. The first flag :code:`--auth-list-public-keys` expects a path to a CSV file storing all known node public keys. You need to store all known node public keys that are allowed to participate in a federation in one CSV file (:code:`.csv`). +1. 
The first flag ``--auth-list-public-keys`` expects a path to a CSV file storing all + known node public keys. You need to store all known node public keys that are allowed + to participate in a federation in one CSV file (``.csv``). - A valid CSV file storing known node public keys should list the keys in OpenSSH format, separated by commas and without any comments. For an example, refer to our code sample, which contains a CSV file with two known node public keys. + A valid CSV file storing known node public keys should list the keys in OpenSSH + format, separated by commas and without any comments. For an example, refer to + our code sample, which contains a CSV file with two known node public keys. -2. The second and third flags :code:`--auth-superlink-private-key` and :code:`--auth-superlink-public-key` expect paths to the server's private and public keys. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. +2. The second and third flags ``--auth-superlink-private-key`` and + ``--auth-superlink-public-key`` expect paths to the server's private and public keys. + For development purposes, you can generate a private and public key pair using + ``ssh-keygen -t ecdsa -b 384``. .. note:: - In Flower 1.9, there is no support for dynamically removing, editing, or adding known node public keys to the SuperLink. - To change the set of known nodes, you need to shut the server down, edit the CSV file, and start the server again. - Support for dynamically changing the set of known nodes is on the roadmap to be released in Flower 1.10 (ETA: June). + In Flower 1.9, there is no support for dynamically removing, editing, or adding + known node public keys to the SuperLink. To change the set of known nodes, you need + to shut the server down, edit the CSV file, and start the server again. Support for + dynamically changing the set of known nodes is on the roadmap to be released in + Flower 1.10 (ETA: June). 
-Enable node authentication in :code:`SuperNode` -------------------------------------------------- +Enable node authentication in ``SuperNode`` +------------------------------------------- -Similar to the long-running Flower server (:code:`SuperLink`), you can easily enable node authentication in the long-running Flower client (:code:`SuperNode`). -Use the following terminal command to start an authenticated :code:`SuperNode`: +Similar to the long-running Flower server (``SuperLink``), you can easily enable node +authentication in the long-running Flower client (``SuperNode``). Use the following +terminal command to start an authenticated ``SuperNode``: .. code-block:: bash - - flower-client-app client:app - --root-certificates certificates/ca.crt - --superlink 127.0.0.1:9092 - --auth-supernode-private-key keys/client_credentials - --auth-supernode-public-key keys/client_credentials.pub -The :code:`--auth-supernode-private-key` flag expects a path to the node's private key file and the :code:`--auth-supernode-public-key` flag expects a path to the node's public key file. For development purposes, you can generate a private and public key pair using :code:`ssh-keygen -t ecdsa -b 384`. + flower-supernode + --root-certificates certificates/ca.crt + --superlink 127.0.0.1:9092 + --auth-supernode-private-key keys/client_credentials + --auth-supernode-public-key keys/client_credentials.pub +The ``--auth-supernode-private-key`` flag expects a path to the node's private key file +and the ``--auth-supernode-public-key`` flag expects a path to the node's public key +file. For development purposes, you can generate a private and public key pair using +``ssh-keygen -t ecdsa -b 384``. Security notice --------------- -The system's security relies on the credentials of the SuperLink and each SuperNode. Therefore, it is imperative to safeguard and safely store the credentials to avoid security risks such as Public Key Infrastructure (PKI) impersonation attacks. 
-The node authentication mechanism also involves human interaction, so please ensure that all of the communication is done in a secure manner, using trusted communication methods. - +The system's security relies on the credentials of the SuperLink and each SuperNode. +Therefore, it is imperative to safeguard and safely store the credentials to avoid +security risks such as Public Key Infrastructure (PKI) impersonation attacks. The node +authentication mechanism also involves human interaction, so please ensure that all of +the communication is done in a secure manner, using trusted communication methods. Conclusion ---------- -You should now have learned how to start a long-running Flower server (:code:`SuperLink`) and client (:code:`SuperNode`) with node authentication enabled. You should also know the significance of the private key and store it safely to minimize security risks. +You should now have learned how to start a long-running Flower server (``SuperLink``) +and client (``SuperNode``) with node authentication enabled. You should also know the +significance of the private key and store it safely to minimize security risks. diff --git a/doc/source/how-to-configure-clients.rst b/doc/source/how-to-configure-clients.rst index ff0a2f4033df..c950ab3be9e7 100644 --- a/doc/source/how-to-configure-clients.rst +++ b/doc/source/how-to-configure-clients.rst @@ -1,37 +1,55 @@ Configure clients ================= -Along with model parameters, Flower can send configuration values to clients. Configuration values can be used for various purposes. They are, for example, a popular way to control client-side hyperparameters from the server. +Along with model parameters, Flower can send configuration values to clients. +Configuration values can be used for various purposes. They are, for example, a popular +way to control client-side hyperparameters from the server. 
Configuration values -------------------- -Configuration values are represented as a dictionary with ``str`` keys and values of type ``bool``, ``bytes``, ``double`` (64-bit precision float), ``int``, or ``str`` (or equivalent types in different languages). Here is an example of a configuration dictionary in Python: +Configuration values are represented as a dictionary with ``str`` keys and values of +type ``bool``, ``bytes``, ``double`` (64-bit precision float), ``int``, or ``str`` (or +equivalent types in different languages). Here is an example of a configuration +dictionary in Python: .. code-block:: python config_dict = { - "dropout": True, # str key, bool value + "dropout": True, # str key, bool value "learning_rate": 0.01, # str key, float value - "batch_size": 32, # str key, int value - "optimizer": "sgd", # str key, str value + "batch_size": 32, # str key, int value + "optimizer": "sgd", # str key, str value } -Flower serializes these configuration dictionaries (or *config dict* for short) to their ProtoBuf representation, transports them to the client using gRPC, and then deserializes them back to Python dictionaries. +Flower serializes these configuration dictionaries (or *config dict* for short) to their +ProtoBuf representation, transports them to the client using gRPC, and then deserializes +them back to Python dictionaries. .. note:: - Currently, there is no support for directly sending collection types (e.g., ``Set``, ``List``, ``Map``) as values in configuration dictionaries. There are several workarounds to send collections as values by converting them to one of the supported value types (and converting them back on the client-side). - - One can, for example, convert a list of floating-point numbers to a JSON string, then send the JSON string using the configuration dictionary, and then convert the JSON string back to a list of floating-point numbers on the client. 
+ Currently, there is no support for directly sending collection types (e.g., ``Set``, + ``List``, ``Map``) as values in configuration dictionaries. There are several + workarounds to send collections as values by converting them to one of the supported + value types (and converting them back on the client-side). + One can, for example, convert a list of floating-point numbers to a JSON string, + then send the JSON string using the configuration dictionary, and then convert the + JSON string back to a list of floating-point numbers on the client. Configuration through built-in strategies ----------------------------------------- -The easiest way to send configuration values to clients is to use a built-in strategy like :code:`FedAvg`. Built-in strategies support so-called configuration functions. A configuration function is a function that the built-in strategy calls to get the configuration dictionary for the current round. It then forwards the configuration dictionary to all the clients selected during that round. +The easiest way to send configuration values to clients is to use a built-in strategy +like ``FedAvg``. Built-in strategies support so-called configuration functions. A +configuration function is a function that the built-in strategy calls to get the +configuration dictionary for the current round. It then forwards the configuration +dictionary to all the clients selected during that round. -Let's start with a simple example. Imagine we want to send (a) the batch size that the client should use, (b) the current global round of federated learning, and (c) the number of epochs to train on the client-side. Our configuration function could look like this: +Let's start with a simple example. Imagine we want to send (a) the batch size that the +client should use, (b) the current global round of federated learning, and (c) the +number of epochs to train on the client-side. Our configuration function could look like +this: .. 
code-block:: python @@ -44,12 +62,13 @@ Let's start with a simple example. Imagine we want to send (a) the batch size th } return config -To make the built-in strategies use this function, we can pass it to ``FedAvg`` during initialization using the parameter :code:`on_fit_config_fn`: +To make the built-in strategies use this function, we can pass it to ``FedAvg`` during +initialization using the parameter ``on_fit_config_fn``: .. code-block:: python strategy = FedAvg( - ..., # Other FedAvg parameters + ..., # Other FedAvg parameters on_fit_config_fn=fit_config, # The fit_config function we defined earlier ) @@ -64,9 +83,15 @@ One the client side, we receive the configuration dictionary in ``fit``: print(config["local_epochs"]) # Prints `2` # ... (rest of `fit` method) -There is also an `on_evaluate_config_fn` to configure evaluation, which works the same way. They are separate functions because one might want to send different configuration values to `evaluate` (for example, to use a different batch size). +There is also an `on_evaluate_config_fn` to configure evaluation, which works the same +way. They are separate functions because one might want to send different configuration +values to `evaluate` (for example, to use a different batch size). -The built-in strategies call this function every round (that is, every time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling `on_evaluate_config_fn` every round allows us to vary/change the config dict over consecutive rounds. If we wanted to implement a hyperparameter schedule, for example, to increase the number of local epochs during later rounds, we could do the following: +The built-in strategies call this function every round (that is, every time +`Strategy.configure_fit` or `Strategy.configure_evaluate` runs). Calling +`on_evaluate_config_fn` every round allows us to vary/change the config dict over +consecutive rounds. 
If we wanted to implement a hyperparameter schedule, for example, to +increase the number of local epochs during later rounds, we could do the following: .. code-block:: python @@ -79,14 +104,19 @@ The built-in strategies call this function every round (that is, every time `Str } return config -The :code:`FedAvg` strategy will call this function *every round*. +The ``FedAvg`` strategy will call this function *every round*. Configuring individual clients ------------------------------ -In some cases, it is necessary to send different configuration values to different clients. +In some cases, it is necessary to send different configuration values to different +clients. -This can be achieved by customizing an existing strategy or by :doc:`implementing a custom strategy from scratch `. Here's a nonsensical example that customizes :code:`FedAvg` by adding a custom ``"hello": "world"`` configuration key/value pair to the config dict of a *single client* (only the first client in the list, the other clients in this round to not receive this "special" config value): +This can be achieved by customizing an existing strategy or by :doc:`implementing a +custom strategy from scratch `. Here's a nonsensical +example that customizes ``FedAvg`` by adding a custom ``"hello": "world"`` configuration +key/value pair to the config dict of a *single client* (only the first client in the +list, the other clients in this round to not receive this "special" config value): .. 
code-block:: python @@ -94,7 +124,9 @@ This can be achieved by customizing an existing strategy or by :doc:`implementin def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, FitIns]]: - client_instructions = super().configure_fit(server_round, parameters, client_manager) + client_instructions = super().configure_fit( + server_round, parameters, client_manager + ) # Add special "hello": "world" config key/value pair, # but only to the first client in the list @@ -103,6 +135,7 @@ This can be achieved by customizing an existing strategy or by :doc:`implementin return client_instructions + # Create strategy and run server strategy = CustomClientConfigStrategy( # ... (same arguments as plain FedAvg here) diff --git a/doc/source/how-to-configure-logging.rst b/doc/source/how-to-configure-logging.rst index d5559429a73c..bb7461390b42 100644 --- a/doc/source/how-to-configure-logging.rst +++ b/doc/source/how-to-configure-logging.rst @@ -1,17 +1,19 @@ Configure logging ================= -The Flower logger keeps track of all core events that take place in federated learning workloads. -It presents information by default following a standard message format: +The Flower logger keeps track of all core events that take place in federated learning +workloads. It presents information by default following a standard message format: .. code-block:: python DEFAULT_FORMATTER = logging.Formatter( - "%(levelname)s %(name)s %(asctime)s | %(filename)s:%(lineno)d | %(message)s" + "%(levelname)s %(name)s %(asctime)s | %(filename)s:%(lineno)d | %(message)s" ) -containing relevant information including: log message level (e.g. :code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging took place from, as well as the log message itself. -In this way, the logger would typically display information on your terminal as follows: +containing relevant information including: log message level (e.g. 
``INFO``, ``DEBUG``), +a timestamp, the line where the logging took place from, as well as the log message +itself. In this way, the logger would typically display information on your terminal as +follows: .. code-block:: bash @@ -29,29 +31,35 @@ In this way, the logger would typically display information on your terminal as INFO flwr 2023-07-15 15:32:36,118 | server.py:125 | fit progress: (5, 358.6936808824539, {'accuracy': 0.3467}, 18.964264554999318) ... - Saving log to file -------------------- +------------------ -By default, the Flower log is outputted to the terminal where you launch your Federated Learning workload from. This applies for both gRPC-based federation (i.e. when you do :code:`fl.server.start_server`) and when using the :code:`VirtualClientEngine` (i.e. when you do :code:`fl.simulation.start_simulation`). -In some situations you might want to save this log to disk. You can do so by calling the `fl.common.logger.configure() `_ function. For example: +By default, the Flower log is outputted to the terminal where you launch your Federated +Learning workload from. This applies for both gRPC-based federation (i.e. when you do +``fl.server.start_server``) and when using the ``VirtualClientEngine`` (i.e. when you do +``fl.simulation.start_simulation``). In some situations you might want to save this log +to disk. You can do so by calling the `fl.common.logger.configure() +`_ function. For +example: .. code-block:: python - - import flwr as fl - - ... - # in your main file and before launching your experiment - # add an identifier to your logger - # then specify the name of the file where the log should be outputted to - fl.common.logger.configure(identifier="myFlowerExperiment", filename="log.txt") + import flwr as fl + + ... - # then start your workload - fl.simulation.start_simulation(...) # or fl.server.start_server(...) 
+ # in your main file and before launching your experiment + # add an identifier to your logger + # then specify the name of the file where the log should be outputted to + fl.common.logger.configure(identifier="myFlowerExperiment", filename="log.txt") -With the above, Flower will record the log you see on your terminal to :code:`log.txt`. This file will be created in the same directory as were you are running the code from. -If we inspect we see the log above is also recorded but prefixing with :code:`identifier` each line: + # then start your workload + fl.simulation.start_simulation(...) # or fl.server.start_server(...) + +With the above, Flower will record the log you see on your terminal to ``log.txt``. This +file will be created in the same directory as where you are running the code from. If we +inspect we see the log above is also recorded but prefixing with ``identifier`` each +line: .. code-block:: bash @@ -69,12 +77,11 @@ If we inspect we see the log above is also recorded but prefixing with :code:`id myFlowerExperiment | INFO flwr 2023-07-15 15:32:36,118 | server.py:125 | fit progress: (5, 358.6936808824539, {'accuracy': 0.3467}, 18.964264554999318) ... - Log your own messages --------------------- -You might expand the information shown by default with the Flower logger by adding more messages relevant to your application. -You can achieve this easily as follows. +You might expand the information shown by default with the Flower logger by adding more +messages relevant to your application. You can achieve this easily as follows. .. code-block:: python @@ -84,25 +91,31 @@ You can achieve this easily as follows. # For example, let's say you want to add to the log some info about the training on your client for debugging purposes + class FlowerClient(fl.client.NumPyClient): - def __init__(self, cid: int ...): + def __init__( + self, + cid: int, + # ... + ): self.cid = cid - self.net = ... - ... + self.net = net + # ...
def fit(self, parameters, config): log(INFO, f"Printing a custom INFO message at the start of fit() :)") - + set_params(self.net, parameters) log(DEBUG, f"Client {self.cid} is doing fit() with config: {config}") - ... + # ... -In this way your logger will show, in addition to the default messages, the ones introduced by the clients as specified above. +In this way your logger will show, in addition to the default messages, the ones +introduced by the clients as specified above. .. code-block:: bash - + ... INFO flwr 2023-07-15 16:18:21,726 | server.py:89 | Initializing global parameters INFO flwr 2023-07-15 16:18:21,726 | server.py:276 | Requesting initial parameters from one random client @@ -123,10 +136,13 @@ In this way your logger will show, in addition to the default messages, the ones DEBUG flwr 2023-07-15 16:18:28,617 | main.py:63 | Client 13 is doing fit() with config: {'epochs': 5, 'batch_size': 64} ... - Log to a remote service ----------------------- -The :code:`fl.common.logger.configure` function, also allows specifying a host to which logs can be pushed (via :code:`POST`) through a native Python :code:`logging.handler.HTTPHandler`. -This is a particularly useful feature in :code:`gRPC`-based Federated Learning workloads where otherwise gathering logs from all entities (i.e. the server and the clients) might be cumbersome. -Note that in Flower simulation, the server automatically displays all logs. You can still specify a :code:`HTTPHandler` should you wish to backup or analyze the logs somewhere else. +The ``fl.common.logger.configure`` function, also allows specifying a host to which logs +can be pushed (via ``POST``) through a native Python ``logging.handler.HTTPHandler``. +This is a particularly useful feature in ``gRPC``-based Federated Learning workloads +where otherwise gathering logs from all entities (i.e. the server and the clients) might +be cumbersome. Note that in Flower simulation, the server automatically displays all +logs. 
You can still specify a ``HTTPHandler`` should you wish to backup or analyze the +logs somewhere else. diff --git a/doc/source/how-to-enable-ssl-connections.rst b/doc/source/how-to-enable-ssl-connections.rst index 1828f4ed3258..cd8590bc3436 100644 --- a/doc/source/how-to-enable-ssl-connections.rst +++ b/doc/source/how-to-enable-ssl-connections.rst @@ -1,80 +1,84 @@ Enable SSL connections ====================== -This guide describes how to a SSL-enabled secure Flower server (:code:`SuperLink`) can be started and -how a Flower client (:code:`SuperNode`) can establish a secure connections to it. +This guide describes how an SSL-enabled secure Flower server (``SuperLink``) can be +started and how a Flower client (``SuperNode``) can establish a secure connection to +it. -A complete code example demonstrating a secure connection can be found -`here `_. - -The code example comes with a :code:`README.md` file which explains how to start it. Although it is -already SSL-enabled, it might be less descriptive on how it does so. Stick to this guide for a deeper -introduction to the topic. +A complete code example demonstrating a secure connection can be found `here +`_. +The code example comes with a ``README.md`` file which explains how to start it. +Although it is already SSL-enabled, it might be less descriptive on how it does so. +Stick to this guide for a deeper introduction to the topic. Certificates ------------ -Using SSL-enabled connections requires certificates to be passed to the server and client. For -the purpose of this guide we are going to generate self-signed certificates. As this can become -quite complex we are going to ask you to run the script in -:code:`examples/advanced-tensorflow/certificates/generate.sh` -with the following command sequence: +Using SSL-enabled connections requires certificates to be passed to the server and +client. For the purpose of this guide we are going to generate self-signed certificates.
+As this can become quite complex we are going to ask you to run the script in +``examples/advanced-tensorflow/certificates/generate.sh`` with the following command +sequence: .. code-block:: bash - cd examples/advanced-tensorflow/certificates - ./generate.sh - -This will generate the certificates in :code:`examples/advanced-tensorflow/.cache/certificates`. + cd examples/advanced-tensorflow/certificates + ./generate.sh -The approach for generating SSL certificates in the context of this example can serve as an inspiration and -starting point, but it should not be used as a reference for production environments. Please refer to other -sources regarding the issue of correctly generating certificates for production environments. -For non-critical prototyping or research projects, it might be sufficient to use the self-signed certificates generated using -the scripts mentioned in this guide. +This will generate the certificates in +``examples/advanced-tensorflow/.cache/certificates``. +The approach for generating SSL certificates in the context of this example can serve as +an inspiration and starting point, but it should not be used as a reference for +production environments. Please refer to other sources regarding the issue of correctly +generating certificates for production environments. For non-critical prototyping or +research projects, it might be sufficient to use the self-signed certificates generated +using the scripts mentioned in this guide. Server (SuperLink) ------------------ -Use the following terminal command to start a sever (SuperLink) that uses the previously generated certificates: +Use the following terminal command to start a server (SuperLink) that uses the previously +generated certificates: ..
code-block:: bash - flower-superlink - --ssl-ca-certfile certificates/ca.crt - --ssl-certfile certificates/server.pem + flower-superlink + --ssl-ca-certfile certificates/ca.crt + --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key -When providing certificates, the server expects a tuple of three certificates paths: CA certificate, server certificate and server private key. - +When providing certificates, the server expects a tuple of three certificates paths: CA +certificate, server certificate and server private key. Client (SuperNode) ------------------ -Use the following terminal command to start a client (SuperNode) that uses the previously generated certificates: +Use the following terminal command to start a client (SuperNode) that uses the +previously generated certificates: .. code-block:: bash - flower-client-app client:app - --root-certificates certificates/ca.crt - --superlink 127.0.0.1:9092 - -When setting :code:`root_certificates`, the client expects a file path to PEM-encoded root certificates. + flower-supernode + --root-certificates certificates/ca.crt + --superlink 127.0.0.1:9092 +When setting ``root_certificates``, the client expects a file path to PEM-encoded root +certificates. Conclusion ---------- -You should now have learned how to generate self-signed certificates using the given script, start an -SSL-enabled server and have a client establish a secure connection to it. - +You should now have learned how to generate self-signed certificates using the given +script, start an SSL-enabled server and have a client establish a secure connection to +it. 
Additional resources -------------------- -These additional sources might be relevant if you would like to dive deeper into the topic of certificates: +These additional sources might be relevant if you would like to dive deeper into the +topic of certificates: -* `Let's Encrypt `_ -* `certbot `_ +- `Let's Encrypt `_ +- `certbot `_ diff --git a/doc/source/how-to-implement-strategies.rst b/doc/source/how-to-implement-strategies.rst index 01bbb3042973..075d8a0116c4 100644 --- a/doc/source/how-to-implement-strategies.rst +++ b/doc/source/how-to-implement-strategies.rst @@ -1,22 +1,21 @@ Implement strategies ==================== -The strategy abstraction enables implementation of fully custom strategies. A -strategy is basically the federated learning algorithm that runs on the server. -Strategies decide how to sample clients, how to configure clients for training, -how to aggregate updates, and how to evaluate models. Flower provides a few -built-in strategies which are based on the same API described below. +The strategy abstraction enables implementation of fully custom strategies. A strategy +is basically the federated learning algorithm that runs on the server. Strategies decide +how to sample clients, how to configure clients for training, how to aggregate updates, +and how to evaluate models. Flower provides a few built-in strategies which are based on +the same API described below. -The :code:`Strategy` abstraction --------------------------------- +The ``Strategy`` abstraction +---------------------------- All strategy implementation are derived from the abstract base class -:code:`flwr.server.strategy.Strategy`, both built-in implementations and third -party implementations. This means that custom strategy implementations have the -exact same capabilities at their disposal as built-in ones. +``flwr.server.strategy.Strategy``, both built-in implementations and third party +implementations. 
This means that custom strategy implementations have the exact same +capabilities at their disposal as built-in ones. -The strategy abstraction defines a few abstract methods that need to be -implemented: +The strategy abstraction defines a few abstract methods that need to be implemented: .. code-block:: python @@ -31,10 +30,7 @@ implemented: @abstractmethod def configure_fit( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" @@ -49,10 +45,7 @@ implemented: @abstractmethod def configure_evaluate( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" @@ -71,31 +64,35 @@ implemented: ) -> Optional[Tuple[float, Dict[str, Scalar]]]: """Evaluate the current model parameters.""" - -Creating a new strategy means implementing a new :code:`class` (derived from the -abstract base class :code:`Strategy`) that implements for the previously shown -abstract methods: +Creating a new strategy means implementing a new ``class`` (derived from the abstract +base class ``Strategy``) that implements for the previously shown abstract methods: .. 
code-block:: python class SotaStrategy(Strategy): def initialize_parameters(self, client_manager): # Your implementation here + pass def configure_fit(self, server_round, parameters, client_manager): # Your implementation here + pass def aggregate_fit(self, server_round, results, failures): # Your implementation here + pass def configure_evaluate(self, server_round, parameters, client_manager): # Your implementation here + pass def aggregate_evaluate(self, server_round, results, failures): # Your implementation here + pass def evaluate(self, parameters): # Your implementation here + pass The Flower server calls these methods in the following order: @@ -176,12 +173,15 @@ The Flower server calls these methods in the following order: The following sections describe each of those methods in more detail. -The :code:`initialize_parameters` method ----------------------------------------- +The ``initialize_parameters`` method +------------------------------------ -:code:`initialize_parameters` is called only once, at the very beginning of an execution. It is responsible for providing the initial global model parameters in a serialized form (i.e., as a :code:`Parameters` object). +``initialize_parameters`` is called only once, at the very beginning of an execution. It +is responsible for providing the initial global model parameters in a serialized form +(i.e., as a ``Parameters`` object). -Built-in strategies return user-provided initial parameters. The following example shows how initial parameters can be passed to :code:`FedAvg`: +Built-in strategies return user-provided initial parameters. The following example shows +how initial parameters can be passed to ``FedAvg``: .. code-block:: python @@ -200,49 +200,68 @@ Built-in strategies return user-provided initial parameters. 
The following examp # Serialize ndarrays to `Parameters` parameters = fl.common.ndarrays_to_parameters(weights) - # Use the serialized parameters as the initial global parameters + # Use the serialized parameters as the initial global parameters strategy = fl.server.strategy.FedAvg( initial_parameters=parameters, ) fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -The Flower server will call :code:`initialize_parameters`, which either returns the parameters that were passed to :code:`initial_parameters`, or :code:`None`. If no parameters are returned from :code:`initialize_parameters` (i.e., :code:`None`), the server will randomly select one client and ask it to provide its parameters. This is a convenience feature and not recommended in practice, but it can be useful for prototyping. In practice, it is recommended to always use server-side parameter initialization. +The Flower server will call ``initialize_parameters``, which either returns the +parameters that were passed to ``initial_parameters``, or ``None``. If no parameters are +returned from ``initialize_parameters`` (i.e., ``None``), the server will randomly +select one client and ask it to provide its parameters. This is a convenience feature +and not recommended in practice, but it can be useful for prototyping. In practice, it +is recommended to always use server-side parameter initialization. .. note:: - Server-side parameter initialization is a powerful mechanism. It can be used, for example, to resume training from a previously saved checkpoint. It is also the fundamental capability needed to implement hybrid approaches, for example, to fine-tune a pre-trained model using federated learning. + Server-side parameter initialization is a powerful mechanism. It can be used, for + example, to resume training from a previously saved checkpoint. 
It is also the + fundamental capability needed to implement hybrid approaches, for example, to + fine-tune a pre-trained model using federated learning. -The :code:`configure_fit` method --------------------------------- +The ``configure_fit`` method +---------------------------- -:code:`configure_fit` is responsible for configuring the upcoming round of training. What does *configure* mean in this context? Configuring a round means selecting clients and deciding what instructions to send to these clients. The signature of :code:`configure_fit` makes this clear: +``configure_fit`` is responsible for configuring the upcoming round of training. What +does *configure* mean in this context? Configuring a round means selecting clients and +deciding what instructions to send to these clients. The signature of ``configure_fit`` +makes this clear: .. code-block:: python @abstractmethod def configure_fit( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" -The return value is a list of tuples, each representing the instructions that will be sent to a particular client. Strategy implementations usually perform the following steps in :code:`configure_fit`: +The return value is a list of tuples, each representing the instructions that will be +sent to a particular client. 
Strategy implementations usually perform the following +steps in ``configure_fit``: -* Use the :code:`client_manager` to randomly sample all (or a subset of) available clients (each represented as a :code:`ClientProxy` object) -* Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the current global model :code:`parameters` and :code:`config` dict +- Use the ``client_manager`` to randomly sample all (or a subset of) available clients + (each represented as a ``ClientProxy`` object) +- Pair each ``ClientProxy`` with the same ``FitIns`` holding the current global model + ``parameters`` and ``config`` dict -More sophisticated implementations can use :code:`configure_fit` to implement custom client selection logic. A client will only participate in a round if the corresponding :code:`ClientProxy` is included in the list returned from :code:`configure_fit`. +More sophisticated implementations can use ``configure_fit`` to implement custom client +selection logic. A client will only participate in a round if the corresponding +``ClientProxy`` is included in the list returned from ``configure_fit``. .. note:: - The structure of this return value provides a lot of flexibility to the user. Since instructions are defined on a per-client basis, different instructions can be sent to each client. This enables custom strategies to train, for example, different models on different clients, or use different hyperparameters on different clients (via the :code:`config` dict). + The structure of this return value provides a lot of flexibility to the user. Since + instructions are defined on a per-client basis, different instructions can be sent + to each client. This enables custom strategies to train, for example, different + models on different clients, or use different hyperparameters on different clients + (via the ``config`` dict). 
-The :code:`aggregate_fit` method --------------------------------- +The ``aggregate_fit`` method +---------------------------- -:code:`aggregate_fit` is responsible for aggregating the results returned by the clients that were selected and asked to train in :code:`configure_fit`. +``aggregate_fit`` is responsible for aggregating the results returned by the clients +that were selected and asked to train in ``configure_fit``. .. code-block:: python @@ -255,42 +274,58 @@ The :code:`aggregate_fit` method ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: """Aggregate training results.""" -Of course, failures can happen, so there is no guarantee that the server will get results from all the clients it sent instructions to (via :code:`configure_fit`). :code:`aggregate_fit` therefore receives a list of :code:`results`, but also a list of :code:`failures`. +Of course, failures can happen, so there is no guarantee that the server will get +results from all the clients it sent instructions to (via ``configure_fit``). +``aggregate_fit`` therefore receives a list of ``results``, but also a list of +``failures``. -:code:`aggregate_fit` returns an optional :code:`Parameters` object and a dictionary of aggregated metrics. The :code:`Parameters` return value is optional because :code:`aggregate_fit` might decide that the results provided are not sufficient for aggregation (e.g., too many failures). +``aggregate_fit`` returns an optional ``Parameters`` object and a dictionary of +aggregated metrics. The ``Parameters`` return value is optional because +``aggregate_fit`` might decide that the results provided are not sufficient for +aggregation (e.g., too many failures). -The :code:`configure_evaluate` method -------------------------------------- +The ``configure_evaluate`` method +--------------------------------- -:code:`configure_evaluate` is responsible for configuring the upcoming round of evaluation. What does *configure* mean in this context? 
Configuring a round means selecting clients and deciding what instructions to send to these clients. The signature of :code:`configure_evaluate` makes this clear: +``configure_evaluate`` is responsible for configuring the upcoming round of evaluation. +What does *configure* mean in this context? Configuring a round means selecting clients +and deciding what instructions to send to these clients. The signature of +``configure_evaluate`` makes this clear: .. code-block:: python @abstractmethod def configure_evaluate( - self, - server_round: int, - parameters: Parameters, - client_manager: ClientManager + self, server_round: int, parameters: Parameters, client_manager: ClientManager ) -> List[Tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" -The return value is a list of tuples, each representing the instructions that will be sent to a particular client. Strategy implementations usually perform the following steps in :code:`configure_evaluate`: +The return value is a list of tuples, each representing the instructions that will be +sent to a particular client. Strategy implementations usually perform the following +steps in ``configure_evaluate``: -* Use the :code:`client_manager` to randomly sample all (or a subset of) available clients (each represented as a :code:`ClientProxy` object) -* Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding the current global model :code:`parameters` and :code:`config` dict +- Use the ``client_manager`` to randomly sample all (or a subset of) available clients + (each represented as a ``ClientProxy`` object) +- Pair each ``ClientProxy`` with the same ``EvaluateIns`` holding the current global + model ``parameters`` and ``config`` dict -More sophisticated implementations can use :code:`configure_evaluate` to implement custom client selection logic. 
A client will only participate in a round if the corresponding :code:`ClientProxy` is included in the list returned from :code:`configure_evaluate`. +More sophisticated implementations can use ``configure_evaluate`` to implement custom +client selection logic. A client will only participate in a round if the corresponding +``ClientProxy`` is included in the list returned from ``configure_evaluate``. .. note:: - The structure of this return value provides a lot of flexibility to the user. Since instructions are defined on a per-client basis, different instructions can be sent to each client. This enables custom strategies to evaluate, for example, different models on different clients, or use different hyperparameters on different clients (via the :code:`config` dict). - + The structure of this return value provides a lot of flexibility to the user. Since + instructions are defined on a per-client basis, different instructions can be sent + to each client. This enables custom strategies to evaluate, for example, different + models on different clients, or use different hyperparameters on different clients + (via the ``config`` dict). -The :code:`aggregate_evaluate` method -------------------------------------- +The ``aggregate_evaluate`` method +--------------------------------- -:code:`aggregate_evaluate` is responsible for aggregating the results returned by the clients that were selected and asked to evaluate in :code:`configure_evaluate`. +``aggregate_evaluate`` is responsible for aggregating the results returned by the +clients that were selected and asked to evaluate in ``configure_evaluate``. .. code-block:: python @@ -303,21 +338,29 @@ The :code:`aggregate_evaluate` method ) -> Tuple[Optional[float], Dict[str, Scalar]]: """Aggregate evaluation results.""" -Of course, failures can happen, so there is no guarantee that the server will get results from all the clients it sent instructions to (via :code:`configure_evaluate`). 
:code:`aggregate_evaluate` therefore receives a list of :code:`results`, but also a list of :code:`failures`. +Of course, failures can happen, so there is no guarantee that the server will get +results from all the clients it sent instructions to (via ``configure_evaluate``). +``aggregate_evaluate`` therefore receives a list of ``results``, but also a list of +``failures``. -:code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a dictionary of aggregated metrics. The :code:`float` return value is optional because :code:`aggregate_evaluate` might decide that the results provided are not sufficient for aggregation (e.g., too many failures). +``aggregate_evaluate`` returns an optional ``float`` (loss) and a dictionary of +aggregated metrics. The ``float`` return value is optional because +``aggregate_evaluate`` might decide that the results provided are not sufficient for +aggregation (e.g., too many failures). -The :code:`evaluate` method ---------------------------- +The ``evaluate`` method +----------------------- -:code:`evaluate` is responsible for evaluating model parameters on the server-side. Having :code:`evaluate` in addition to :code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies to perform both servers-side and client-side (federated) evaluation. +``evaluate`` is responsible for evaluating model parameters on the server-side. Having +``evaluate`` in addition to ``configure_evaluate``/``aggregate_evaluate`` enables +strategies to perform both servers-side and client-side (federated) evaluation. .. 
code-block:: python @abstractmethod - def evaluate( - self, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + def evaluate(self, parameters: Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]: """Evaluate the current model parameters.""" -The return value is again optional because the strategy might not need to implement server-side evaluation or because the user-defined :code:`evaluate` method might not complete successfully (e.g., it might fail to load the server-side evaluation data). +The return value is again optional because the strategy might not need to implement +server-side evaluation or because the user-defined ``evaluate`` method might not +complete successfully (e.g., it might fail to load the server-side evaluation data). diff --git a/doc/source/how-to-install-flower.rst b/doc/source/how-to-install-flower.rst index d773e6999245..89cdf8b836cf 100644 --- a/doc/source/how-to-install-flower.rst +++ b/doc/source/how-to-install-flower.rst @@ -1,12 +1,11 @@ Install Flower ============== - Python version -------------- -Flower requires at least `Python 3.8 `_, but `Python 3.10 `_ or above is recommended. - +Flower requires at least `Python 3.9 `_, but `Python 3.10 +`_ or above is recommended. Install stable release ---------------------- @@ -14,45 +13,56 @@ Install stable release Using pip ~~~~~~~~~ -Stable releases are available on `PyPI `_:: +Stable releases are available on `PyPI `_: + +:: - python -m pip install flwr + python -m pip install flwr -For simulations that use the Virtual Client Engine, ``flwr`` should be installed with the ``simulation`` extra:: +For simulations that use the Virtual Client Engine, ``flwr`` should be installed with +the ``simulation`` extra: - python -m pip install "flwr[simulation]" +:: + python -m pip install "flwr[simulation]" Using conda (or mamba) ~~~~~~~~~~~~~~~~~~~~~~ Flower can also be installed from the ``conda-forge`` channel. 
-If you have not added ``conda-forge`` to your channels, you will first need to run the following:: +If you have not added ``conda-forge`` to your channels, you will first need to run the +following: + +:: + + conda config --add channels conda-forge + conda config --set channel_priority strict - conda config --add channels conda-forge - conda config --set channel_priority strict +Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed with +``conda``: -Once the ``conda-forge`` channel has been enabled, ``flwr`` can be installed with ``conda``:: +:: - conda install flwr + conda install flwr -or with ``mamba``:: +or with ``mamba``: - mamba install flwr +:: + mamba install flwr Verify installation ------------------- -The following command can be used to verify if Flower was successfully installed. If everything worked, it should print the version of Flower to the command line: +The following command can be used to verify if Flower was successfully installed. If +everything worked, it should print the version of Flower to the command line: .. 
code-block:: bash - :substitutions: - - python -c "import flwr;print(flwr.__version__)" - |stable_flwr_version| + :substitutions: + python -c "import flwr;print(flwr.__version__)" + |stable_flwr_version| Advanced installation options ----------------------------- @@ -65,21 +75,32 @@ Install via Docker Install pre-release ~~~~~~~~~~~~~~~~~~~ -New (possibly unstable) versions of Flower are sometimes available as pre-release versions (alpha, beta, release candidate) before the stable release happens:: +New (possibly unstable) versions of Flower are sometimes available as pre-release +versions (alpha, beta, release candidate) before the stable release happens: + +:: + + python -m pip install -U --pre flwr - python -m pip install -U --pre flwr +For simulations that use the Virtual Client Engine, ``flwr`` pre-releases should be +installed with the ``simulation`` extra: -For simulations that use the Virtual Client Engine, ``flwr`` pre-releases should be installed with the ``simulation`` extra:: +:: - python -m pip install -U --pre 'flwr[simulation]' + python -m pip install -U --pre 'flwr[simulation]' Install nightly release ~~~~~~~~~~~~~~~~~~~~~~~ -The latest (potentially unstable) changes in Flower are available as nightly releases:: +The latest (potentially unstable) changes in Flower are available as nightly releases: + +:: + + python -m pip install -U flwr-nightly - python -m pip install -U flwr-nightly +For simulations that use the Virtual Client Engine, ``flwr-nightly`` should be installed +with the ``simulation`` extra: -For simulations that use the Virtual Client Engine, ``flwr-nightly`` should be installed with the ``simulation`` extra:: +:: - python -m pip install -U flwr-nightly[simulation] + python -m pip install -U flwr-nightly[simulation] diff --git a/doc/source/how-to-monitor-simulation.rst b/doc/source/how-to-monitor-simulation.rst index f6c26a701d94..f540e22a6a77 100644 --- a/doc/source/how-to-monitor-simulation.rst +++ 
b/doc/source/how-to-monitor-simulation.rst @@ -1,109 +1,120 @@ Monitor simulation ================== -Flower allows you to monitor system resources while running your simulation. Moreover, the Flower simulation engine is powerful and enables you to decide how to allocate resources per client manner and constrain the total usage. Insights from resource consumption can help you make smarter decisions and speed up the execution time. - -The specific instructions assume you are using macOS and have the `Homebrew `_ package manager installed. +Flower allows you to monitor system resources while running your simulation. Moreover, +the Flower simulation engine is powerful and enables you to decide how to allocate +resources per client manner and constrain the total usage. Insights from resource +consumption can help you make smarter decisions and speed up the execution time. +The specific instructions assume you are using macOS and have the `Homebrew +`_ package manager installed. Downloads --------- .. code-block:: bash - brew install prometheus grafana + brew install prometheus grafana -`Prometheus `_ is used for data collection, while `Grafana `_ will enable you to visualize the collected data. They are both well integrated with `Ray `_ which Flower uses under the hood. +`Prometheus `_ is used for data collection, while `Grafana +`_ will enable you to visualize the collected data. They are both +well integrated with `Ray `_ which Flower uses under the hood. -Overwrite the configuration files (depending on your device, it might be installed on a different path). +Overwrite the configuration files (depending on your device, it might be installed on a +different path). If you are on an M1 Mac, it should be: .. code-block:: bash - /opt/homebrew/etc/prometheus.yml - /opt/homebrew/etc/grafana/grafana.ini + /opt/homebrew/etc/prometheus.yml + /opt/homebrew/etc/grafana/grafana.ini On the previous generation Intel Mac devices, it should be: .. 
code-block:: bash - /usr/local/etc/prometheus.yml - /usr/local/etc/grafana/grafana.ini + /usr/local/etc/prometheus.yml + /usr/local/etc/grafana/grafana.ini -Open the respective configuration files and change them. Depending on your device, use one of the two following commands: +Open the respective configuration files and change them. Depending on your device, use +one of the two following commands: .. code-block:: bash - # M1 macOS - open /opt/homebrew/etc/prometheus.yml + # M1 macOS + open /opt/homebrew/etc/prometheus.yml - # Intel macOS - open /usr/local/etc/prometheus.yml + # Intel macOS + open /usr/local/etc/prometheus.yml -and then delete all the text in the file and paste a new Prometheus config you see below. You may adjust the time intervals to your requirements: +and then delete all the text in the file and paste a new Prometheus config you see +below. You may adjust the time intervals to your requirements: .. code-block:: bash - global: - scrape_interval: 1s - evaluation_interval: 1s + global: + scrape_interval: 1s + evaluation_interval: 1s - scrape_configs: - # Scrape from each ray node as defined in the service_discovery.json provided by ray. - - job_name: 'ray' - file_sd_configs: - - files: - - '/tmp/ray/prom_metrics_service_discovery.json' + scrape_configs: + # Scrape from each ray node as defined in the service_discovery.json provided by ray. + - job_name: 'ray' + file_sd_configs: + - files: + - '/tmp/ray/prom_metrics_service_discovery.json' -Now after you have edited the Prometheus configuration, do the same with the Grafana configuration files. Open those using one of the following commands as before: +Now after you have edited the Prometheus configuration, do the same with the Grafana +configuration files. Open those using one of the following commands as before: .. 
code-block:: python - # M1 macOS - open /opt/homebrew/etc/grafana/grafana.ini + # M1 macOS + open / opt / homebrew / etc / grafana / grafana.ini - # Intel macOS - open /usr/local/etc/grafana/grafana.ini + # Intel macOS + open / usr / local / etc / grafana / grafana.ini -Your terminal editor should open and allow you to apply the following configuration as before. +Your terminal editor should open and allow you to apply the following configuration as +before. .. code-block:: bash - [security] - allow_embedding = true - - [auth.anonymous] - enabled = true - org_name = Main Org. - org_role = Viewer + [security] + allow_embedding = true - [paths] - provisioning = /tmp/ray/session_latest/metrics/grafana/provisioning + [auth.anonymous] + enabled = true + org_name = Main Org. + org_role = Viewer -Congratulations, you just downloaded all the necessary software needed for metrics tracking. Now, let’s start it. + [paths] + provisioning = /tmp/ray/session_latest/metrics/grafana/provisioning +Congratulations, you just downloaded all the necessary software needed for metrics +tracking. Now, let’s start it. Tracking metrics ---------------- -Before running your Flower simulation, you have to start the monitoring tools you have just installed and configured. +Before running your Flower simulation, you have to start the monitoring tools you have +just installed and configured. .. code-block:: bash - brew services start prometheus - brew services start grafana + brew services start prometheus + brew services start grafana Please include the following argument in your Python code when starting a simulation. .. code-block:: python - fl.simulation.start_simulation( - # ... - # all the args you used before - # ... - ray_init_args = {"include_dashboard": True} - ) + fl.simulation.start_simulation( + # ... + # all the args you used before + # ... + ray_init_args={"include_dashboard": True} + ) Now, you are ready to start your workload. 
@@ -111,126 +122,140 @@ Shortly after the simulation starts, you should see the following logs in your t .. code-block:: bash - 2023-01-20 16:22:58,620 INFO [worker.py:1529](http://worker.py:1529/) -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 - + 2023-01-20 16:22:58,620 INFO [worker.py:1529](http://worker.py:1529/) -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 -You can look at everything at ``_ . +You can look at everything at http://127.0.0.1:8265 . -It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the lowest option). +It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the lowest +option). -Or alternatively, you can just see them in Grafana by clicking on the right-up corner, “View in Grafana”. Please note that the Ray dashboard is only accessible during the simulation. After the simulation ends, you can only use Grafana to explore the metrics. You can start Grafana by going to ``http://localhost:3000/``. +Or alternatively, you can just see them in Grafana by clicking on the right-up corner, +“View in Grafana”. Please note that the Ray dashboard is only accessible during the +simulation. After the simulation ends, you can only use Grafana to explore the metrics. +You can start Grafana by going to ``http://localhost:3000/``. -After you finish the visualization, stop Prometheus and Grafana. This is important as they will otherwise block, for example port :code:`3000` on your machine as long as they are running. +After you finish the visualization, stop Prometheus and Grafana. This is important as +they will otherwise block, for example port ``3000`` on your machine as long as they are +running. .. 
code-block:: bash - brew services stop prometheus - brew services stop grafana - + brew services stop prometheus + brew services stop grafana Resource allocation ------------------- -You must understand how the Ray library works to efficiently allocate system resources to simulation clients on your own. +You must understand how the Ray library works to efficiently allocate system resources +to simulation clients on your own. -Initially, the simulation (which Ray handles under the hood) starts by default with all the available resources on the system, which it shares among the clients. It doesn't mean it divides it equally among all of them, nor that the model training happens at all of them simultaneously. You will learn more about that in the later part of this blog. You can check the system resources by running the following: +Initially, the simulation (which Ray handles under the hood) starts by default with all +the available resources on the system, which it shares among the clients. It doesn't +mean it divides it equally among all of them, nor that the model training happens at all +of them simultaneously. You will learn more about that in the later part of this blog. +You can check the system resources by running the following: .. code-block:: python - import ray - ray.available_resources() + import ray + + ray.available_resources() In Google Colab, the result you see might be similar to this: .. code-block:: bash - {'memory': 8020104807.0, - 'GPU': 1.0, - 'object_store_memory': 4010052403.0, - 'CPU': 2.0, - 'accelerator_type:T4': 1.0, - 'node:172.28.0.2': 1.0} + {'memory': 8020104807.0, + 'GPU': 1.0, + 'object_store_memory': 4010052403.0, + 'CPU': 2.0, + 'accelerator_type:T4': 1.0, + 'node:172.28.0.2': 1.0} - -However, you can overwrite the defaults. When starting a simulation, do the following (you don't need to overwrite all of them): +However, you can overwrite the defaults. 
When starting a simulation, do the following +(you don't need to overwrite all of them): .. code-block:: python - num_cpus = 2 - num_gpus = 1 - ram_memory = 16_000 * 1024 * 1024 # 16 GB - fl.simulation.start_simulation( - # ... - # all the args you were specifying before - # ... - ray_init_args = { - "include_dashboard": True, # we need this one for tracking - "num_cpus": num_cpus, - "num_gpus": num_gpus, - "memory": ram_memory, - } - ) - + num_cpus = 2 + num_gpus = 1 + ram_memory = 16_000 * 1024 * 1024 # 16 GB + fl.simulation.start_simulation( + # ... + # all the args you were specifying before + # ... + ray_init_args={ + "include_dashboard": True, # we need this one for tracking + "num_cpus": num_cpus, + "num_gpus": num_gpus, + "memory": ram_memory, + } + ) Let’s also specify the resource for a single client. .. code-block:: python - # Total resources for simulation - num_cpus = 4 - num_gpus = 1 - ram_memory = 16_000 * 1024 * 1024 # 16 GB - - # Single client resources - client_num_cpus = 2 - client_num_gpus = 1 - - fl.simulation.start_simulation( - # ... - # all the args you were specifying before - # ... - ray_init_args = { - "include_dashboard": True, # we need this one for tracking - "num_cpus": num_cpus, - "num_gpus": num_gpus, - "memory": ram_memory, - }, - # The argument below is new - client_resources = { - "num_cpus": client_num_cpus, - "num_gpus": client_num_gpus, - } - ) - -Now comes the crucial part. Ray will start a new client only when it has all the required resources (such that they run in parallel) when the resources allow. - -In the example above, only one client will be run, so your clients won't run concurrently. Setting :code:`client_num_gpus = 0.5` would allow running two clients and therefore enable them to run concurrently. -Be careful not to require more resources than available. If you specified :code:`client_num_gpus = 2`, the simulation wouldn't start (even if you had 2 GPUs but decided to set 1 in :code:`ray_init_args`). 
- + # Total resources for simulation + num_cpus = 4 + num_gpus = 1 + ram_memory = 16_000 * 1024 * 1024 # 16 GB + + # Single client resources + client_num_cpus = 2 + client_num_gpus = 1 + + fl.simulation.start_simulation( + # ... + # all the args you were specifying before + # ... + ray_init_args={ + "include_dashboard": True, # we need this one for tracking + "num_cpus": num_cpus, + "num_gpus": num_gpus, + "memory": ram_memory, + }, + # The argument below is new + client_resources={ + "num_cpus": client_num_cpus, + "num_gpus": client_num_gpus, + }, + ) + +Now comes the crucial part. Ray will start a new client only when it has all the +required resources (such that they run in parallel) when the resources allow. + +In the example above, only one client will be run, so your clients won't run +concurrently. Setting ``client_num_gpus = 0.5`` would allow running two clients and +therefore enable them to run concurrently. Be careful not to require more resources than +available. If you specified ``client_num_gpus = 2``, the simulation wouldn't start (even +if you had 2 GPUs but decided to set 1 in ``ray_init_args``). FAQ --- Q: I don't see any metrics logged. -A: The timeframe might not be properly set. The setting is in the top right corner ("Last 30 minutes" by default). Please change the timeframe to reflect the period when the simulation was running. +A: The timeframe might not be properly set. The setting is in the top right corner +("Last 30 minutes" by default). Please change the timeframe to reflect the period when +the simulation was running. -Q: I see “Grafana server not detected. Please make sure the Grafana server is running and refresh this page” after going to the Metrics tab in Ray Dashboard. +Q: I see “Grafana server not detected. Please make sure the Grafana server is running +and refresh this page” after going to the Metrics tab in Ray Dashboard. A: You probably don't have Grafana running. Please check the running services .. 
code-block:: bash - brew services list + brew services list -Q: I see "This site can't be reached" when going to ``_. +Q: I see "This site can't be reached" when going to http://127.0.0.1:8265. A: Either the simulation has already finished, or you still need to start Prometheus. - Resources --------- -Ray Dashboard: ``_ +Ray Dashboard: https://docs.ray.io/en/latest/ray-observability/getting-started.html -Ray Metrics: ``_ +Ray Metrics: https://docs.ray.io/en/latest/cluster/metrics.html diff --git a/doc/source/how-to-run-simulations.rst b/doc/source/how-to-run-simulations.rst index d1dcb511ed51..fb4eed17b4e7 100644 --- a/doc/source/how-to-run-simulations.rst +++ b/doc/source/how-to-run-simulations.rst @@ -1,48 +1,85 @@ Run simulations =============== -.. youtube:: cRebUIGB5RU - :url_parameters: ?list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB - :width: 100% - -Simulating Federated Learning workloads is useful for a multitude of use-cases: you might want to run your workload on a large cohort of clients but without having to source, configure and mange a large number of physical devices; you might want to run your FL workloads as fast as possible on the compute systems you have access to without having to go through a complex setup process; you might want to validate your algorithm on different scenarios at varying levels of data and system heterogeneity, client availability, privacy budgets, etc. These are among some of the use-cases where simulating FL workloads makes sense. Flower can accommodate these scenarios by means of its `VirtualClientEngine `_ or VCE. - -The :code:`VirtualClientEngine` schedules, launches and manages `virtual` clients. These clients are identical to `non-virtual` clients (i.e. the ones you launch via the command `flwr.client.start_client `_) in the sense that they can be configure by creating a class inheriting, for example, from `flwr.client.NumPyClient `_ and therefore behave in an identical way. 
In addition to that, clients managed by the :code:`VirtualClientEngine` are: - -* resource-aware: this means that each client gets assigned a portion of the compute and memory on your system. You as a user can control this at the beginning of the simulation and allows you to control the degree of parallelism of your Flower FL simulation. The fewer the resources per client, the more clients can run concurrently on the same hardware. -* self-managed: this means that you as a user do not need to launch clients manually, instead this gets delegated to :code:`VirtualClientEngine`'s internals. -* ephemeral: this means that a client is only materialized when it is required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards, releasing the resources it was assigned and allowing in this way other clients to participate. - -The :code:`VirtualClientEngine` implements `virtual` clients using `Ray `_, an open-source framework for scalable Python workloads. In particular, Flower's :code:`VirtualClientEngine` makes use of `Actors `_ to spawn `virtual` clients and run their workload. - +.. youtube:: cRebUIGB5RU + :url_parameters: ?list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB + :width: 100% + +Simulating Federated Learning workloads is useful for a multitude of use-cases: you +might want to run your workload on a large cohort of clients but without having to +source, configure and mange a large number of physical devices; you might want to run +your FL workloads as fast as possible on the compute systems you have access to without +having to go through a complex setup process; you might want to validate your algorithm +on different scenarios at varying levels of data and system heterogeneity, client +availability, privacy budgets, etc. These are among some of the use-cases where +simulating FL workloads makes sense. Flower can accommodate these scenarios by means of +its `VirtualClientEngine +`_ or VCE. 
+ +The ``VirtualClientEngine`` schedules, launches and manages `virtual` clients. These +clients are identical to `non-virtual` clients (i.e. the ones you launch via the command +`flwr.client.start_client `_) in the sense that they can +be configure by creating a class inheriting, for example, from `flwr.client.NumPyClient +`_ and therefore behave in an identical way. +In addition to that, clients managed by the ``VirtualClientEngine`` are: + +- resource-aware: this means that each client gets assigned a portion of the compute and + memory on your system. You as a user can control this at the beginning of the + simulation and allows you to control the degree of parallelism of your Flower FL + simulation. The fewer the resources per client, the more clients can run concurrently + on the same hardware. +- self-managed: this means that you as a user do not need to launch clients manually, + instead this gets delegated to ``VirtualClientEngine``'s internals. +- ephemeral: this means that a client is only materialized when it is required in the FL + process (e.g. to do `fit() `_). The object + is destroyed afterwards, releasing the resources it was assigned and allowing in this + way other clients to participate. + +The ``VirtualClientEngine`` implements `virtual` clients using `Ray +`_, an open-source framework for scalable Python workloads. In +particular, Flower's ``VirtualClientEngine`` makes use of `Actors +`_ to spawn `virtual` clients and +run their workload. Launch your Flower simulation ----------------------------- -Running Flower simulations still require you to define your client class, a strategy, and utility functions to download and load (and potentially partition) your dataset. 
With that out of the way, launching your simulation is done with `start_simulation `_ and a minimal example looks as follows: - +Running Flower simulations still require you to define your client class, a strategy, +and utility functions to download and load (and potentially partition) your dataset. +With that out of the way, launching your simulation is done with `start_simulation +`_ and a minimal example looks as +follows: .. code-block:: python import flwr as fl from flwr.server.strategy import FedAvg - + + def client_fn(cid: str): # Return a standard Flower client return MyFlowerClient().to_client() + # Launch the simulation hist = fl.simulation.start_simulation( - client_fn=client_fn, # A function to run a _virtual_ client when required - num_clients=50, # Total number of clients available - config=fl.server.ServerConfig(num_rounds=3), # Specify number of FL rounds - strategy=FedAvg() # A Flower strategy + client_fn=client_fn, # A function to run a _virtual_ client when required + num_clients=50, # Total number of clients available + config=fl.server.ServerConfig(num_rounds=3), # Specify number of FL rounds + strategy=FedAvg(), # A Flower strategy ) - VirtualClientEngine resources ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, etc) since that is also the default behavior when starting Ray. However, in some settings you might want to limit how many of your system resources are used for simulation. You can do this via the :code:`ray_init_args` input argument to :code:`start_simulation` which the VCE internally passes to Ray's :code:`ray.init` command. For a complete list of settings you can configure check the `ray.init `_ documentation. Do not set :code:`ray_init_args` if you want the VCE to use all your system's CPUs and GPUs. + +By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, etc) +since that is also the default behavior when starting Ray. 
However, in some settings you +might want to limit how many of your system resources are used for simulation. You can +do this via the ``ray_init_args`` input argument to ``start_simulation`` which the VCE +internally passes to Ray's ``ray.init`` command. For a complete list of settings you can +configure check the `ray.init +`_ documentation. +Do not set ``ray_init_args`` if you want the VCE to use all your system's CPUs and GPUs. .. code-block:: python @@ -50,22 +87,28 @@ By default the VCE has access to all system resources (i.e. all CPUs, all GPUs, # Launch the simulation by limiting resources visible to Flower's VCE hist = fl.simulation.start_simulation( - ... + # ... # Out of all CPUs and GPUs available in your system, # only 8xCPUs and 1xGPUs would be used for simulation. - ray_init_args = {'num_cpus': 8, 'num_gpus': 1} + ray_init_args={"num_cpus": 8, "num_gpus": 1} ) - - Assigning client resources ~~~~~~~~~~~~~~~~~~~~~~~~~~ -By default the :code:`VirtualClientEngine` assigns a single CPU core (and nothing else) to each virtual client. This means that if your system has 10 cores, that many virtual clients can be concurrently running. -More often than not, you would probably like to adjust the resources your clients get assigned based on the complexity (i.e. compute and memory footprint) of your FL workload. You can do so when starting your simulation by setting the argument `client_resources` to `start_simulation `_. Two keys are internally used by Ray to schedule and spawn workloads (in our case Flower clients): +By default the ``VirtualClientEngine`` assigns a single CPU core (and nothing else) to +each virtual client. This means that if your system has 10 cores, that many virtual +clients can be concurrently running. -* :code:`num_cpus` indicates the number of CPU cores a client would get. -* :code:`num_gpus` indicates the **ratio** of GPU memory a client gets assigned. 
+More often than not, you would probably like to adjust the resources your clients get +assigned based on the complexity (i.e. compute and memory footprint) of your FL +workload. You can do so when starting your simulation by setting the argument +`client_resources` to `start_simulation +`_. Two keys are internally used by +Ray to schedule and spawn workloads (in our case Flower clients): + +- ``num_cpus`` indicates the number of CPU cores a client would get. +- ``num_gpus`` indicates the **ratio** of GPU memory a client gets assigned. Let's see a few examples: @@ -74,90 +117,140 @@ Let's see a few examples: import flwr as fl # each client gets 1xCPU (this is the default if no resources are specified) - my_client_resources = {'num_cpus': 1, 'num_gpus': 0.0} + my_client_resources = {"num_cpus": 1, "num_gpus": 0.0} # each client gets 2xCPUs and half a GPU. (with a single GPU, 2 clients run concurrently) - my_client_resources = {'num_cpus': 2, 'num_gpus': 0.5} + my_client_resources = {"num_cpus": 2, "num_gpus": 0.5} # 10 client can run concurrently on a single GPU, but only if you have 20 CPU threads. - my_client_resources = {'num_cpus': 2, 'num_gpus': 0.1} + my_client_resources = {"num_cpus": 2, "num_gpus": 0.1} # Launch the simulation hist = fl.simulation.start_simulation( - ... - client_resources = my_client_resources # A Python dict specifying CPU/GPU resources + # ... + client_resources=my_client_resources # A Python dict specifying CPU/GPU resources ) -While the :code:`client_resources` can be used to control the degree of concurrency in your FL simulation, this does not stop you from running dozens, hundreds or even thousands of clients in the same round and having orders of magnitude more `dormant` (i.e. not participating in a round) clients. Let's say you want to have 100 clients per round but your system can only accommodate 8 clients concurrently. 
The :code:`VirtualClientEngine` will schedule 100 jobs to run (each simulating a client sampled by the strategy) and then will execute them in a resource-aware manner in batches of 8. +While the ``client_resources`` can be used to control the degree of concurrency in your +FL simulation, this does not stop you from running dozens, hundreds or even thousands of +clients in the same round and having orders of magnitude more `dormant` (i.e. not +participating in a round) clients. Let's say you want to have 100 clients per round but +your system can only accommodate 8 clients concurrently. The ``VirtualClientEngine`` +will schedule 100 jobs to run (each simulating a client sampled by the strategy) and +then will execute them in a resource-aware manner in batches of 8. -To understand all the intricate details on how resources are used to schedule FL clients and how to define custom resources, please take a look at the `Ray documentation `_. +To understand all the intricate details on how resources are used to schedule FL clients +and how to define custom resources, please take a look at the `Ray documentation +`_. Simulation examples ~~~~~~~~~~~~~~~~~~~ -A few ready-to-run complete examples for Flower simulation in Tensorflow/Keras and PyTorch are provided in the `Flower repository `_. You can run them on Google Colab too: - -* `Tensorflow/Keras Simulation `_: 100 clients collaboratively train a MLP model on MNIST. -* `PyTorch Simulation `_: 100 clients collaboratively train a CNN model on MNIST. - +A few ready-to-run complete examples for Flower simulation in Tensorflow/Keras and +PyTorch are provided in the `Flower repository `_. You +can run them on Google Colab too: +- `Tensorflow/Keras Simulation + `_: 100 + clients collaboratively train a MLP model on MNIST. +- `PyTorch Simulation + `_: 100 clients + collaboratively train a CNN model on MNIST. 
Multi-node Flower simulations ----------------------------- -Flower's :code:`VirtualClientEngine` allows you to run FL simulations across multiple compute nodes. Before starting your multi-node simulation ensure that you: - -#. Have the same Python environment in all nodes. -#. Have a copy of your code (e.g. your entire repo) in all nodes. -#. Have a copy of your dataset in all nodes (more about this in :ref:`simulation considerations `) -#. Pass :code:`ray_init_args={"address"="auto"}` to `start_simulation `_ so the :code:`VirtualClientEngine` attaches to a running Ray instance. -#. Start Ray on you head node: on the terminal type :code:`ray start --head`. This command will print a few lines, one of which indicates how to attach other nodes to the head node. -#. Attach other nodes to the head node: copy the command shown after starting the head and execute it on terminal of a new node: for example :code:`ray start --address='192.168.1.132:6379'` - -With all the above done, you can run your code from the head node as you would if the simulation was running on a single node. - -Once your simulation is finished, if you'd like to dismantle your cluster you simply need to run the command :code:`ray stop` in each node's terminal (including the head node). +Flower's ``VirtualClientEngine`` allows you to run FL simulations across multiple +compute nodes. Before starting your multi-node simulation ensure that you: + +1. Have the same Python environment in all nodes. +2. Have a copy of your code (e.g. your entire repo) in all nodes. +3. Have a copy of your dataset in all nodes (more about this in :ref:`simulation + considerations `) +4. Pass ``ray_init_args={"address"="auto"}`` to `start_simulation + `_ so the ``VirtualClientEngine`` + attaches to a running Ray instance. +5. Start Ray on your head node: on the terminal type ``ray start --head``. This command + will print a few lines, one of which indicates how to attach other nodes to the head + node. +6.
Attach other nodes to the head node: copy the command shown after starting the head + and execute it on terminal of a new node: for example ``ray start + --address='192.168.1.132:6379'`` + +With all the above done, you can run your code from the head node as you would if the +simulation was running on a single node. + +Once your simulation is finished, if you'd like to dismantle your cluster you simply +need to run the command ``ray stop`` in each node's terminal (including the head node). Multi-node simulation good-to-know ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here we list a few interesting functionality when running multi-node FL simulations: -User :code:`ray status` to check all nodes connected to your head node as well as the total resources available to the :code:`VirtualClientEngine`. +Use ``ray status`` to check all nodes connected to your head node as well as the total +resources available to the ``VirtualClientEngine``. -When attaching a new node to the head, all its resources (i.e. all CPUs, all GPUs) will be visible by the head node. This means that the :code:`VirtualClientEngine` can schedule as many `virtual` clients as that node can possible run. In some settings you might want to exclude certain resources from the simulation. You can do this by appending `--num-cpus=` and/or `--num-gpus=` in any :code:`ray start` command (including when starting the head) +When attaching a new node to the head, all its resources (i.e. all CPUs, all GPUs) will +be visible by the head node. This means that the ``VirtualClientEngine`` can schedule as +many `virtual` clients as that node can possibly run. In some settings you might want to +exclude certain resources from the simulation. You can do this by appending +`--num-cpus=` and/or `--num-gpus=` in any ``ray +start`` command (including when starting the head) .. _considerations-for-simulations: - Considerations for simulations ------------------------------ ..
note:: - We are actively working on these fronts so to make it trivial to run any FL workload with Flower simulation. + We are actively working on these fronts so to make it trivial to run any FL workload + with Flower simulation. -The current VCE allows you to run Federated Learning workloads in simulation mode whether you are prototyping simple scenarios on your personal laptop or you want to train a complex FL pipeline across multiple high-performance GPU nodes. While we add more capabilities to the VCE, the points below highlight some of the considerations to keep in mind when designing your FL pipeline with Flower. We also highlight a couple of current limitations in our implementation. +The current VCE allows you to run Federated Learning workloads in simulation mode +whether you are prototyping simple scenarios on your personal laptop or you want to +train a complex FL pipeline across multiple high-performance GPU nodes. While we add +more capabilities to the VCE, the points below highlight some of the considerations to +keep in mind when designing your FL pipeline with Flower. We also highlight a couple of +current limitations in our implementation. GPU resources ~~~~~~~~~~~~~ -The VCE assigns a share of GPU memory to a client that specifies the key :code:`num_gpus` in :code:`client_resources`. This being said, Ray (used internally by the VCE) is by default: - - -* not aware of the total VRAM available on the GPUs. This means that if you set :code:`num_gpus=0.5` and you have two GPUs in your system with different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients concurrently. -* not aware of other unrelated (i.e. not created by the VCE) workloads are running on the GPU. Two takeaways from this are: +The VCE assigns a share of GPU memory to a client that specifies the key ``num_gpus`` in +``client_resources``. 
This being said, Ray (used internally by the VCE) is by default: - * Your Flower server might need a GPU to evaluate the `global model` after aggregation (by instance when making use of the `evaluate method `_) - * If you want to run several independent Flower simulations on the same machine you need to mask-out your GPUs with :code:`CUDA_VISIBLE_DEVICES=""` when launching your experiment. +- not aware of the total VRAM available on the GPUs. This means that if you set + ``num_gpus=0.5`` and you have two GPUs in your system with different (e.g. 32GB and + 8GB) VRAM amounts, they both would run 2 clients concurrently. +- not aware of other unrelated (i.e. not created by the VCE) workloads are running on + the GPU. Two takeaways from this are: + - Your Flower server might need a GPU to evaluate the `global model` after aggregation + (by instance when making use of the `evaluate method + `_) + - If you want to run several independent Flower simulations on the same machine you + need to mask-out your GPUs with ``CUDA_VISIBLE_DEVICES=""`` when launching + your experiment. -In addition, the GPU resource limits passed to :code:`client_resources` are not `enforced` (i.e. they can be exceeded) which can result in the situation of client using more VRAM than the ratio specified when starting the simulation. +In addition, the GPU resource limits passed to ``client_resources`` are not `enforced` +(i.e. they can be exceeded) which can result in the situation of client using more VRAM +than the ratio specified when starting the simulation. TensorFlow with GPUs -"""""""""""""""""""" +++++++++++++++++++++ -When `using a GPU with TensorFlow `_ nearly your entire GPU memory of all your GPUs visible to the process will be mapped. This is done by TensorFlow for optimization purposes. However, in settings such as FL simulations where we want to split the GPU into multiple `virtual` clients, this is not a desirable mechanism. 
Luckily we can disable this default behavior by `enabling memory growth `_. +When `using a GPU with TensorFlow `_ nearly your +entire GPU memory of all your GPUs visible to the process will be mapped. This is done +by TensorFlow for optimization purposes. However, in settings such as FL simulations +where we want to split the GPU into multiple `virtual` clients, this is not a desirable +mechanism. Luckily we can disable this default behavior by `enabling memory growth +`_. -This would need to be done in the main process (which is where the server would run) and in each Actor created by the VCE. By means of :code:`actor_kwargs` we can pass the reserved key `"on_actor_init_fn"` in order to specify a function to be executed upon actor initialization. In this case, to enable GPU growth for TF workloads. It would look as follows: +This would need to be done in the main process (which is where the server would run) and +in each Actor created by the VCE. By means of ``actor_kwargs`` we can pass the reserved +key `"on_actor_init_fn"` in order to specify a function to be executed upon actor +initialization. In this case, to enable GPU growth for TF workloads. It would look as +follows: .. code-block:: python @@ -170,19 +263,29 @@ This would need to be done in the main process (which is where the server would # Start Flower simulation hist = fl.simulation.start_simulation( - ... + # ... actor_kwargs={ - "on_actor_init_fn": enable_tf_gpu_growth # <-- To be executed upon actor init. + "on_actor_init_fn": enable_tf_gpu_growth # <-- To be executed upon actor init. }, ) -This is precisely the mechanism used in `Tensorflow/Keras Simulation `_ example. - +This is precisely the mechanism used in `Tensorflow/Keras Simulation +`_ example. Multi-node setups ~~~~~~~~~~~~~~~~~ -* The VCE does not currently offer a way to control on which node a particular `virtual` client is executed. 
In other words, if more than a single node have the resources needed by a client to run, then any of those nodes could get the client workload scheduled onto. Later in the FL process (i.e. in a different round) the same client could be executed by a different node. Depending on how your clients access their datasets, this might require either having a copy of all dataset partitions on all nodes or a dataset serving mechanism (e.g. using nfs, a database) to circumvent data duplication. - -* By definition virtual clients are `stateless` due to their ephemeral nature. A client state can be implemented as part of the Flower client class but users need to ensure this saved to persistent storage (e.g. a database, disk) and that can be retrieve later by the same client regardless on which node it is running from. This is related to the point above also since, in some way, the client's dataset could be seen as a type of `state`. - +- The VCE does not currently offer a way to control on which node a particular `virtual` + client is executed. In other words, if more than a single node has the resources + needed by a client to run, then any of those nodes could get the client workload + scheduled onto. Later in the FL process (i.e. in a different round) the same client + could be executed by a different node. Depending on how your clients access their + datasets, this might require either having a copy of all dataset partitions on all + nodes or a dataset serving mechanism (e.g. using nfs, a database) to circumvent data + duplication. +- By definition virtual clients are `stateless` due to their ephemeral nature. A client + state can be implemented as part of the Flower client class but users need to ensure + this is saved to persistent storage (e.g. a database, disk) and that can be retrieved + later by the same client regardless on which node it is running from.
This is related + to the point above also since, in some way, the client's dataset could be seen as a + type of `state`. diff --git a/doc/source/how-to-save-and-load-model-checkpoints.rst b/doc/source/how-to-save-and-load-model-checkpoints.rst index 0d711e375cd8..f2f12dae97be 100644 --- a/doc/source/how-to-save-and-load-model-checkpoints.rst +++ b/doc/source/how-to-save-and-load-model-checkpoints.rst @@ -1,17 +1,19 @@ Save and load model checkpoints =============================== -Flower does not automatically save model updates on the server-side. This how-to guide describes the steps to save (and load) model checkpoints in Flower. - +Flower does not automatically save model updates on the server-side. This how-to guide +describes the steps to save (and load) model checkpoints in Flower. Model checkpointing ------------------- -Model updates can be persisted on the server-side by customizing :code:`Strategy` methods. -Implementing custom strategies is always an option, but for many cases it may be more convenient to simply customize an existing strategy. -The following code example defines a new :code:`SaveModelStrategy` which customized the existing built-in :code:`FedAvg` strategy. -In particular, it customizes :code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class (:code:`FedAvg`). -It then continues to save returned (aggregated) weights before it returns those aggregated weights to the caller (i.e., the server): +Model updates can be persisted on the server-side by customizing ``Strategy`` methods. +Implementing custom strategies is always an option, but for many cases it may be more +convenient to simply customize an existing strategy. The following code example defines +a new ``SaveModelStrategy`` which customized the existing built-in ``FedAvg`` strategy. +In particular, it customizes ``aggregate_fit`` by calling ``aggregate_fit`` in the base +class (``FedAvg``). 
It then continues to save returned (aggregated) weights before it +returns those aggregated weights to the caller (i.e., the server): .. code-block:: python @@ -24,11 +26,15 @@ It then continues to save returned (aggregated) weights before it returns those ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: # Call aggregate_fit from base class (FedAvg) to aggregate parameters and metrics - aggregated_parameters, aggregated_metrics = super().aggregate_fit(server_round, results, failures) - + aggregated_parameters, aggregated_metrics = super().aggregate_fit( + server_round, results, failures + ) + if aggregated_parameters is not None: # Convert `Parameters` to `List[np.ndarray]` - aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays(aggregated_parameters) + aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays( + aggregated_parameters + ) # Save aggregated_ndarrays print(f"Saving round {server_round} aggregated_ndarrays...") @@ -36,24 +42,27 @@ It then continues to save returned (aggregated) weights before it returns those return aggregated_parameters, aggregated_metrics + # Create strategy and run server strategy = SaveModelStrategy( # (same arguments as FedAvg here) ) fl.server.start_server(strategy=strategy) - Save and load PyTorch checkpoints --------------------------------- -Similar to the previous example but with a few extra steps, we'll show how to -store a PyTorch checkpoint we'll use the ``torch.save`` function. -Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be transformed into a list of NumPy ``ndarray``'s, -then those are transformed into the PyTorch ``state_dict`` following the ``OrderedDict`` class structure. +Similar to the previous example but with a few extra steps, we'll show how to store a +PyTorch checkpoint we'll use the ``torch.save`` function. 
Firstly, ``aggregate_fit`` +returns a ``Parameters`` object that has to be transformed into a list of NumPy +``ndarray``'s, then those are transformed into the PyTorch ``state_dict`` following the +``OrderedDict`` class structure. .. code-block:: python net = cifar.Net().to(DEVICE) + + class SaveModelStrategy(fl.server.strategy.FedAvg): def aggregate_fit( self, @@ -64,14 +73,18 @@ then those are transformed into the PyTorch ``state_dict`` following the ``Order """Aggregate model weights using weighted average and store checkpoint""" # Call aggregate_fit from base class (FedAvg) to aggregate parameters and metrics - aggregated_parameters, aggregated_metrics = super().aggregate_fit(server_round, results, failures) - + aggregated_parameters, aggregated_metrics = super().aggregate_fit( + server_round, results, failures + ) + if aggregated_parameters is not None: print(f"Saving round {server_round} aggregated_parameters...") # Convert `Parameters` to `List[np.ndarray]` - aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays(aggregated_parameters) - + aggregated_ndarrays: List[np.ndarray] = fl.common.parameters_to_ndarrays( + aggregated_parameters + ) + # Convert `List[np.ndarray]` to PyTorch`state_dict` params_dict = zip(net.state_dict().keys(), aggregated_ndarrays) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) @@ -82,7 +95,8 @@ then those are transformed into the PyTorch ``state_dict`` following the ``Order return aggregated_parameters, aggregated_metrics -To load your progress, you simply append the following lines to your code. Note that this will iterate over all saved checkpoints and load the latest one: +To load your progress, you simply append the following lines to your code. Note that +this will iterate over all saved checkpoints and load the latest one: .. code-block:: python @@ -94,4 +108,5 @@ To load your progress, you simply append the following lines to your code. 
Note state_dict_ndarrays = [v.cpu().numpy() for v in net.state_dict().values()] parameters = fl.common.ndarrays_to_parameters(state_dict_ndarrays) -Return/use this object of type ``Parameters`` wherever necessary, such as in the ``initial_parameters`` when defining a ``Strategy``. \ No newline at end of file +Return/use this object of type ``Parameters`` wherever necessary, such as in the +``initial_parameters`` when defining a ``Strategy``. diff --git a/doc/source/how-to-upgrade-to-flower-1.0.rst b/doc/source/how-to-upgrade-to-flower-1.0.rst index c0721b0f3736..5f10f16a551f 100644 --- a/doc/source/how-to-upgrade-to-flower-1.0.rst +++ b/doc/source/how-to-upgrade-to-flower-1.0.rst @@ -1,8 +1,10 @@ Upgrade to Flower 1.0 ===================== -Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable foundation for future growth. Compared to Flower 0.19 (and other 0.x series releases), there are a few breaking changes that make it necessary to change the code of existing 0.x-series projects. - +Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable foundation for +future growth. Compared to Flower 0.19 (and other 0.x series releases), there are a few +breaking changes that make it necessary to change the code of existing 0.x-series +projects. Install update -------------- @@ -14,11 +16,13 @@ Here's how to update an existing installation to Flower 1.0 using either pip or - ``python -m pip install -U flwr`` (when using ``start_server`` and ``start_client``) - ``python -m pip install -U 'flwr[simulation]'`` (when using ``start_simulation``) -- Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). +- Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't + forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry + install``). 
- ``flwr = "^1.0.0"`` (when using ``start_server`` and ``start_client``) - - ``flwr = { version = "^1.0.0", extras = ["simulation"] }`` (when using ``start_simulation``) - + - ``flwr = { version = "^1.0.0", extras = ["simulation"] }`` (when using + ``start_simulation``) Required changes ---------------- @@ -28,64 +32,96 @@ The following breaking changes require manual updates. General ~~~~~~~ -Pass all arguments as keyword arguments (not as positional arguments). Here's an example: +Pass all arguments as keyword arguments (not as positional arguments). Here's an +example: - Flower 0.19 (positional arguments): ``start_client("127.0.0.1:8080", FlowerClient())`` -- Flower 1.0 (keyword arguments): ``start_client(server_address="127.0.0.1:8080", client=FlowerClient())`` +- Flower 1.0 (keyword arguments): ``start_client(server_address="127.0.0.1:8080", + client=FlowerClient())`` Client ~~~~~~ -- Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to ``def get_parameters(self, config):`` -- Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def get_parameters(self, ins: GetParametersIns):`` +- Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to ``def + get_parameters(self, config):`` +- Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def + get_parameters(self, ins: GetParametersIns):`` Strategies / ``start_server`` / ``start_simulation`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and ``start_simulation``. Here's an example: +- Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and + ``start_simulation``. 
Here's an example: - - Flower 0.19: ``start_server(..., config={"num_rounds": 3, "round_timeout": 600.0}, ...)`` - - Flower 1.0: ``start_server(..., config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)`` + - Flower 0.19: ``start_server(..., config={"num_rounds": 3, "round_timeout": 600.0}, + ...)`` + - Flower 1.0: ``start_server(..., config=flwr.server.ServerConfig(num_rounds=3, + round_timeout=600.0), ...)`` -- Replace ``num_rounds=1`` in ``start_simulation`` with the new ``config=ServerConfig(...)`` (see previous item) -- Remove ``force_final_distributed_eval`` parameter from calls to ``start_server``. Distributed evaluation on all clients can be enabled by configuring the strategy to sample all clients for evaluation after the last round of training. +- Replace ``num_rounds=1`` in ``start_simulation`` with the new + ``config=ServerConfig(...)`` (see previous item) +- Remove ``force_final_distributed_eval`` parameter from calls to ``start_server``. + Distributed evaluation on all clients can be enabled by configuring the strategy to + sample all clients for evaluation after the last round of training. - Rename parameter/ndarray conversion functions: - ``parameters_to_weights`` --> ``parameters_to_ndarrays`` - ``weights_to_parameters`` --> ``ndarrays_to_parameters`` -- Strategy initialization: if the strategy relies on the default values for ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and ``fraction_evaluate`` manually to ``0.1``. Projects that do not manually create a strategy (by calling ``start_server`` or ``start_simulation`` without passing a strategy instance) should now manually initialize FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``. +- Strategy initialization: if the strategy relies on the default values for + ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and + ``fraction_evaluate`` manually to ``0.1``. 
Projects that do not manually create a + strategy (by calling ``start_server`` or ``start_simulation`` without passing a + strategy instance) should now manually initialize FedAvg with ``fraction_fit`` and + ``fraction_evaluate`` set to ``0.1``. - Rename built-in strategy parameters (e.g., ``FedAvg``): - ``fraction_eval`` --> ``fraction_evaluate`` - ``min_eval_clients`` --> ``min_evaluate_clients`` - ``eval_fn`` --> ``evaluate_fn`` -- Rename ``rnd`` to ``server_round``. This impacts multiple methods and functions, for example, ``configure_fit``, ``aggregate_fit``, ``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``. +- Rename ``rnd`` to ``server_round``. This impacts multiple methods and functions, for + example, ``configure_fit``, ``aggregate_fit``, ``configure_evaluate``, + ``aggregate_evaluate``, and ``evaluate_fn``. - Add ``server_round`` and ``config`` to ``evaluate_fn``: - - Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` - - Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` + - Flower 0.19: ``def evaluate(parameters: NDArrays) -> Optional[Tuple[float, Dict[str, + Scalar]]]:`` + - Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, config: + Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` Custom strategies ~~~~~~~~~~~~~~~~~ -- The type of parameter ``failures`` has changed from ``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (in ``aggregate_fit``) and ``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in ``aggregate_evaluate``) -- The ``Strategy`` method ``evaluate`` now receives the current round of federated learning/evaluation as the first parameter: +- The type of parameter ``failures`` has changed from ``List[BaseException]`` to + ``List[Union[Tuple[ClientProxy, FitRes], BaseException]]`` (in ``aggregate_fit``) 
and + ``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in + ``aggregate_evaluate``) +- The ``Strategy`` method ``evaluate`` now receives the current round of federated + learning/evaluation as the first parameter: - - Flower 0.19: ``def evaluate(self, parameters: Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` - - Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:`` + - Flower 0.19: ``def evaluate(self, parameters: Parameters) -> Optional[Tuple[float, + Dict[str, Scalar]]]:`` + - Flower 1.0: ``def evaluate(self, server_round: int, parameters: Parameters) -> + Optional[Tuple[float, Dict[str, Scalar]]]:`` Optional improvements --------------------- -Along with the necessary changes above, there are a number of potential improvements that just became possible: - -- Remove "placeholder" methods from subclasses of ``Client`` or ``NumPyClient``. If you, for example, use server-side evaluation, then empty placeholder implementations of ``evaluate`` are no longer necessary. -- Configure the round timeout via ``start_simulation``: ``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)`` +Along with the necessary changes above, there are a number of potential improvements +that just became possible: +- Remove "placeholder" methods from subclasses of ``Client`` or ``NumPyClient``. If you, + for example, use server-side evaluation, then empty placeholder implementations of + ``evaluate`` are no longer necessary. +- Configure the round timeout via ``start_simulation``: ``start_simulation(..., + config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), ...)`` Further help ------------ -Most official `Flower code examples `_ are already updated to Flower 1.0, they can serve as a reference for using the Flower 1.0 API. If there are further questions, `join the Flower Slack `_ and use the channel ``#questions``. 
+Most official `Flower code examples +`_ are already updated to Flower 1.0, +they can serve as a reference for using the Flower 1.0 API. If there are further +questions, `join the Flower Slack `_ and use the channel +``#questions``. diff --git a/doc/source/how-to-upgrade-to-flower-next.rst b/doc/source/how-to-upgrade-to-flower-next.rst index e1e94f095b60..9a476f9865e1 100644 --- a/doc/source/how-to-upgrade-to-flower-next.rst +++ b/doc/source/how-to-upgrade-to-flower-next.rst @@ -1,11 +1,13 @@ Upgrade to Flower Next ====================== -Welcome to the migration guide for updating Flower to Flower Next! Whether you're a seasoned user -or just getting started, this guide will help you smoothly transition your existing setup to take -advantage of the latest features and improvements in Flower Next, starting from version 1.8. +Welcome to the migration guide for updating Flower to Flower Next! Whether you're a +seasoned user or just getting started, this guide will help you smoothly transition your +existing setup to take advantage of the latest features and improvements in Flower Next, +starting from version 1.8. .. note:: + This guide shows how to reuse pre-``1.8`` Flower code with minimum code changes by using the *compatibility layer* in Flower Next. In another guide, we will show how to run Flower Next end-to-end with pure Flower Next APIs. @@ -18,26 +20,44 @@ Let's dive in! - https://github.com/jgm/pandoc/issues/3973#issuecomment-337087394 .. |clientapp_link| replace:: ``ClientApp()`` + .. |serverapp_link| replace:: ``ServerApp()`` + .. |startclient_link| replace:: ``start_client()`` + .. |startserver_link| replace:: ``start_server()`` + .. |startsim_link| replace:: ``start_simulation()`` + .. |runsim_link| replace:: ``run_simulation()`` + .. |flowernext_superlink_link| replace:: ``flower-superlink`` + .. |flowernext_clientapp_link| replace:: ``flower-client-app`` + .. |flowernext_serverapp_link| replace:: ``flower-server-app`` + .. 
|flower_simulation_link| replace:: ``flower-simulation`` + .. _clientapp_link: ref-api/flwr.client.ClientApp.html + +.. _flower_simulation_link: ref-api-cli.html#flower-simulation + +.. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app + +.. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app + +.. _flowernext_superlink_link: ref-api-cli.html#flower-superlink + +.. _runsim_link: ref-api/flwr.simulation.run_simulation.html + .. _serverapp_link: ref-api/flwr.server.ServerApp.html + .. _startclient_link: ref-api/flwr.client.start_client.html + .. _startserver_link: ref-api/flwr.server.start_server.html -.. _startsim_link: ref-api/flwr.simulation.start_simulation.html -.. _runsim_link: ref-api/flwr.simulation.run_simulation.html -.. _flowernext_superlink_link: ref-api-cli.html#flower-superlink -.. _flowernext_clientapp_link: ref-api-cli.html#flower-client-app -.. _flowernext_serverapp_link: ref-api-cli.html#flower-server-app -.. _flower_simulation_link: ref-api-cli.html#flower-simulation +.. _startsim_link: ref-api/flwr.simulation.start_simulation.html Install update -------------- @@ -48,19 +68,18 @@ Using pip Here's how to update an existing installation of Flower to Flower Next with ``pip``: .. code-block:: bash - + $ python -m pip install -U flwr or if you need Flower Next with simulation: .. code-block:: bash - - $ python -m pip install -U "flwr[simulation]" + $ python -m pip install -U "flwr[simulation]" Ensure you set the following version constraint in your ``requirements.txt`` -.. code-block:: +.. code-block:: # Without simulation support flwr>=1.8,<2.0 @@ -81,32 +100,37 @@ or ``pyproject.toml``: Using Poetry ~~~~~~~~~~~~ -Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). 
+Update the ``flwr`` dependency in ``pyproject.toml`` and then reinstall (don't forget to +delete ``poetry.lock`` via ``rm poetry.lock`` before running ``poetry install``). Ensure you set the following version constraint in your ``pyproject.toml``: .. code-block:: toml + :substitutions: - [tool.poetry.dependencies] - python = "^3.8" + [tool.poetry.dependencies] + python = "^|python_version|" - # Without simulation support - flwr = ">=1.8,<2.0" + # Without simulation support + flwr = ">=1.8,<2.0" - # With simulation support - flwr = { version = ">=1.8,<2.0", extras = ["simulation"] } + # With simulation support + flwr = { version = ">=1.8,<2.0", extras = ["simulation"] } Required changes ---------------- In Flower Next, the *infrastructure* and *application layers* have been decoupled. -Instead of starting a client in code via ``start_client()``, you create a |clientapp_link|_ and start it via the command line. -Instead of starting a server in code via ``start_server()``, you create a |serverapp_link|_ and start it via the command line. -The long-running components of server and client are called SuperLink and SuperNode. -The following non-breaking changes that require manual updates and allow you to run your project both in the traditional way and in the Flower Next way: +Instead of starting a client in code via ``start_client()``, you create a +|clientapp_link|_ and start it via the command line. Instead of starting a server in +code via ``start_server()``, you create a |serverapp_link|_ and start it via the command +line. The long-running components of server and client are called SuperLink and +SuperNode. The following are non-breaking changes that require manual updates and allow +you to run your project both in the traditional way and in the Flower Next way: |clientapp_link|_ ~~~~~~~~~~~~~~~~~ + - Wrap your existing client with |clientapp_link|_ instead of launching it via |startclient_link|_.
Here's an example: @@ -115,23 +139,25 @@ The following non-breaking changes that require manual updates and allow you to # Flower 1.8 def client_fn(cid: str): - return flwr.client.FlowerClient().to_client() - + return flwr.client.FlowerClient().to_client() + + app = flwr.client.ClientApp( - client_fn=client_fn, + client_fn=client_fn, ) # Flower 1.7 if __name__ == "__main__": flwr.client.start_client( - server_address="127.0.0.1:8080", - client=flwr.client.FlowerClient().to_client(), + server_address="127.0.0.1:8080", + client=flwr.client.FlowerClient().to_client(), ) |serverapp_link|_ ~~~~~~~~~~~~~~~~~ -- Wrap your existing strategy with |serverapp_link|_ instead of starting the server - via |startserver_link|_. Here's an example: + +- Wrap your existing strategy with |serverapp_link|_ instead of starting the server via + |startserver_link|_. Here's an example: .. code-block:: python :emphasize-lines: 2,9 @@ -152,13 +178,14 @@ The following non-breaking changes that require manual updates and allow you to Deployment ~~~~~~~~~~ + - Run the ``SuperLink`` using |flowernext_superlink_link|_ before running, in sequence, - |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|_. There is no need to - execute `client.py` and `server.py` as Python scripts. + |flowernext_clientapp_link|_ (2x) and |flowernext_serverapp_link|_. There is no need + to execute `client.py` and `server.py` as Python scripts. - Here's an example to start the server without HTTPS (only for prototyping): .. code-block:: bash - + # Start a Superlink $ flower-superlink --insecure @@ -171,8 +198,9 @@ Deployment # In yet another terminal window, run the ServerApp (this starts the actual training run) $ flower-server-app server:app --insecure -- Here's another example to start with HTTPS. Use the ``--ssl-ca-certfile``, ``--ssl-certfile``, and ``--ssl-keyfile`` command line - options to pass paths to (CA certificate, server certificate, and server private key). 
+- Here's another example to start with HTTPS. Use the ``--ssl-ca-certfile``, + ``--ssl-certfile``, and ``--ssl-keyfile`` command line options to pass paths to (CA + certificate, server certificate, and server private key). .. code-block:: bash @@ -199,6 +227,7 @@ Deployment Simulation in CLI ~~~~~~~~~~~~~~~~~ + - Wrap your existing client and strategy with |clientapp_link|_ and |serverapp_link|_, respectively. There is no need to use |startsim_link|_ anymore. Here's an example: @@ -208,13 +237,16 @@ Simulation in CLI # Regular Flower client implementation class FlowerClient(NumPyClient): # ... + pass + # Flower 1.8 def client_fn(cid: str): - return FlowerClient().to_client() - + return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( - client_fn=client_fn, + client_fn=client_fn, ) server_app = flwr.server.ServerApp( @@ -226,12 +258,12 @@ Simulation in CLI if __name__ == "__main__": hist = flwr.simulation.start_simulation( num_clients=100, - ... + # ... ) -- Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / ``client_app`` object in the - code instead of executing the Python script. Here's an example (assuming the - ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): +- Run |flower_simulation_link|_ in CLI and point to the ``server_app`` / ``client_app`` + object in the code instead of executing the Python script. Here's an example (assuming + the ``server_app`` and ``client_app`` objects are in a ``sim.py`` module): .. code-block:: bash @@ -246,8 +278,8 @@ Simulation in CLI # Flower 1.7 $ python sim.py -- Set default resources for each |clientapp_link|_ using the ``--backend-config`` command - line argument instead of setting the ``client_resources`` argument in +- Set default resources for each |clientapp_link|_ using the ``--backend-config`` + command line argument instead of setting the ``client_resources`` argument in |startsim_link|_. Here's an example: .. 
code-block:: bash @@ -266,26 +298,27 @@ Simulation in CLI # Flower 1.7 (in `sim.py`) if __name__ == "__main__": hist = flwr.simulation.start_simulation( - num_clients=100, - client_resources = {'num_cpus': 2, "num_gpus": 0.25}, - ... + num_clients=100, client_resources={"num_cpus": 2, "num_gpus": 0.25}, ... ) Simulation in a Notebook ~~~~~~~~~~~~~~~~~~~~~~~~ + - Run |runsim_link|_ in your notebook instead of |startsim_link|_. Here's an example: .. code-block:: python :emphasize-lines: 19,27 - NUM_CLIENTS = + NUM_CLIENTS = 10 # Replace by any integer greater than zero + def client_fn(cid: str): # ... - return FlowerClient().to_client() - + return FlowerClient().to_client() + + client_app = flwr.client.ClientApp( - client_fn=client_fn, + client_fn=client_fn, ) server_app = flwr.server.ServerApp( @@ -297,7 +330,7 @@ Simulation in a Notebook # Flower 1.8 flwr.simulation.run_simulation( - server_app=server_app, + server_app=server_app, client_app=client_app, num_supernodes=NUM_CLIENTS, backend_config=backend_config, @@ -312,18 +345,17 @@ Simulation in a Notebook client_resources=backend_config["client_resources"], ) - Further help ------------ Some official `Flower code examples `_ are already -updated to Flower Next so they can serve as a reference for using the Flower Next API. If there are -further questions, `join the Flower Slack `_ and use the channel ``#questions``. -You can also `participate in Flower Discuss `_ where you can find us -answering questions, or share and learn from others about migrating to Flower Next. +updated to Flower Next so they can serve as a reference for using the Flower Next API. +If there are further questions, `join the Flower Slack `_ +and use the channel ``#questions``. You can also `participate in Flower Discuss +`_ where you can find us answering questions, or share and +learn from others about migrating to Flower Next. .. 
admonition:: Important - :class: important As we continuously enhance Flower Next at a rapid pace, we'll be periodically updating this guide. Please feel free to share any feedback with us! diff --git a/doc/source/how-to-use-built-in-mods.rst b/doc/source/how-to-use-built-in-mods.rst index 341139175074..970b2055ec23 100644 --- a/doc/source/how-to-use-built-in-mods.rst +++ b/doc/source/how-to-use-built-in-mods.rst @@ -1,14 +1,19 @@ Use Built-in Mods ================= -**Note: This tutorial covers experimental features. The functionality and interfaces may change in future versions.** +**Note: This tutorial covers experimental features. The functionality and interfaces may +change in future versions.** -In this tutorial, we will learn how to utilize built-in mods to augment the behavior of a ``ClientApp``. Mods (sometimes also called Modifiers) allow us to perform operations before and after a task is processed in the ``ClientApp``. +In this tutorial, we will learn how to utilize built-in mods to augment the behavior of +a ``ClientApp``. Mods (sometimes also called Modifiers) allow us to perform operations +before and after a task is processed in the ``ClientApp``. What are Mods? -------------- -A Mod is a callable that wraps around a ``ClientApp``. It can manipulate or inspect the incoming ``Message`` and the resulting outgoing ``Message``. The signature for a ``Mod`` is as follows: +A Mod is a callable that wraps around a ``ClientApp``. It can manipulate or inspect the +incoming ``Message`` and the resulting outgoing ``Message``. The signature for a ``Mod`` +is as follows: .. code-block:: python @@ -51,12 +56,13 @@ Define your client function (``client_fn``) that will be wrapped by the mod(s): def client_fn(cid): # Your client code goes here. - return # your client + return # your client 3. 
Create the ``ClientApp`` with mods -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. The order in which you provide the mods matters: +Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. The +order in which you provide the mods matters: .. code-block:: python @@ -65,25 +71,31 @@ Create your ``ClientApp`` and pass the mods as a list to the ``mods`` argument. mods=[ example_mod_1, # Mod 1 example_mod_2, # Mod 2 - ] + ], ) Order of execution ------------------ -When the ``ClientApp`` runs, the mods are executed in the order they are provided in the list: +When the ``ClientApp`` runs, the mods are executed in the order they are provided in the +list: 1. ``example_mod_1`` (outermost mod) 2. ``example_mod_2`` (next mod) -3. Message handler (core function that handles the incoming ``Message`` and returns the outgoing ``Message``) +3. Message handler (core function that handles the incoming ``Message`` and returns the + outgoing ``Message``) 4. ``example_mod_2`` (on the way back) 5. ``example_mod_1`` (outermost mod on the way back) -Each mod has a chance to inspect and modify the incoming ``Message`` before passing it to the next mod, and likewise with the outgoing ``Message`` before returning it up the stack. +Each mod has a chance to inspect and modify the incoming ``Message`` before passing it +to the next mod, and likewise with the outgoing ``Message`` before returning it up the +stack. Conclusion ---------- -By following this guide, you have learned how to effectively use mods to enhance your ``ClientApp``'s functionality. Remember that the order of mods is crucial and affects how the input and output are processed. +By following this guide, you have learned how to effectively use mods to enhance your +``ClientApp``'s functionality. Remember that the order of mods is crucial and affects +how the input and output are processed. 
Enjoy building a more robust and flexible ``ClientApp`` with mods! diff --git a/doc/source/how-to-use-differential-privacy.rst b/doc/source/how-to-use-differential-privacy.rst index 5d4fa3dca1a4..67e54271bb2e 100644 --- a/doc/source/how-to-use-differential-privacy.rst +++ b/doc/source/how-to-use-differential-privacy.rst @@ -1,126 +1,151 @@ Use Differential Privacy ------------------------- -This guide explains how you can utilize differential privacy in the Flower framework. If you are not yet familiar with differential privacy, you can refer to :doc:`explanation-differential-privacy`. +======================== -.. warning:: +This guide explains how you can utilize differential privacy in the Flower framework. If +you are not yet familiar with differential privacy, you can refer to +:doc:`explanation-differential-privacy`. - Differential Privacy in Flower is in a preview phase. If you plan to use these features in a production environment with sensitive data, feel free contact us to discuss your requirements and to receive guidance on how to best use these features. +.. warning:: + Differential Privacy in Flower is in a preview phase. If you plan to use these + features in a production environment with sensitive data, feel free to contact us to + discuss your requirements and to receive guidance on how to best use these features. Central Differential Privacy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This approach consists of two separate phases: clipping of the updates and adding noise to the aggregated model. -For the clipping phase, Flower framework has made it possible to decide whether to perform clipping on the server side or the client side. - -- **Server-side Clipping**: This approach has the advantage of the server enforcing uniform clipping across all clients' updates and reducing the communication overhead for clipping values.
However, it also has the disadvantage of increasing the computational load on the server due to the need to perform the clipping operation for all clients. -- **Client-side Clipping**: This approach has the advantage of reducing the computational overhead on the server. However, it also has the disadvantage of lacking centralized control, as the server has less control over the clipping process. +---------------------------- +This approach consists of two separate phases: clipping of the updates and adding noise +to the aggregated model. For the clipping phase, Flower framework has made it possible +to decide whether to perform clipping on the server side or the client side. +- **Server-side Clipping**: This approach has the advantage of the server enforcing + uniform clipping across all clients' updates and reducing the communication overhead + for clipping values. However, it also has the disadvantage of increasing the + computational load on the server due to the need to perform the clipping operation for + all clients. +- **Client-side Clipping**: This approach has the advantage of reducing the + computational overhead on the server. However, it also has the disadvantage of lacking + centralized control, as the server has less control over the clipping process. Server-side Clipping -^^^^^^^^^^^^^^^^^^^^ -For central DP with server-side clipping, there are two :code:`Strategy` classes that act as wrappers around the actual :code:`Strategy` instance (for example, :code:`FedAvg`). -The two wrapper classes are :code:`DifferentialPrivacyServerSideFixedClipping` and :code:`DifferentialPrivacyServerSideAdaptiveClipping` for fixed and adaptive clipping. +~~~~~~~~~~~~~~~~~~~~ -.. image:: ./_static/DP/serversideCDP.png - :align: center - :width: 700 - :alt: server side clipping +For central DP with server-side clipping, there are two ``Strategy`` classes that act as +wrappers around the actual ``Strategy`` instance (for example, ``FedAvg``). 
The two +wrapper classes are ``DifferentialPrivacyServerSideFixedClipping`` and +``DifferentialPrivacyServerSideAdaptiveClipping`` for fixed and adaptive clipping. +.. image:: ./_static/DP/serversideCDP.png + :align: center + :width: 700 + :alt: server side clipping -The code sample below enables the :code:`FedAvg` strategy to use server-side fixed clipping using the :code:`DifferentialPrivacyServerSideFixedClipping` wrapper class. -The same approach can be used with :code:`DifferentialPrivacyServerSideAdaptiveClipping` by adjusting the corresponding input parameters. +The code sample below enables the ``FedAvg`` strategy to use server-side fixed clipping +using the ``DifferentialPrivacyServerSideFixedClipping`` wrapper class. The same +approach can be used with ``DifferentialPrivacyServerSideAdaptiveClipping`` by adjusting +the corresponding input parameters. .. code-block:: python - from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping - - # Create the strategy - strategy = fl.server.strategy.FedAvg(...) - - # Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper - dp_strategy = DifferentialPrivacyServerSideFixedClipping( - strategy, - cfg.noise_multiplier, - cfg.clipping_norm, - cfg.num_sampled_clients, - ) + from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping + # Create the strategy + strategy = fl.server.strategy.FedAvg(...) + # Wrap the strategy with the DifferentialPrivacyServerSideFixedClipping wrapper + dp_strategy = DifferentialPrivacyServerSideFixedClipping( + strategy, + cfg.noise_multiplier, + cfg.clipping_norm, + cfg.num_sampled_clients, + ) Client-side Clipping -^^^^^^^^^^^^^^^^^^^^ -For central DP with client-side clipping, the server sends the clipping value to selected clients on each round. -Clients can use existing Flower :code:`Mods` to perform the clipping. 
-Two mods are available for fixed and adaptive client-side clipping: :code:`fixedclipping_mod` and :code:`adaptiveclipping_mod` with corresponding server-side wrappers :code:`DifferentialPrivacyClientSideFixedClipping` and :code:`DifferentialPrivacyClientSideAdaptiveClipping`. +~~~~~~~~~~~~~~~~~~~~ -.. image:: ./_static/DP/clientsideCDP.png - :align: center - :width: 800 - :alt: client side clipping +For central DP with client-side clipping, the server sends the clipping value to +selected clients on each round. Clients can use existing Flower ``Mods`` to perform the +clipping. Two mods are available for fixed and adaptive client-side clipping: +``fixedclipping_mod`` and ``adaptiveclipping_mod`` with corresponding server-side +wrappers ``DifferentialPrivacyClientSideFixedClipping`` and +``DifferentialPrivacyClientSideAdaptiveClipping``. +.. image:: ./_static/DP/clientsideCDP.png + :align: center + :width: 800 + :alt: client side clipping -The code sample below enables the :code:`FedAvg` strategy to use differential privacy with client-side fixed clipping using both the :code:`DifferentialPrivacyClientSideFixedClipping` wrapper class and, on the client, :code:`fixedclipping_mod`: +The code sample below enables the ``FedAvg`` strategy to use differential privacy with +client-side fixed clipping using both the ``DifferentialPrivacyClientSideFixedClipping`` +wrapper class and, on the client, ``fixedclipping_mod``: .. code-block:: python - from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping + from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping - # Create the strategy - strategy = fl.server.strategy.FedAvg(...) + # Create the strategy + strategy = fl.server.strategy.FedAvg(...) 
- # Wrap the strategy with the DifferentialPrivacyClientSideFixedClipping wrapper - dp_strategy = DifferentialPrivacyClientSideFixedClipping( - strategy, - cfg.noise_multiplier, - cfg.clipping_norm, - cfg.num_sampled_clients, - ) + # Wrap the strategy with the DifferentialPrivacyClientSideFixedClipping wrapper + dp_strategy = DifferentialPrivacyClientSideFixedClipping( + strategy, + cfg.noise_multiplier, + cfg.clipping_norm, + cfg.num_sampled_clients, + ) -In addition to the server-side strategy wrapper, the :code:`ClientApp` needs to configure the matching :code:`fixedclipping_mod` to perform the client-side clipping: +In addition to the server-side strategy wrapper, the ``ClientApp`` needs to configure +the matching ``fixedclipping_mod`` to perform the client-side clipping: .. code-block:: python - from flwr.client.mod import fixedclipping_mod - - # Add fixedclipping_mod to the client-side mods - app = fl.client.ClientApp( - client_fn=client_fn, - mods=[ - fixedclipping_mod, - ] - ) + from flwr.client.mod import fixedclipping_mod + # Add fixedclipping_mod to the client-side mods + app = fl.client.ClientApp( + client_fn=client_fn, + mods=[ + fixedclipping_mod, + ], + ) Local Differential Privacy -~~~~~~~~~~~~~~~~~~~~~~~~~~ -To utilize local differential privacy (DP) and add noise to the client model parameters before transmitting them to the server in Flower, you can use the `LocalDpMod`. The following hyperparameters need to be set: clipping norm value, sensitivity, epsilon, and delta. +-------------------------- + +To utilize local differential privacy (DP) and add noise to the client model parameters +before transmitting them to the server in Flower, you can use the `LocalDpMod`. The +following hyperparameters need to be set: clipping norm value, sensitivity, epsilon, and +delta. .. 
image:: ./_static/DP/localdp.png - :align: center - :width: 700 - :alt: local DP mod + :align: center + :width: 700 + :alt: local DP mod -Below is a code example that shows how to use :code:`LocalDpMod`: +Below is a code example that shows how to use ``LocalDpMod``: .. code-block:: python - from flwr.client.mod.localdp_mod import LocalDpMod - - # Create an instance of the mod with the required params - local_dp_obj = LocalDpMod( - cfg.clipping_norm, cfg.sensitivity, cfg.epsilon, cfg.delta - ) - # Add local_dp_obj to the client-side mods + from flwr.client.mod.localdp_mod import LocalDpMod - app = fl.client.ClientApp( - client_fn=client_fn, - mods=[local_dp_obj], - ) + # Create an instance of the mod with the required params + local_dp_obj = LocalDpMod(cfg.clipping_norm, cfg.sensitivity, cfg.epsilon, cfg.delta) + # Add local_dp_obj to the client-side mods + app = fl.client.ClientApp( + client_fn=client_fn, + mods=[local_dp_obj], + ) -Please note that the order of mods, especially those that modify parameters, is important when using multiple modifiers. Typically, differential privacy (DP) modifiers should be the last to operate on parameters. +Please note that the order of mods, especially those that modify parameters, is +important when using multiple modifiers. Typically, differential privacy (DP) modifiers +should be the last to operate on parameters. Local Training using Privacy Engines -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -For ensuring data instance-level privacy during local model training on the client side, consider leveraging privacy engines such as Opacus and TensorFlow Privacy. For examples of using Flower with these engines, please refer to the Flower examples directory (`Opacus `_, `Tensorflow Privacy `_). \ No newline at end of file +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For ensuring data instance-level privacy during local model training on the client side, +consider leveraging privacy engines such as Opacus and TensorFlow Privacy. 
For examples +of using Flower with these engines, please refer to the Flower examples directory +(`Opacus `_, `Tensorflow +Privacy `_). diff --git a/doc/source/how-to-use-strategies.rst b/doc/source/how-to-use-strategies.rst index 8ac120124951..b4803c6059b3 100644 --- a/doc/source/how-to-use-strategies.rst +++ b/doc/source/how-to-use-strategies.rst @@ -1,19 +1,21 @@ Use strategies ============== -Flower allows full customization of the learning process through the :code:`Strategy` abstraction. A number of built-in strategies are provided in the core framework. +Flower allows full customization of the learning process through the ``Strategy`` +abstraction. A number of built-in strategies are provided in the core framework. -There are three ways to customize the way Flower orchestrates the learning process on the server side: - -* Use an existing strategy, for example, :code:`FedAvg` -* Customize an existing strategy with callback functions -* Implement a novel strategy +There are three ways to customize the way Flower orchestrates the learning process on +the server side: +- Use an existing strategy, for example, ``FedAvg`` +- Customize an existing strategy with callback functions +- Implement a novel strategy Use an existing strategy ------------------------ -Flower comes with a number of popular federated learning strategies built-in. A built-in strategy can be instantiated as follows: +Flower comes with a number of popular federated learning strategies built-in. A built-in +strategy can be instantiated as follows: .. code-block:: python @@ -22,7 +24,9 @@ Flower comes with a number of popular federated learning strategies built-in. A strategy = fl.server.strategy.FedAvg() fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -This creates a strategy with all parameters left at their default values and passes it to the :code:`start_server` function. 
It is usually recommended to adjust a few parameters during instantiation: +This creates a strategy with all parameters left at their default values and passes it +to the ``start_server`` function. It is usually recommended to adjust a few parameters +during instantiation: .. code-block:: python @@ -35,22 +39,26 @@ This creates a strategy with all parameters left at their default values and pas ) fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) - Customize an existing strategy with callback functions ------------------------------------------------------ -Existing strategies provide several ways to customize their behaviour. Callback functions allow strategies to call user-provided code during execution. +Existing strategies provide several ways to customize their behaviour. Callback +functions allow strategies to call user-provided code during execution. Configuring client fit and client evaluate ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The server can pass new configuration values to the client each round by providing a function to :code:`on_fit_config_fn`. The provided function will be called by the strategy and must return a dictionary of configuration key values pairs that will be sent to the client. -It must return a dictionary of arbitrary configuration values :code:`client.fit` and :code:`client.evaluate` functions during each round of federated learning. +The server can pass new configuration values to the client each round by providing a +function to ``on_fit_config_fn``. The provided function will be called by the strategy +and must return a dictionary of configuration key-value pairs that will be sent to the +client. It must return a dictionary of arbitrary configuration values for the +``client.fit`` and ``client.evaluate`` functions during each round of federated +learning. ..
code-block:: python import flwr as fl + def get_on_fit_config_fn() -> Callable[[int], Dict[str, str]]: """Return a function which returns training configurations.""" @@ -64,6 +72,7 @@ It must return a dictionary of arbitrary configuration values :code:`client.fit return fit_config + strategy = fl.server.strategy.FedAvg( fraction_fit=0.1, min_fit_clients=10, @@ -72,18 +81,23 @@ It must return a dictionary of arbitrary configuration values :code:`client.fit ) fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3), strategy=strategy) -The :code:`on_fit_config_fn` can be used to pass arbitrary configuration values from server to client, and potentially change these values each round, for example, to adjust the learning rate. -The client will receive the dictionary returned by the :code:`on_fit_config_fn` in its own :code:`client.fit()` function. +The ``on_fit_config_fn`` can be used to pass arbitrary configuration values from server +to client, and potentially change these values each round, for example, to adjust the +learning rate. The client will receive the dictionary returned by the +``on_fit_config_fn`` in its own ``client.fit()`` function. -Similar to :code:`on_fit_config_fn`, there is also :code:`on_evaluate_config_fn` to customize the configuration sent to :code:`client.evaluate()` +Similar to ``on_fit_config_fn``, there is also ``on_evaluate_config_fn`` to customize +the configuration sent to ``client.evaluate()`` Configuring server-side evaluation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Server-side evaluation can be enabled by passing an evaluation function to :code:`evaluate_fn`. - +Server-side evaluation can be enabled by passing an evaluation function to +``evaluate_fn``. Implement a novel strategy -------------------------- -Writing a fully custom strategy is a bit more involved, but it provides the most flexibility. Read the `Implementing Strategies `_ guide to learn more. 
+Writing a fully custom strategy is a bit more involved, but it provides the most +flexibility. Read the `Implementing Strategies <how-to-implement-strategies.html>`_ +guide to learn more. diff --git a/doc/source/index.rst b/doc/source/index.rst index 2a34693f7b26..197599d595a8 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -2,15 +2,16 @@ Flower Framework Documentation ============================== .. meta:: - :description: Check out the documentation of the main Flower Framework enabling easy Python development for Federated Learning. - -Welcome to Flower's documentation. `Flower <https://flower.ai>`_ is a friendly federated learning framework. + :description: Check out the documentation of the main Flower Framework enabling easy Python development for Federated Learning. +Welcome to Flower's documentation. `Flower <https://flower.ai>`_ is a friendly federated +learning framework. Join the Flower Community ------------------------- -The Flower Community is growing quickly - we're a friendly group of researchers, engineers, students, professionals, academics, and other enthusiasts. +The Flower Community is growing quickly - we're a friendly group of researchers, +engineers, students, professionals, academics, and other enthusiasts. .. button-link:: https://flower.ai/join-slack :color: primary @@ -18,13 +19,12 @@ The Flower Community is growing quickly - we're a friendly group of researchers, Join us on Slack - Flower Framework ---------------- -The user guide is targeted at researchers and developers who want to use Flower -to bring existing machine learning workloads into a federated setting. One of -Flower's design goals was to make this simple. Read on to learn more. +The user guide is targeted at researchers and developers who want to use Flower to bring +existing machine learning workloads into a federated setting. One of Flower's design +goals was to make this simple. Read on to learn more.
Tutorials ~~~~~~~~~ @@ -32,44 +32,50 @@ Tutorials A learning-oriented series of federated learning tutorials, the best place to start. .. toctree:: - :maxdepth: 1 - :caption: Tutorial + :maxdepth: 1 + :caption: Tutorial - tutorial-series-what-is-federated-learning - tutorial-series-get-started-with-flower-pytorch - tutorial-series-use-a-federated-learning-strategy-pytorch - tutorial-series-build-a-strategy-from-scratch-pytorch - tutorial-series-customize-the-client-pytorch + tutorial-series-what-is-federated-learning + tutorial-series-get-started-with-flower-pytorch + tutorial-series-use-a-federated-learning-strategy-pytorch + tutorial-series-build-a-strategy-from-scratch-pytorch + tutorial-series-customize-the-client-pytorch .. toctree:: - :maxdepth: 1 - :caption: Quickstart tutorials - :hidden: - - tutorial-quickstart-pytorch - tutorial-quickstart-tensorflow - tutorial-quickstart-mlx - tutorial-quickstart-huggingface - tutorial-quickstart-jax - tutorial-quickstart-pandas - tutorial-quickstart-fastai - tutorial-quickstart-pytorch-lightning - tutorial-quickstart-scikitlearn - tutorial-quickstart-xgboost - tutorial-quickstart-android - tutorial-quickstart-ios - -QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow ` | :doc:`MLX ` | :doc:`🤗 Transformers ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai ` | :doc:`PyTorch Lightning ` | :doc:`scikit-learn ` | :doc:`XGBoost ` | :doc:`Android ` | :doc:`iOS ` + :maxdepth: 1 + :caption: Quickstart tutorials + :hidden: + + tutorial-quickstart-pytorch + tutorial-quickstart-tensorflow + tutorial-quickstart-mlx + tutorial-quickstart-huggingface + tutorial-quickstart-jax + tutorial-quickstart-pandas + tutorial-quickstart-fastai + tutorial-quickstart-pytorch-lightning + tutorial-quickstart-scikitlearn + tutorial-quickstart-xgboost + tutorial-quickstart-android + tutorial-quickstart-ios + +QUICKSTART TUTORIALS: :doc:`PyTorch ` | :doc:`TensorFlow +` | :doc:`MLX ` | :doc:`🤗 +Transformers ` | :doc:`JAX ` | +:doc:`Pandas ` | 
:doc:`fastai ` +| :doc:`PyTorch Lightning ` | :doc:`scikit-learn +` | :doc:`XGBoost ` | +:doc:`Android ` | :doc:`iOS ` We also made video tutorials for PyTorch: -.. youtube:: jOmmuzMIQ4c - :width: 80% +.. youtube:: jOmmuzMIQ4c + :width: 80% And TensorFlow: -.. youtube:: FGTc2TQq7VM - :width: 80% +.. youtube:: FGTc2TQq7VM + :width: 80% How-to guides ~~~~~~~~~~~~~ @@ -77,45 +83,46 @@ How-to guides Problem-oriented how-to guides show step-by-step how to achieve a specific goal. .. toctree:: - :maxdepth: 1 - :caption: How-to guides - - how-to-install-flower - how-to-configure-clients - how-to-use-strategies - how-to-implement-strategies - how-to-aggregate-evaluation-results - how-to-save-and-load-model-checkpoints - how-to-run-simulations - how-to-monitor-simulation - how-to-configure-logging - how-to-enable-ssl-connections - how-to-use-built-in-mods - how-to-use-differential-privacy - how-to-authenticate-supernodes - docker/index - how-to-upgrade-to-flower-1.0 - how-to-upgrade-to-flower-next + :maxdepth: 1 + :caption: How-to guides + + how-to-install-flower + how-to-configure-clients + how-to-use-strategies + how-to-implement-strategies + how-to-aggregate-evaluation-results + how-to-save-and-load-model-checkpoints + how-to-run-simulations + how-to-monitor-simulation + how-to-configure-logging + how-to-enable-ssl-connections + how-to-use-built-in-mods + how-to-use-differential-privacy + how-to-authenticate-supernodes + docker/index + how-to-upgrade-to-flower-1.0 + how-to-upgrade-to-flower-next .. 
toctree:: - :maxdepth: 1 - :caption: Legacy example guides + :maxdepth: 1 + :caption: Legacy example guides - example-pytorch-from-centralized-to-federated - example-jax-from-centralized-to-federated - example-fedbn-pytorch-from-centralized-to-federated + example-pytorch-from-centralized-to-federated + example-fedbn-pytorch-from-centralized-to-federated Explanations ~~~~~~~~~~~~ -Understanding-oriented concept guides explain and discuss key topics and underlying ideas behind Flower and collaborative AI. +Understanding-oriented concept guides explain and discuss key topics and underlying +ideas behind Flower and collaborative AI. .. toctree:: - :maxdepth: 1 - :caption: Explanations + :maxdepth: 1 + :caption: Explanations - explanation-federated-evaluation - explanation-differential-privacy + explanation-federated-evaluation + explanation-differential-privacy + explanation-flower-architecture References ~~~~~~~~~~ @@ -123,72 +130,77 @@ References Information-oriented API reference and other reference material. .. autosummary:: - :toctree: ref-api - :template: autosummary/module.rst - :caption: API reference - :recursive: + :toctree: ref-api + :template: autosummary/module.rst + :caption: API reference + :recursive: - flwr + flwr .. toctree:: - :maxdepth: 2 + :maxdepth: 2 - ref-api-cli + ref-api-cli .. toctree:: - :maxdepth: 1 - :caption: Reference docs - - ref-example-projects - ref-telemetry - ref-changelog - ref-faq + :maxdepth: 1 + :caption: Reference docs + ref-example-projects + ref-telemetry + ref-changelog + ref-faq Contributor docs ---------------- -The Flower community welcomes contributions. The following docs are intended to help along the way. - +The Flower community welcomes contributions. The following docs are intended to help +along the way. .. 
toctree:: - :maxdepth: 1 - :caption: Contributor tutorials + :maxdepth: 1 + :caption: Contributor tutorials - contributor-tutorial-contribute-on-github - contributor-tutorial-get-started-as-a-contributor + contributor-tutorial-contribute-on-github + contributor-tutorial-get-started-as-a-contributor .. toctree:: - :maxdepth: 1 - :caption: Contributor how-to guides + :maxdepth: 1 + :caption: Contributor how-to guides - contributor-how-to-install-development-versions - contributor-how-to-set-up-a-virtual-env - contributor-how-to-develop-in-vscode-dev-containers - contributor-how-to-write-documentation - contributor-how-to-release-flower - contributor-how-to-contribute-translations - contributor-how-to-build-docker-images + contributor-how-to-install-development-versions + contributor-how-to-set-up-a-virtual-env + contributor-how-to-develop-in-vscode-dev-containers + contributor-how-to-write-documentation + contributor-how-to-release-flower + contributor-how-to-contribute-translations + contributor-how-to-build-docker-images .. toctree:: - :maxdepth: 1 - :caption: Contributor explanations + :maxdepth: 1 + :caption: Contributor explanations - contributor-explanation-architecture - contributor-explanation-public-and-private-apis + contributor-explanation-public-and-private-apis .. toctree:: - :maxdepth: 1 - :caption: Contributor references + :maxdepth: 1 + :caption: Contributor references + + fed/index + contributor-ref-good-first-contributions + contributor-ref-secure-aggregation-protocols + +.. + Indices and tables - fed/index - contributor-ref-good-first-contributions - contributor-ref-secure-aggregation-protocols +.. + ------------------ +.. + * :ref:`genindex` -.. Indices and tables -.. ------------------ +.. + * :ref:`modindex` -.. * :ref:`genindex` -.. * :ref:`modindex` -.. * :ref:`search` +.. 
+ * :ref:`search` diff --git a/doc/source/ref-api-cli.rst b/doc/source/ref-api-cli.rst index ff1a9606f58d..e95132bbadba 100644 --- a/doc/source/ref-api-cli.rst +++ b/doc/source/ref-api-cli.rst @@ -4,58 +4,66 @@ Flower CLI reference .. _flwr-apiref: flwr CLI -~~~~~~~~ +-------- .. click:: flwr.cli.app:typer_click_object - :prog: flwr - :nested: full + :prog: flwr + :nested: full .. _flower-simulation-apiref: flower-simulation -~~~~~~~~~~~~~~~~~ +----------------- .. argparse:: - :module: flwr.simulation.run_simulation - :func: _parse_args_run_simulation - :prog: flower-simulation + :module: flwr.simulation.run_simulation + :func: _parse_args_run_simulation + :prog: flower-simulation .. _flower-superlink-apiref: flower-superlink -~~~~~~~~~~~~~~~~ +---------------- .. argparse:: - :module: flwr.server.app - :func: _parse_args_run_superlink - :prog: flower-superlink + :module: flwr.server.app + :func: _parse_args_run_superlink + :prog: flower-superlink .. _flower-supernode-apiref: flower-supernode -~~~~~~~~~~~~~~~~~ +---------------- .. argparse:: - :module: flwr.client.supernode.app - :func: _parse_args_run_supernode - :prog: flower-supernode + :module: flwr.client.supernode.app + :func: _parse_args_run_supernode + :prog: flower-supernode .. _flower-server-app-apiref: flower-server-app -~~~~~~~~~~~~~~~~~ +----------------- + +.. note:: + + Note that since version ``1.11.0``, ``flower-server-app`` no longer supports passing + a reference to a `ServerApp` attribute. Instead, you need to pass the path to Flower + app via the argument ``--app``. This is the path to a directory containing a + `pyproject.toml`. You can create a valid Flower app by executing ``flwr new`` and + following the prompt. .. argparse:: - :module: flwr.server.run_serverapp - :func: _parse_args_run_server_app - :prog: flower-server-app + :module: flwr.server.run_serverapp + :func: _parse_args_run_server_app + :prog: flower-server-app .. 
_flower-superexec-apiref: flower-superexec -~~~~~~~~~~~~~~~~~ +---------------- .. argparse:: - :module: flwr.superexec.app - :func: _parse_args_run_superexec - :prog: flower-superexec \ No newline at end of file + :module: flwr.superexec.app + :func: _parse_args_run_superexec + :prog: flower-superexec diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index 531afb9ada52..f88a75feabd3 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -1,6 +1,210 @@ # Changelog -## Unreleased +## v1.12.0 (2024-10-14) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Audris`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Heng Pan`, `Javier`, `Jiahao Tan`, `Julian Rußmeyer`, `Mohammad Naseri`, `Ray Sun`, `Robert Steiner`, `Yan Gao`, `xiliguguagua` + +### What's new? + +- **Introduce SuperExec log streaming** ([#3577](https://github.com/adap/flower/pull/3577), [#3584](https://github.com/adap/flower/pull/3584), [#4242](https://github.com/adap/flower/pull/4242), [#3611](https://github.com/adap/flower/pull/3611), [#3613](https://github.com/adap/flower/pull/3613)) + + Flower now supports log streaming from a remote SuperExec using the `flwr log` command. This new feature allows you to monitor logs from SuperExec in real time via `flwr log ` (or `flwr log `). + +- **Improve `flwr new` templates** ([#4291](https://github.com/adap/flower/pull/4291), [#4292](https://github.com/adap/flower/pull/4292), [#4293](https://github.com/adap/flower/pull/4293), [#4294](https://github.com/adap/flower/pull/4294), [#4295](https://github.com/adap/flower/pull/4295)) + + The `flwr new` command templates for MLX, NumPy, sklearn, JAX, and PyTorch have been updated to improve usability and consistency across frameworks. 
+ +- **Migrate ID handling to use unsigned 64-bit integers** ([#4170](https://github.com/adap/flower/pull/4170), [#4237](https://github.com/adap/flower/pull/4237), [#4243](https://github.com/adap/flower/pull/4243)) + + Node IDs, run IDs, and related fields have been migrated from signed 64-bit integers (`sint64`) to unsigned 64-bit integers (`uint64`). To support this change, the `uint64` type is fully supported in all communications. You may now use `uint64` values in config and metric dictionaries. For Python users, that means using `int` values larger than the maximum value of `sint64` but less than the maximum value of `uint64`. + +- **Add Flower architecture explanation** ([#3270](https://github.com/adap/flower/pull/3270)) + + A new [Flower architecture explainer](https://flower.ai/docs/framework/explanation-flower-architecture.html) page introduces Flower components step-by-step. Check out the `EXPLANATIONS` section of the Flower documentation if you're interested. + +- **Introduce FedRep baseline** ([#3790](https://github.com/adap/flower/pull/3790)) + + FedRep is a federated learning algorithm that learns shared data representations across clients while allowing each to maintain personalized local models, balancing collaboration and individual adaptation. 
Read all the details in the paper: "Exploiting Shared Representations for Personalized Federated Learning" ([arxiv](https://arxiv.org/abs/2102.07078)) + +- **Improve FlowerTune template and LLM evaluation pipelines** ([#4286](https://github.com/adap/flower/pull/4286), [#3769](https://github.com/adap/flower/pull/3769), [#4272](https://github.com/adap/flower/pull/4272), [#4257](https://github.com/adap/flower/pull/4257), [#4220](https://github.com/adap/flower/pull/4220), [#4282](https://github.com/adap/flower/pull/4282), [#4171](https://github.com/adap/flower/pull/4171), [#4228](https://github.com/adap/flower/pull/4228), [#4258](https://github.com/adap/flower/pull/4258), [#4296](https://github.com/adap/flower/pull/4296), [#4287](https://github.com/adap/flower/pull/4287), [#4217](https://github.com/adap/flower/pull/4217), [#4249](https://github.com/adap/flower/pull/4249), [#4324](https://github.com/adap/flower/pull/4324), [#4219](https://github.com/adap/flower/pull/4219), [#4327](https://github.com/adap/flower/pull/4327)) + + Refined evaluation pipelines, metrics, and documentation for the upcoming FlowerTune LLM Leaderboard across multiple domains including Finance, Medical, and general NLP. Stay tuned for the official launch—we welcome all federated learning and LLM enthusiasts to participate in this exciting challenge! 
+ +- **Enhance Docker Support and Documentation** ([#4191](https://github.com/adap/flower/pull/4191), [#4251](https://github.com/adap/flower/pull/4251), [#4190](https://github.com/adap/flower/pull/4190), [#3928](https://github.com/adap/flower/pull/3928), [#4298](https://github.com/adap/flower/pull/4298), [#4192](https://github.com/adap/flower/pull/4192), [#4136](https://github.com/adap/flower/pull/4136), [#4187](https://github.com/adap/flower/pull/4187), [#4261](https://github.com/adap/flower/pull/4261), [#4177](https://github.com/adap/flower/pull/4177), [#4176](https://github.com/adap/flower/pull/4176), [#4189](https://github.com/adap/flower/pull/4189), [#4297](https://github.com/adap/flower/pull/4297), [#4226](https://github.com/adap/flower/pull/4226)) + + Upgraded Ubuntu base image to 24.04, added SBOM and gcc to Docker images, and comprehensively updated [Docker documentation](https://flower.ai/docs/framework/docker/index.html) including quickstart guides and distributed Docker Compose instructions. + +- **Introduce Flower glossary** ([#4165](https://github.com/adap/flower/pull/4165), [#4235](https://github.com/adap/flower/pull/4235)) + + Added the [Federated Learning glossary](https://flower.ai/glossary/) to the Flower repository, located under the `flower/glossary/` directory. This resource aims to provide clear definitions and explanations of key FL concepts. Community contributions are highly welcomed to help expand and refine this knowledge base — this is probably the easiest way to become a Flower contributor! + +- **Implement Message Time-to-Live (TTL)** ([#3620](https://github.com/adap/flower/pull/3620), [#3596](https://github.com/adap/flower/pull/3596), [#3615](https://github.com/adap/flower/pull/3615), [#3609](https://github.com/adap/flower/pull/3609), [#3635](https://github.com/adap/flower/pull/3635)) + + Added comprehensive TTL support for messages in Flower's SuperLink. 
Messages are now automatically expired and cleaned up based on configurable TTL values, available through the low-level API (and used by default in the high-level API). + +- **Improve FAB handling** ([#4303](https://github.com/adap/flower/pull/4303), [#4264](https://github.com/adap/flower/pull/4264), [#4305](https://github.com/adap/flower/pull/4305), [#4304](https://github.com/adap/flower/pull/4304)) + + An 8-character hash is now appended to the FAB file name. The `flwr install` command installs FABs with a more flattened folder structure, reducing it from 3 levels to 1. + +- **Update documentation** ([#3341](https://github.com/adap/flower/pull/3341), [#3338](https://github.com/adap/flower/pull/3338), [#3927](https://github.com/adap/flower/pull/3927), [#4152](https://github.com/adap/flower/pull/4152), [#4151](https://github.com/adap/flower/pull/4151), [#3993](https://github.com/adap/flower/pull/3993)) + + Updated quickstart tutorials (PyTorch Lightning, TensorFlow, Hugging Face, Fastai) to use the new `flwr run` command and removed default title from documentation base template. A new blockchain example has been added to FAQ. + +- **Update example projects** ([#3716](https://github.com/adap/flower/pull/3716), [#4007](https://github.com/adap/flower/pull/4007), [#4130](https://github.com/adap/flower/pull/4130), [#4234](https://github.com/adap/flower/pull/4234), [#4206](https://github.com/adap/flower/pull/4206), [#4188](https://github.com/adap/flower/pull/4188), [#4247](https://github.com/adap/flower/pull/4247), [#4331](https://github.com/adap/flower/pull/4331)) + + Refreshed multiple example projects including vertical FL, PyTorch (advanced), Pandas, Secure Aggregation, and XGBoost examples. Optimized Hugging Face quickstart with a smaller language model and removed legacy simulation examples. 
+ +- **Update translations** ([#4070](https://github.com/adap/flower/pull/4070), [#4316](https://github.com/adap/flower/pull/4316), [#4252](https://github.com/adap/flower/pull/4252), [#4256](https://github.com/adap/flower/pull/4256), [#4210](https://github.com/adap/flower/pull/4210), [#4263](https://github.com/adap/flower/pull/4263), [#4259](https://github.com/adap/flower/pull/4259)) + +- **General improvements** ([#4239](https://github.com/adap/flower/pull/4239), [4276](https://github.com/adap/flower/pull/4276), [4204](https://github.com/adap/flower/pull/4204), [4184](https://github.com/adap/flower/pull/4184), [4227](https://github.com/adap/flower/pull/4227), [4183](https://github.com/adap/flower/pull/4183), [4202](https://github.com/adap/flower/pull/4202), [4250](https://github.com/adap/flower/pull/4250), [4267](https://github.com/adap/flower/pull/4267), [4246](https://github.com/adap/flower/pull/4246), [4240](https://github.com/adap/flower/pull/4240), [4265](https://github.com/adap/flower/pull/4265), [4238](https://github.com/adap/flower/pull/4238), [4275](https://github.com/adap/flower/pull/4275), [4318](https://github.com/adap/flower/pull/4318), [#4178](https://github.com/adap/flower/pull/4178), [#4315](https://github.com/adap/flower/pull/4315), [#4241](https://github.com/adap/flower/pull/4241), [#4289](https://github.com/adap/flower/pull/4289), [#4290](https://github.com/adap/flower/pull/4290), [#4181](https://github.com/adap/flower/pull/4181), [#4208](https://github.com/adap/flower/pull/4208), [#4225](https://github.com/adap/flower/pull/4225), [#4314](https://github.com/adap/flower/pull/4314), [#4174](https://github.com/adap/flower/pull/4174), [#4203](https://github.com/adap/flower/pull/4203), [#4274](https://github.com/adap/flower/pull/4274), [#3154](https://github.com/adap/flower/pull/3154), [#4201](https://github.com/adap/flower/pull/4201), [#4268](https://github.com/adap/flower/pull/4268), [#4254](https://github.com/adap/flower/pull/4254), 
[#3990](https://github.com/adap/flower/pull/3990), [#4212](https://github.com/adap/flower/pull/4212), [#2938](https://github.com/adap/flower/pull/2938), [#4205](https://github.com/adap/flower/pull/4205), [#4222](https://github.com/adap/flower/pull/4222), [#4313](https://github.com/adap/flower/pull/4313), [#3936](https://github.com/adap/flower/pull/3936), [#4278](https://github.com/adap/flower/pull/4278), [#4319](https://github.com/adap/flower/pull/4319), [#4332](https://github.com/adap/flower/pull/4332), [#4333](https://github.com/adap/flower/pull/4333)) + + As always, many parts of the Flower framework and quality infrastructure were improved and updated. + +### Incompatible changes + +- **Drop Python 3.8 support and update minimum version to 3.9** ([#4180](https://github.com/adap/flower/pull/4180), [#4213](https://github.com/adap/flower/pull/4213), [#4193](https://github.com/adap/flower/pull/4193), [#4199](https://github.com/adap/flower/pull/4199), [#4196](https://github.com/adap/flower/pull/4196), [#4195](https://github.com/adap/flower/pull/4195), [#4198](https://github.com/adap/flower/pull/4198), [#4194](https://github.com/adap/flower/pull/4194)) + + Python 3.8 support was deprecated in Flower 1.9, and this release removes support. Flower now requires Python 3.9 or later (Python 3.11 is recommended). CI and documentation were updated to use Python 3.9 as the minimum supported version. Flower now supports Python 3.9 to 3.12. + +## v1.11.1 (2024-09-11) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
Beutel`, `Heng Pan`, `Javier`, `Robert Steiner`, `Yan Gao` + +### Improvements + +- **Implement** `keys/values/items` **methods for** `TypedDict` ([#4146](https://github.com/adap/flower/pull/4146)) + +- **Fix parsing of** `--executor-config` **if present** ([#4125](https://github.com/adap/flower/pull/4125)) + +- **Adjust framework name in templates docstrings** ([#4127](https://github.com/adap/flower/pull/4127)) + +- **Update** `flwr new` **Hugging Face template** ([#4169](https://github.com/adap/flower/pull/4169)) + +- **Fix** `flwr new` **FlowerTune template** ([#4123](https://github.com/adap/flower/pull/4123)) + +- **Add buffer time after** `ServerApp` **thread initialization** ([#4119](https://github.com/adap/flower/pull/4119)) + +- **Handle unsuitable resources for simulation** ([#4143](https://github.com/adap/flower/pull/4143)) + +- **Update example READMEs** ([#4117](https://github.com/adap/flower/pull/4117)) + +- **Update SuperNode authentication docs** ([#4160](https://github.com/adap/flower/pull/4160)) + +### Incompatible changes + +None + +## v1.11.0 (2024-08-30) + +### Thanks to our contributors + +We would like to give our special thanks to all the contributors who made the new version of Flower possible (in `git shortlog` order): + +`Adam Narozniak`, `Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Daniel Nata Nugraha`, `Danny`, `Edoardo Gabrielli`, `Heng Pan`, `Javier`, `Meng Yan`, `Michal Danilowski`, `Mohammad Naseri`, `Robert Steiner`, `Steve Laskaridis`, `Taner Topal`, `Yan Gao` + +### What's new? 
+ +- **Deliver Flower App Bundle (FAB) to SuperLink and SuperNodes** ([#4006](https://github.com/adap/flower/pull/4006), [#3945](https://github.com/adap/flower/pull/3945), [#3999](https://github.com/adap/flower/pull/3999), [#4027](https://github.com/adap/flower/pull/4027), [#3851](https://github.com/adap/flower/pull/3851), [#3946](https://github.com/adap/flower/pull/3946), [#4003](https://github.com/adap/flower/pull/4003), [#4029](https://github.com/adap/flower/pull/4029), [#3942](https://github.com/adap/flower/pull/3942), [#3957](https://github.com/adap/flower/pull/3957), [#4020](https://github.com/adap/flower/pull/4020), [#4044](https://github.com/adap/flower/pull/4044), [#3852](https://github.com/adap/flower/pull/3852), [#4019](https://github.com/adap/flower/pull/4019), [#4031](https://github.com/adap/flower/pull/4031), [#4036](https://github.com/adap/flower/pull/4036), [#4049](https://github.com/adap/flower/pull/4049), [#4017](https://github.com/adap/flower/pull/4017), [#3943](https://github.com/adap/flower/pull/3943), [#3944](https://github.com/adap/flower/pull/3944), [#4011](https://github.com/adap/flower/pull/4011), [#3619](https://github.com/adap/flower/pull/3619)) + + Dynamic code updates are here! `flwr run` can now ship and install the latest version of your `ServerApp` and `ClientApp` to an already-running federation (SuperLink and SuperNodes). + + How does it work? `flwr run` bundles your Flower app into a single FAB (Flower App Bundle) file. It then ships this FAB file, via the SuperExec, to both the SuperLink and those SuperNodes that need it. This allows you to keep SuperExec, SuperLink and SuperNodes running as permanent infrastructure, and then ship code updates (including completely new projects!) dynamically. + + `flwr run` is all you need. 
+ +- **Introduce isolated** `ClientApp` **execution** ([#3970](https://github.com/adap/flower/pull/3970), [#3976](https://github.com/adap/flower/pull/3976), [#4002](https://github.com/adap/flower/pull/4002), [#4001](https://github.com/adap/flower/pull/4001), [#4034](https://github.com/adap/flower/pull/4034), [#4037](https://github.com/adap/flower/pull/4037), [#3977](https://github.com/adap/flower/pull/3977), [#4042](https://github.com/adap/flower/pull/4042), [#3978](https://github.com/adap/flower/pull/3978), [#4039](https://github.com/adap/flower/pull/4039), [#4033](https://github.com/adap/flower/pull/4033), [#3971](https://github.com/adap/flower/pull/3971), [#4035](https://github.com/adap/flower/pull/4035), [#3973](https://github.com/adap/flower/pull/3973), [#4032](https://github.com/adap/flower/pull/4032)) + + The SuperNode can now run your `ClientApp` in a fully isolated way. In an enterprise deployment, this allows you to set strict limits on what the `ClientApp` can and cannot do. + + `flower-supernode` supports three `--isolation` modes: + + - Unset: The SuperNode runs the `ClientApp` in the same process (as in previous versions of Flower). This is the default mode. + - `--isolation=subprocess`: The SuperNode starts a subprocess to run the `ClientApp`. + - `--isolation=process`: The SuperNode expects an externally-managed process to run the `ClientApp`. This external process is not managed by the SuperNode, so it has to be started beforehand and terminated manually. The common way to use this isolation mode is via the new `flwr/clientapp` Docker image. 
+ +- **Improve Docker support for enterprise deployments** ([#4050](https://github.com/adap/flower/pull/4050), [#4090](https://github.com/adap/flower/pull/4090), [#3784](https://github.com/adap/flower/pull/3784), [#3998](https://github.com/adap/flower/pull/3998), [#4094](https://github.com/adap/flower/pull/4094), [#3722](https://github.com/adap/flower/pull/3722)) + + Flower 1.11 ships many Docker improvements that are especially useful for enterprise deployments: + + - `flwr/supernode` comes with a new Alpine Docker image. + - `flwr/clientapp` is a new image to be used with the `--isolation=process` option. In this mode, SuperNode and `ClientApp` run in two different Docker containers. `flwr/supernode` (preferably the Alpine version) runs the long-running SuperNode with `--isolation=process`. `flwr/clientapp` runs the `ClientApp`. This is the recommended way to deploy Flower in enterprise settings. + - New all-in-one Docker Compose enables you to easily start a full Flower Deployment Engine on a single machine. + - Completely new Docker documentation: https://flower.ai/docs/framework/docker/index.html + +- **Improve SuperNode authentication** ([#4043](https://github.com/adap/flower/pull/4043), [#4047](https://github.com/adap/flower/pull/4047), [#4074](https://github.com/adap/flower/pull/4074)) + + SuperNode auth has been improved in several ways, including improved logging, improved testing, and improved error handling. 
+ +- **Update** `flwr new` **templates** ([#3933](https://github.com/adap/flower/pull/3933), [#3894](https://github.com/adap/flower/pull/3894), [#3930](https://github.com/adap/flower/pull/3930), [#3931](https://github.com/adap/flower/pull/3931), [#3997](https://github.com/adap/flower/pull/3997), [#3979](https://github.com/adap/flower/pull/3979), [#3965](https://github.com/adap/flower/pull/3965), [#4013](https://github.com/adap/flower/pull/4013), [#4064](https://github.com/adap/flower/pull/4064)) + + All `flwr new` templates have been updated to show the latest recommended use of Flower APIs. + +- **Improve Simulation Engine** ([#4095](https://github.com/adap/flower/pull/4095), [#3913](https://github.com/adap/flower/pull/3913), [#4059](https://github.com/adap/flower/pull/4059), [#3954](https://github.com/adap/flower/pull/3954), [#4071](https://github.com/adap/flower/pull/4071), [#3985](https://github.com/adap/flower/pull/3985), [#3988](https://github.com/adap/flower/pull/3988)) + + The Flower Simulation Engine comes with several updates, including improved run config support, verbose logging, simulation backend configuration via `flwr run`, and more. + +- **Improve** `RecordSet` ([#4052](https://github.com/adap/flower/pull/4052), [#3218](https://github.com/adap/flower/pull/3218), [#4016](https://github.com/adap/flower/pull/4016)) + + `RecordSet` is the core object to exchange model parameters, configuration values and metrics between `ClientApp` and `ServerApp`. This release ships several smaller improvements to `RecordSet` and related `*Record` types. 
+ +- **Update documentation** ([#3972](https://github.com/adap/flower/pull/3972), [#3925](https://github.com/adap/flower/pull/3925), [#4061](https://github.com/adap/flower/pull/4061), [#3984](https://github.com/adap/flower/pull/3984), [#3917](https://github.com/adap/flower/pull/3917), [#3900](https://github.com/adap/flower/pull/3900), [#4066](https://github.com/adap/flower/pull/4066), [#3765](https://github.com/adap/flower/pull/3765), [#4021](https://github.com/adap/flower/pull/4021), [#3906](https://github.com/adap/flower/pull/3906), [#4063](https://github.com/adap/flower/pull/4063), [#4076](https://github.com/adap/flower/pull/4076), [#3920](https://github.com/adap/flower/pull/3920), [#3916](https://github.com/adap/flower/pull/3916)) + + Many parts of the documentation, including the main tutorial, have been migrated to show new Flower APIs and other new Flower features like the improved Docker support. + +- **Migrate code example to use new Flower APIs** ([#3758](https://github.com/adap/flower/pull/3758), [#3701](https://github.com/adap/flower/pull/3701), [#3919](https://github.com/adap/flower/pull/3919), [#3918](https://github.com/adap/flower/pull/3918), [#3934](https://github.com/adap/flower/pull/3934), [#3893](https://github.com/adap/flower/pull/3893), [#3833](https://github.com/adap/flower/pull/3833), [#3922](https://github.com/adap/flower/pull/3922), [#3846](https://github.com/adap/flower/pull/3846), [#3777](https://github.com/adap/flower/pull/3777), [#3874](https://github.com/adap/flower/pull/3874), [#3873](https://github.com/adap/flower/pull/3873), [#3935](https://github.com/adap/flower/pull/3935), [#3754](https://github.com/adap/flower/pull/3754), [#3980](https://github.com/adap/flower/pull/3980), [#4089](https://github.com/adap/flower/pull/4089), [#4046](https://github.com/adap/flower/pull/4046), [#3314](https://github.com/adap/flower/pull/3314), [#3316](https://github.com/adap/flower/pull/3316), [#3295](https://github.com/adap/flower/pull/3295), 
[#3313](https://github.com/adap/flower/pull/3313)) + + Many code examples have been migrated to use new Flower APIs. + +- **Update Flower framework, framework internals and quality infrastructure** ([#4018](https://github.com/adap/flower/pull/4018), [#4053](https://github.com/adap/flower/pull/4053), [#4098](https://github.com/adap/flower/pull/4098), [#4067](https://github.com/adap/flower/pull/4067), [#4105](https://github.com/adap/flower/pull/4105), [#4048](https://github.com/adap/flower/pull/4048), [#4107](https://github.com/adap/flower/pull/4107), [#4069](https://github.com/adap/flower/pull/4069), [#3915](https://github.com/adap/flower/pull/3915), [#4101](https://github.com/adap/flower/pull/4101), [#4108](https://github.com/adap/flower/pull/4108), [#3914](https://github.com/adap/flower/pull/3914), [#4068](https://github.com/adap/flower/pull/4068), [#4041](https://github.com/adap/flower/pull/4041), [#4040](https://github.com/adap/flower/pull/4040), [#3986](https://github.com/adap/flower/pull/3986), [#4026](https://github.com/adap/flower/pull/4026), [#3961](https://github.com/adap/flower/pull/3961), [#3975](https://github.com/adap/flower/pull/3975), [#3983](https://github.com/adap/flower/pull/3983), [#4091](https://github.com/adap/flower/pull/4091), [#3982](https://github.com/adap/flower/pull/3982), [#4079](https://github.com/adap/flower/pull/4079), [#4073](https://github.com/adap/flower/pull/4073), [#4060](https://github.com/adap/flower/pull/4060), [#4106](https://github.com/adap/flower/pull/4106), [#4080](https://github.com/adap/flower/pull/4080), [#3974](https://github.com/adap/flower/pull/3974), [#3996](https://github.com/adap/flower/pull/3996), [#3991](https://github.com/adap/flower/pull/3991), [#3981](https://github.com/adap/flower/pull/3981), [#4093](https://github.com/adap/flower/pull/4093), [#4100](https://github.com/adap/flower/pull/4100), [#3939](https://github.com/adap/flower/pull/3939), [#3955](https://github.com/adap/flower/pull/3955), 
[#3940](https://github.com/adap/flower/pull/3940), [#4038](https://github.com/adap/flower/pull/4038)) + + As always, many parts of the Flower framework and quality infrastructure were improved and updated. + +### Deprecations + +- **Deprecate accessing `Context` via `Client.context`** ([#3797](https://github.com/adap/flower/pull/3797)) + + Now that both `client_fn` and `server_fn` receive a `Context` object, accessing `Context` via `Client.context` is deprecated. `Client.context` will be removed in a future release. If you need to access `Context` in your `Client` implementation, pass it manually when creating the `Client` instance in `client_fn`: + + ```python + def client_fn(context: Context) -> Client: + return FlowerClient(context).to_client() + ``` + +### Incompatible changes + +- **Update CLIs to accept an app directory instead of** `ClientApp` **and** `ServerApp` ([#3952](https://github.com/adap/flower/pull/3952), [#4077](https://github.com/adap/flower/pull/4077), [#3850](https://github.com/adap/flower/pull/3850)) + + The CLI commands `flower-supernode` and `flower-server-app` now accept an app directory as argument (instead of references to a `ClientApp` or `ServerApp`). An app directory is any directory containing a `pyproject.toml` file (with the appropriate Flower config fields set). The easiest way to generate a compatible project structure is to use `flwr new`. + +- **Disable** `flower-client-app` **CLI command** ([#4022](https://github.com/adap/flower/pull/4022)) + + `flower-client-app` has been disabled. Use `flower-supernode` instead. + +- **Use spaces instead of commas for separating config args** ([#4000](https://github.com/adap/flower/pull/4000)) + + When passing configs (run config, node config) to Flower, you now need to separate key-value pairs using spaces instead of commas. For example: + + ```bash + flwr run . 
--run-config "learning-rate=0.01 num_rounds=10" # Works + ``` + + Previously, you could pass configs using commas, like this: + + ```bash + flwr run . --run-config "learning-rate=0.01,num_rounds=10" # Doesn't work + ``` + +- **Remove** `flwr example` **CLI command** ([#4084](https://github.com/adap/flower/pull/4084)) + + The experimental `flwr example` CLI command has been removed. Use `flwr new` to generate a project and then run it using `flwr run`. ## v1.10.0 (2024-07-24) diff --git a/doc/source/ref-example-projects.rst b/doc/source/ref-example-projects.rst index 597e3a596c51..4f0a3014e1d4 100644 --- a/doc/source/ref-example-projects.rst +++ b/doc/source/ref-example-projects.rst @@ -1,48 +1,52 @@ Example projects ================ -Flower comes with a number of usage examples. The examples demonstrate how -Flower can be used to federate different kinds of existing machine learning -pipelines, usually leveraging popular machine learning frameworks such as -`PyTorch `_ or -`TensorFlow `_. +Flower comes with a number of usage examples. The examples demonstrate how Flower can be +used to federate different kinds of existing machine learning pipelines, usually +leveraging popular machine learning frameworks such as `PyTorch `_ +or `TensorFlow `_. The following examples are available as standalone projects. 
+ Quickstart TensorFlow/Keras --------------------------- -The TensorFlow/Keras quickstart example shows CIFAR-10 image classification -with MobileNetV2: +The TensorFlow/Keras quickstart example shows CIFAR-10 image classification with +MobileNetV2: -- `Quickstart TensorFlow (Code) `_ +- `Quickstart TensorFlow (Code) + `_ - :doc:`Quickstart TensorFlow (Tutorial) ` -- `Quickstart TensorFlow (Blog Post) `_ - +- `Quickstart TensorFlow (Blog Post) + `_ Quickstart PyTorch ------------------ -The PyTorch quickstart example shows CIFAR-10 image classification -with a simple Convolutional Neural Network: +The PyTorch quickstart example shows CIFAR-10 image classification with a simple +Convolutional Neural Network: -- `Quickstart PyTorch (Code) `_ +- `Quickstart PyTorch (Code) + `_ - :doc:`Quickstart PyTorch (Tutorial) ` - PyTorch: From Centralized To Federated -------------------------------------- This example shows how a regular PyTorch project can be federated using Flower: -- `PyTorch: From Centralized To Federated (Code) `_ -- :doc:`PyTorch: From Centralized To Federated (Tutorial) ` - +- `PyTorch: From Centralized To Federated (Code) + `_ +- :doc:`PyTorch: From Centralized To Federated (Tutorial) + ` Federated Learning on Raspberry Pi and Nvidia Jetson ---------------------------------------------------- -This example shows how Flower can be used to build a federated learning system that run across Raspberry Pi and Nvidia Jetson: - -- `Federated Learning on Raspberry Pi and Nvidia Jetson (Code) `_ -- `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) `_ +This example shows how Flower can be used to build a federated learning system that runs +across Raspberry Pi and Nvidia Jetson: +- `Federated Learning on Raspberry Pi and Nvidia Jetson (Code) + `_ +- `Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) + `_ diff --git a/doc/source/ref-faq.rst b/doc/source/ref-faq.rst index 26b7dca4a0a7..0bd004f81858 100644 --- a/doc/source/ref-faq.rst +++
b/doc/source/ref-faq.rst @@ -1,7 +1,8 @@ FAQ === -This page collects answers to commonly asked questions about Federated Learning with Flower. +This page collects answers to commonly asked questions about Federated Learning with +Flower. .. dropdown:: :fa:`eye,mr-1` Can Flower run on Jupyter Notebooks / Google Colab? @@ -25,6 +26,9 @@ This page collects answers to commonly asked questions about Federated Learning Yes, of course. A list of available examples using Flower within a blockchain environment is available here: + * `FLock: A Decentralised AI Training Platform `_. + * Contribute to on-chain training the model and earn rewards. + * Local blockchain with federated learning simulation. * `Flower meets Nevermined GitHub Repository `_. * `Flower meets Nevermined YouTube video `_. * `Flower meets KOSMoS `_. diff --git a/doc/source/tutorial-quickstart-android.rst b/doc/source/tutorial-quickstart-android.rst index 9177236d5a7c..f2691203078c 100644 --- a/doc/source/tutorial-quickstart-android.rst +++ b/doc/source/tutorial-quickstart-android.rst @@ -1,12 +1,12 @@ .. _quickstart-android: - Quickstart Android ================== .. meta:: - :description: Read this Federated Learning quickstart tutorial for creating an Android app using Flower. + :description: Read this Federated Learning quickstart tutorial for creating an Android app using Flower. Let's build a federated learning system using TFLite and Flower on Android! -Please refer to the `full code example `_ to learn more. +Please refer to the `full code example +`_ to learn more. diff --git a/doc/source/tutorial-quickstart-fastai.rst b/doc/source/tutorial-quickstart-fastai.rst index 63f5ac176082..d52c570b0195 100644 --- a/doc/source/tutorial-quickstart-fastai.rst +++ b/doc/source/tutorial-quickstart-fastai.rst @@ -1,12 +1,110 @@ .. _quickstart-fastai: - Quickstart fastai ================= -.. 
meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with FastAI to train a vision model on CIFAR-10. +In this federated learning tutorial we will learn how to train a SqueezeNet model on +MNIST using Flower and fastai. It is recommended to create a virtual environment and run +everything within a :doc:`virtualenv `. + +Then, clone the code example directly from GitHub: + +.. code-block:: shell + + git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-fastai . \ + && rm -rf _tmp && cd quickstart-fastai + +This will create a new directory called `quickstart-fastai` containing the following +files: + +.. code-block:: shell + + quickstart-fastai + ├── fastai_example + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +Next, activate your environment, then run: + +.. code-block:: shell + + # Navigate to the example directory + $ cd path/to/quickstart-fastai + + # Install project and dependencies + $ pip install -e . + +This example by default runs the Flower Simulation Engine, creating a federation of 10 +nodes using `FedAvg +`_ +as the aggregation strategy. The dataset will be partitioned using Flower Dataset's +`IidPartitioner +`_. +Let's run the project: + +.. code-block:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code-block:: shell + + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 5 clients (out of 10) + INFO : aggregate_evaluate: received 5 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 143.02s + INFO : History (loss, distributed): + INFO : round 1: 2.699497365951538 + INFO : round 2: 0.9549586296081543 + INFO : round 3: 0.6627192616462707 + INFO : History (metrics, distributed, evaluate): + INFO : {'accuracy': [(1, 0.09766666889190674), + INFO : (2, 0.6948333323001862), + INFO : (3, 0.7721666693687439)]} + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config num-server-rounds=5 -Let's build a federated learning system using fastai and Flower! +.. note:: -Please refer to the `full code example `_ to learn more. 
+ Check the `source code + `_ of this + tutorial in ``examples/quickstart-fastai`` in the Flower GitHub repository. diff --git a/doc/source/tutorial-quickstart-huggingface.rst b/doc/source/tutorial-quickstart-huggingface.rst index 7d8128230901..3c9d3981e587 100644 --- a/doc/source/tutorial-quickstart-huggingface.rst +++ b/doc/source/tutorial-quickstart-huggingface.rst @@ -1,111 +1,219 @@ .. _quickstart-huggingface: - Quickstart 🤗 Transformers ========================== -.. meta:: - :description: Check out this Federating Learning quickstart tutorial for using Flower with HuggingFace Transformers in order to fine-tune an LLM. +In this federated learning tutorial we will learn how to train a large language model +(LLM) on the `IMDB `_ dataset using +Flower and the 🤗 Hugging Face Transformers library. It is recommended to create a +virtual environment and run everything within a :doc:`virtualenv +`. + +Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face project. It will +generate all the files needed to run, by default with the Flower Simulation Engine, a +federation of 10 nodes using |fedavg|_. The dataset will be partitioned using +|flowerdatasets|_'s |iidpartitioner|_. + +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: + +.. code-block:: shell + + # In a new Python environment + $ pip install flwr + +Then, run the command below. You will be prompted to select one of the available +templates (choose ``HuggingFace``), give a name to your project, and type in your +developer name: + +.. code-block:: shell + + $ flwr new + +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: + +..
code-block:: shell + + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +If you haven't yet installed the project and its dependencies, you can do so by: + +.. code-block:: shell + + # From the directory where your pyproject.toml is + $ pip install -e . + +To run the project, do: -Let's build a federated learning system using Hugging Face Transformers and Flower! +.. code-block:: shell + + # Run with default arguments + $ flwr run . -We will leverage Hugging Face to federate the training of language models over multiple clients using Flower. -More specifically, we will fine-tune a pre-trained Transformer model (distilBERT) -for sequence classification over a dataset of IMDB ratings. -The end goal is to detect if a movie rating is positive or negative. +With default arguments you will see an output like this one: -Dependencies ------------- +.. code-block:: shell -To follow along this tutorial you will need to install the following packages: -:code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, and :code:`transformers`. -This can be done using :code:`pip`: + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 2 clients (out of 10) + INFO : aggregate_fit: received 2 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 249.11s + INFO : History (loss, distributed): + INFO : round 1: 0.02111011856794357 + INFO : round 2: 0.019722302150726317 + INFO : round 3: 0.018227258533239362 + INFO : + +You can also run the project with GPU as follows: .. code-block:: shell - $ pip install datasets evaluate flwr torch transformers + # Run with default arguments + $ flwr run . localhost-gpu + +This will use the default arguments where each ``ClientApp`` will use 2 CPUs and at most +4 ``ClientApp``\s will run in a given GPU. + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: +.. code-block:: shell + + # Override some arguments + $ flwr run . 
--run-config "num-server-rounds=5 fraction-fit=0.2" -Standard Hugging Face workflow ------------------------------- +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. -Handling the data -^^^^^^^^^^^^^^^^^ +The Data +-------- -To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` library. -We then need to tokenize the data and create :code:`PyTorch` dataloaders, -this is all done in the :code:`load_data` function: +This tutorial uses |flowerdatasets|_ to easily download and partition the `IMDB +`_ dataset. In this example you'll +make use of the |iidpartitioner|_ to generate ``num_partitions`` partitions. You can +choose |otherpartitioners|_ available in Flower Datasets. To tokenize the text, we will +also load the tokenizer from the pre-trained Transformer model that we'll use during +training - more on that in the next section. Each ``ClientApp`` will call this function +to create dataloaders with the data that correspond to their data partition. .. 
code-block:: python - import random - import torch - from datasets import load_dataset - from torch.utils.data import DataLoader - from transformers import AutoTokenizer, DataCollatorWithPadding - - DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - CHECKPOINT = "distilbert-base-uncased" - - def load_data(): - """Load IMDB data (training and eval)""" - raw_datasets = load_dataset("imdb") - raw_datasets = raw_datasets.shuffle(seed=42) - # remove unnecessary data split - del raw_datasets["unsupervised"] - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) - def tokenize_function(examples): - return tokenizer(examples["text"], truncation=True) - # We will take a small sample in order to reduce the compute time, this is optional - train_population = random.sample(range(len(raw_datasets["train"])), 100) - test_population = random.sample(range(len(raw_datasets["test"])), 100) - tokenized_datasets = raw_datasets.map(tokenize_function, batched=True) - tokenized_datasets["train"] = tokenized_datasets["train"].select(train_population) - tokenized_datasets["test"] = tokenized_datasets["test"].select(test_population) - tokenized_datasets = tokenized_datasets.remove_columns("text") - tokenized_datasets = tokenized_datasets.rename_column("label", "labels") - data_collator = DataCollatorWithPadding(tokenizer=tokenizer) - trainloader = DataLoader( - tokenized_datasets["train"], - shuffle=True, - batch_size=32, - collate_fn=data_collator, - ) - testloader = DataLoader( - tokenized_datasets["test"], batch_size=32, collate_fn=data_collator + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="stanfordnlp/imdb", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + tokenizer = AutoTokenizer.from_pretrained(model_name) + + + def tokenize_function(examples): + return 
tokenizer( + examples["text"], truncation=True, add_special_tokens=True, max_length=512 ) - return trainloader, testloader -Training and testing the model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + partition_train_test = partition_train_test.map(tokenize_function, batched=True) + partition_train_test = partition_train_test.remove_columns("text") + partition_train_test = partition_train_test.rename_column("label", "labels") + + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + trainloader = DataLoader( + partition_train_test["train"], + shuffle=True, + batch_size=32, + collate_fn=data_collator, + ) + + testloader = DataLoader( + partition_train_test["test"], batch_size=32, collate_fn=data_collator + ) + +The Model +--------- -Once we have a way of creating our trainloader and testloader, -we can take care of the training and testing. -This is very similar to any :code:`PyTorch` training or testing loop: +We will leverage 🤗 Hugging Face to federate the training of language models over +multiple clients using Flower. More specifically, we will fine-tune a pre-trained +Transformer model (|berttiny|_) for sequence classification over the dataset of IMDB +ratings. The end goal is to detect if a movie rating is positive or negative. If you +have access to larger GPUs, feel free to use larger models! .. code-block:: python - from evaluate import load as load_metric - from transformers import AdamW + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + +Note that here, ``model_name`` is a string that will be loaded from the ``Context`` in +the ClientApp and ServerApp. - def train(net, trainloader, epochs): +In addition to loading the pretrained model weights and architecture, we also include +two utility functions to perform both training (i.e. ``train()``) and evaluation (i.e. +``test()``) using the above model. These functions should look fairly familiar if you +have some prior experience with PyTorch. 
Note these functions do not have anything +specific to Flower. That being said, the training function will normally be called, as +we'll see later, from a Flower client passing its own data. In summary, your clients can +use standard training/testing functions to perform local training or evaluation: +
+.. code-block:: python + + def train(net, trainloader, epochs, device): optimizer = AdamW(net.parameters(), lr=5e-5) net.train() for _ in range(epochs): for batch in trainloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} outputs = net(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() - def test(net, testloader): + + + def test(net, testloader, device): metric = load_metric("accuracy") loss = 0 net.eval() for batch in testloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = net(**batch) logits = outputs.logits @@ -116,114 +224,180 @@ This is very similar to any :code:`PyTorch` training or testing loop: accuracy = metric.compute()["accuracy"] return loss, accuracy +The ClientApp +------------- -Creating the model itself -^^^^^^^^^^^^^^^^^^^^^^^^^ +The main changes we have to make to use 🤗 Hugging Face with Flower will be found in the +``get_weights()`` and ``set_weights()`` functions. Under the hood, the ``transformers`` +library uses PyTorch, which means we can reuse the ``get_weights()`` and +``set_weights()`` code that we defined in the :doc:`Quickstart PyTorch +` tutorial. As a reminder, in ``get_weights()``, PyTorch +model parameters are extracted and represented as a list of NumPy arrays. The +``set_weights()`` function does the opposite: given a list of NumPy arrays it applies +them to an existing PyTorch model. Doing this is fairly easy in PyTorch.
-To create the model itself, -we will just load the pre-trained distillBERT model using Hugging Face’s :code:`AutoModelForSequenceClassification` : +.. note:: + + The specific implementation of ``get_weights()`` and ``set_weights()`` depends on + the type of models you use. The ones shown below work for a wide range of PyTorch + models but you might need to adjust them if you have more exotic model + architectures. .. code-block:: python - from transformers import AutoModelForSequenceClassification + def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] - net = AutoModelForSequenceClassification.from_pretrained( - CHECKPOINT, num_labels=2 - ).to(DEVICE) + def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) -Federating the example ----------------------- +The rest of the functionality is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset. Similarly, the +``evaluate()`` method is used to evaluate the model received on a held-out validation +set that the client might have: -Creating the IMDBClient -^^^^^^^^^^^^^^^^^^^^^^^ +.. code-block:: python -To federate our example to multiple clients, -we first need to write our Flower client class (inheriting from :code:`flwr.client.NumPyClient`). 
-This is very easy, as our model is a standard :code:`PyTorch` model: + class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, testloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.testloader = testloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + + def fit(self, parameters, config): + set_weights(self.net, parameters) + train(self.net, self.trainloader, epochs=self.local_epochs, device=self.device) + return get_weights(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) + return float(loss), len(self.testloader), {"accuracy": accuracy} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the `context` enables you to get access +to hyperparameters defined in your ``pyproject.toml`` to configure the run. In this +tutorial we access the ``local-epochs`` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method. You could define +additional hyperparameters in ``pyproject.toml`` and access them here. ..
code-block:: python - from collections import OrderedDict - import flwr as fl - - class IMDBClient(fl.client.NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - def set_parameters(self, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - def fit(self, parameters, config): - self.set_parameters(parameters) - print("Training Started...") - train(net, trainloader, epochs=1) - print("Training Finished.") - return self.get_parameters(config={}), len(trainloader), {} - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(net, testloader) - return float(loss), len(testloader), {"accuracy": float(accuracy)} - - -The :code:`get_parameters` function lets the server get the client's parameters. -Inversely, the :code:`set_parameters` function allows the server to send its parameters to the client. -Finally, the :code:`fit` function trains the model locally for the client, -and the :code:`evaluate` function tests the model locally and returns the relevant metrics. - -Starting the server -^^^^^^^^^^^^^^^^^^^ - -Now that we have a way to instantiate clients, we need to create our server in order to aggregate the results. -Using Flower, this can be done very easily by first choosing a strategy (here, we are using :code:`FedAvg`, -which will define the global weights as the average of all the clients' weights at each round) -and then using the :code:`flwr.server.start_server` function: + def client_fn(context: Context): -.. 
code-block:: python + # Get this client's dataset partition + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + model_name = context.run_config["model-name"] + trainloader, valloader = load_data(partition_id, num_partitions, model_name) - def weighted_average(metrics): - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - losses = [num_examples * m["loss"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - return {"accuracy": sum(accuracies) / sum(examples), "loss": sum(losses) / sum(examples)} - - # Define strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=weighted_average, - ) + # Load model + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) - # Start server - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() -The :code:`weighted_average` function is there to provide a way to aggregate the metrics distributed amongst -the clients (basically this allows us to display a nice average accuracy and loss for every round). + # Flower ClientApp + app = ClientApp(client_fn) -Putting everything together ---------------------------- +The ServerApp +------------- -We can now start client instances using: +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is |serverappcomponents|_ as +opposed to a |client|_. In this example we use the `FedAvg` strategy. To it we pass a +randomly initialized model that will serve as the global model to federate.
Note that +the value of ``fraction_fit`` is read from the run config. You can find the default +value defined in the ``pyproject.toml``. .. code-block:: python - fl.client.start_client( - server_address="127.0.0.1:8080", - client=IMDBClient().to_client() - ) + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + + # Initialize global model + model_name = context.run_config["model-name"] + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + + weights = get_weights(net) + initial_parameters = ndarrays_to_parameters(weights) + + # Define strategy + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=1.0, + initial_parameters=initial_parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) -And they will be able to connect to the server and start the federated training. + # Create ServerApp + app = ServerApp(server_fn=server_fn) -If you want to check out everything put together, -you should check out the `full code example `_ . +Congratulations! You've successfully built and run your first federated learning system +for an LLM. -Of course, this is a very basic example, and a lot can be added or modified, -it was just to showcase how simply we could federate a Hugging Face workflow using Flower. +.. note:: -Note that in this example we used :code:`PyTorch`, but we could have very well used :code:`TensorFlow`. + Check the source code of the extended version of this tutorial in + |quickstart_hf_link|_ in the Flower GitHub repository. For a comprehensive example + of a federated fine-tuning of an LLM with Flower, refer to the |flowertune|_ example + in the Flower GitHub repository. + +.. |quickstart_hf_link| replace:: ``examples/quickstart-huggingface`` + +.. 
|fedavg| replace:: ``FedAvg`` + +.. |iidpartitioner| replace:: ``IidPartitioner`` + +.. |otherpartitioners| replace:: other partitioners + +.. |berttiny| replace:: ``bert-tiny`` + +.. |serverappcomponents| replace:: ``ServerAppComponents`` + +.. |client| replace:: ``Client`` + +.. |flowerdatasets| replace:: Flower Datasets + +.. |flowertune| replace:: FlowerTune LLM + +.. _berttiny: https://huggingface.co/prajjwal1/bert-tiny + +.. _client: ref-api/flwr.client.Client.html#client + +.. _fedavg: ref-api/flwr.server.strategy.FedAvg.html#flwr.server.strategy.FedAvg + +.. _flowerdatasets: https://flower.ai/docs/datasets/ + +.. _flowertune: https://github.com/adap/flower/tree/main/examples/flowertune-llm + +.. _iidpartitioner: https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner + +.. _otherpartitioners: https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html + +.. _quickstart_hf_link: https://github.com/adap/flower/tree/main/examples/quickstart-huggingface + +.. _serverappcomponents: ref-api/flwr.server.ServerAppComponents.html#serverappcomponents + +.. meta:: + :description: Check out this Federating Learning quickstart tutorial for using Flower with 🤗 HuggingFace Transformers in order to fine-tune an LLM. diff --git a/doc/source/tutorial-quickstart-ios.rst b/doc/source/tutorial-quickstart-ios.rst index e4315ce569fb..8a9250f8dfb0 100644 --- a/doc/source/tutorial-quickstart-ios.rst +++ b/doc/source/tutorial-quickstart-ios.rst @@ -1,136 +1,155 @@ .. _quickstart-ios: - Quickstart iOS ============== .. meta:: - :description: Read this Federated Learning quickstart tutorial for creating an iOS app using Flower to train a neural network on MNIST. + :description: Read this Federated Learning quickstart tutorial for creating an iOS app using Flower to train a neural network on MNIST. 
-In this tutorial we will learn how to train a Neural Network on MNIST using Flower and CoreML on iOS devices. +In this tutorial we will learn how to train a Neural Network on MNIST using Flower and +CoreML on iOS devices. -First of all, for running the Flower Python server, it is recommended to create a virtual environment and run everything within a :doc:`virtualenv `. -For the Flower client implementation in iOS, it is recommended to use Xcode as our IDE. +First of all, for running the Flower Python server, it is recommended to create a +virtual environment and run everything within a :doc:`virtualenv +`. For the Flower client implementation in iOS, +it is recommended to use Xcode as our IDE. -Our example consists of one Python *server* and two iPhone *clients* that all have the same model. +Our example consists of one Python *server* and two iPhone *clients* that all have the +same model. -*Clients* are responsible for generating individual weight updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce a better model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of weight updates is called a *round*. +*Clients* are responsible for generating individual weight updates for the model based +on their local datasets. These updates are then sent to the *server* which will +aggregate them to produce a better model. Finally, the *server* sends this improved +version of the model back to each *client*. A complete cycle of weight updates is called +a *round*. -Now that we have a rough idea of what is going on, let's get started to setup our Flower server environment. We first need to install Flower. You can do this by using pip: +Now that we have a rough idea of what is going on, let's get started to setup our Flower +server environment. We first need to install Flower. You can do this by using pip: .. 
code-block:: shell - $ pip install flwr + $ pip install flwr Or Poetry: .. code-block:: shell - $ poetry add flwr + $ poetry add flwr Flower Client ------------- -Now that we have all our dependencies installed, let's run a simple distributed training using CoreML as our local training pipeline and MNIST as our dataset. -For simplicity reasons we will use the complete Flower client with CoreML, that has been implemented and stored inside the Swift SDK. The client implementation can be seen below: +Now that we have all our dependencies installed, let's run a simple distributed training +using CoreML as our local training pipeline and MNIST as our dataset. For simplicity +reasons we will use the complete Flower client with CoreML, that has been implemented +and stored inside the Swift SDK. The client implementation can be seen below: .. code-block:: swift - /// Parses the parameters from the local model and returns them as GetParametersRes struct - /// - /// - Returns: Parameters from the local model - public func getParameters() -> GetParametersRes { - let parameters = parameters.weightsToParameters() - let status = Status(code: .ok, message: String()) - - return GetParametersRes(parameters: parameters, status: status) - } - - /// Calls the routine to fit the local model - /// - /// - Returns: The result from the local training, e.g., updated parameters - public func fit(ins: FitIns) -> FitRes { - let status = Status(code: .ok, message: String()) - let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .train) - let parameters = parameters.weightsToParameters() - - return FitRes(parameters: parameters, numExamples: result.numSamples, status: status) - } + /// Parses the parameters from the local model and returns them as GetParametersRes struct + /// + /// - Returns: Parameters from the local model + public func getParameters() -> GetParametersRes { + let parameters = parameters.weightsToParameters() + let status = 
Status(code: .ok, message: String()) - /// Calls the routine to evaluate the local model - /// - /// - Returns: The result from the evaluation, e.g., loss - public func evaluate(ins: EvaluateIns) -> EvaluateRes { - let status = Status(code: .ok, message: String()) - let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .test) + return GetParametersRes(parameters: parameters, status: status) + } - return EvaluateRes(loss: Float(result.loss), numExamples: result.numSamples, status: status) - } + /// Calls the routine to fit the local model + /// + /// - Returns: The result from the local training, e.g., updated parameters + public func fit(ins: FitIns) -> FitRes { + let status = Status(code: .ok, message: String()) + let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .train) + let parameters = parameters.weightsToParameters() + + return FitRes(parameters: parameters, numExamples: result.numSamples, status: status) + } + + /// Calls the routine to evaluate the local model + /// + /// - Returns: The result from the evaluation, e.g., loss + public func evaluate(ins: EvaluateIns) -> EvaluateRes { + let status = Status(code: .ok, message: String()) + let result = runMLTask(configuration: parameters.parametersToWeights(parameters: ins.parameters), task: .test) + + return EvaluateRes(loss: Float(result.loss), numExamples: result.numSamples, status: status) + } -Let's create a new application project in Xcode and add :code:`flwr` as a dependency in your project. For our application, we will store the logic of our app in :code:`FLiOSModel.swift` and the UI elements in :code:`ContentView.swift`. -We will focus more on :code:`FLiOSModel.swift` in this quickstart. Please refer to the `full code example `_ to learn more about the app. +Let's create a new application project in Xcode and add ``flwr`` as a dependency in your +project. 
For our application, we will store the logic of our app in ``FLiOSModel.swift`` +and the UI elements in ``ContentView.swift``. We will focus more on ``FLiOSModel.swift`` +in this quickstart. Please refer to the `full code example +`_ to learn more about the app. -Import Flower and CoreML related packages in :code:`FLiOSModel.swift`: +Import Flower and CoreML related packages in ``FLiOSModel.swift``: .. code-block:: swift - import Foundation - import CoreML - import flwr + import Foundation + import CoreML + import flwr -Then add the mlmodel to the project simply by drag-and-drop, the mlmodel will be bundled inside the application during deployment to your iOS device. -We need to pass the url to access mlmodel and run CoreML machine learning processes, it can be retrieved by calling the function :code:`Bundle.main.url`. -For the MNIST dataset, we need to preprocess it into :code:`MLBatchProvider` object. The preprocessing is done inside :code:`DataLoader.swift`. +Then add the mlmodel to the project simply by drag-and-drop, the mlmodel will be bundled +inside the application during deployment to your iOS device. We need to pass the url to +access mlmodel and run CoreML machine learning processes, it can be retrieved by calling +the function ``Bundle.main.url``. For the MNIST dataset, we need to preprocess it into +``MLBatchProvider`` object. The preprocessing is done inside ``DataLoader.swift``. .. 
code-block:: swift - // prepare train dataset - let trainBatchProvider = DataLoader.trainBatchProvider() { _ in } + // prepare train dataset + let trainBatchProvider = DataLoader.trainBatchProvider() { _ in } - // prepare test dataset - let testBatchProvider = DataLoader.testBatchProvider() { _ in } + // prepare test dataset + let testBatchProvider = DataLoader.testBatchProvider() { _ in } - // load them together - let dataLoader = MLDataLoader(trainBatchProvider: trainBatchProvider, - testBatchProvider: testBatchProvider) + // load them together + let dataLoader = MLDataLoader(trainBatchProvider: trainBatchProvider, + testBatchProvider: testBatchProvider) -Since CoreML does not allow the model parameters to be seen before training, and accessing the model parameters during or after the training can only be done by specifying the layer name, -we need to know this information beforehand, through looking at the model specification, which are written as proto files. The implementation can be seen in :code:`MLModelInspect`. +Since CoreML does not allow the model parameters to be seen before training, and +accessing the model parameters during or after the training can only be done by +specifying the layer name, we need to know this information beforehand, through looking +at the model specification, which are written as proto files. The implementation can be +seen in ``MLModelInspect``. After we have all of the necessary information, let's create our Flower client. .. 
code-block:: swift - let compiledModelUrl = try MLModel.compileModel(at: url) + let compiledModelUrl = try MLModel.compileModel(at: url) - // inspect the model to be able to access the model parameters - // to access the model we need to know the layer name - // since the model parameters are stored as key value pairs - let modelInspect = try MLModelInspect(serializedData: Data(contentsOf: url)) - let layerWrappers = modelInspect.getLayerWrappers() - self.mlFlwrClient = MLFlwrClient(layerWrappers: layerWrappers, - dataLoader: dataLoader, - compiledModelUrl: compiledModelUrl) + // inspect the model to be able to access the model parameters + // to access the model we need to know the layer name + // since the model parameters are stored as key value pairs + let modelInspect = try MLModelInspect(serializedData: Data(contentsOf: url)) + let layerWrappers = modelInspect.getLayerWrappers() + self.mlFlwrClient = MLFlwrClient(layerWrappers: layerWrappers, + dataLoader: dataLoader, + compiledModelUrl: compiledModelUrl) -Then start the Flower gRPC client and start communicating to the server by passing our Flower client to the function :code:`startFlwrGRPC`. +Then start the Flower gRPC client and start communicating to the server by passing our +Flower client to the function ``startFlwrGRPC``. .. code-block:: swift - self.flwrGRPC = FlwrGRPC(serverHost: hostname, serverPort: port) - self.flwrGRPC.startFlwrGRPC(client: self.mlFlwrClient) + self.flwrGRPC = FlwrGRPC(serverHost: hostname, serverPort: port) + self.flwrGRPC.startFlwrGRPC(client: self.mlFlwrClient) -That's it for the client. We only have to implement :code:`Client` or call the provided -:code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. The attribute :code:`hostname` and :code:`port` tells the client which server to connect to. -This can be done by entering the hostname and port in the application before clicking the start button to start the federated learning process. +That's it for the client. 
We only have to implement ``Client`` or call the provided +``MLFlwrClient`` and call ``startFlwrGRPC()``. The attribute ``hostname`` and ``port`` +tells the client which server to connect to. This can be done by entering the hostname +and port in the application before clicking the start button to start the federated +learning process. Flower Server ------------- -For simple workloads we can start a Flower server and leave all the -configuration possibilities at their default values. In a file named -:code:`server.py`, import Flower and start the server: +For simple workloads we can start a Flower server and leave all the configuration +possibilities at their default values. In a file named ``server.py``, import Flower and +start the server: .. code-block:: python @@ -141,18 +160,21 @@ configuration possibilities at their default values. In a file named Train the model, federated! --------------------------- -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: +With both client and server ready, we can now run everything and see federated learning +in action. FL systems usually have a server and multiple clients. We therefore have to +start the server first: .. code-block:: shell $ python server.py -Once the server is running we can start the clients in different terminals. -Build and run the client through your Xcode, one through Xcode Simulator and the other by deploying it to your iPhone. -To see more about how to deploy your app to iPhone or Simulator visit `here `_. +Once the server is running we can start the clients in different terminals. Build and +run the client through your Xcode, one through Xcode Simulator and the other by +deploying it to your iPhone. To see more about how to deploy your app to iPhone or +Simulator visit `here +`_. -Congratulations! 
-You've successfully built and run your first federated learning system in your ios device. -The full `source code `_ for this example can be found in :code:`examples/ios`. +Congratulations! You've successfully built and run your first federated learning system +in your ios device. The full `source code +`_ for this example can be found +in ``examples/ios``. diff --git a/doc/source/tutorial-quickstart-jax.rst b/doc/source/tutorial-quickstart-jax.rst index d2b9243e2bb3..833270d5636f 100644 --- a/doc/source/tutorial-quickstart-jax.rst +++ b/doc/source/tutorial-quickstart-jax.rst @@ -1,288 +1,305 @@ .. _quickstart-jax: - Quickstart JAX ============== -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with Jax to train a linear regression model on a scikit-learn dataset. +In this federated learning tutorial we will learn how to train a linear regression model +using Flower and `JAX `_. It is recommended to +create a virtual environment and run everything within a :doc:`virtualenv +`. -This tutorial will show you how to use Flower to build a federated version of an existing JAX workload. -We are using JAX to train a linear regression model on a scikit-learn dataset. -We will structure the example similar to our `PyTorch - From Centralized To Federated `_ walkthrough. -First, we build a centralized training approach based on the `Linear Regression with JAX `_ tutorial`. -Then, we build upon the centralized training code to run the training in a federated fashion. +Let's use ``flwr new`` to create a complete Flower+JAX project. It will generate all the +files needed to run, by default with the Flower Simulation Engine, a federation of 10 +nodes using |fedavg|_. A random regression dataset will be loaded from scikit-learn's +|makeregression|_ function. 
-Before we start building our JAX example, we need install the packages :code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`: +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: .. code-block:: shell - $ pip install jax jaxlib scikit-learn flwr + # In a new Python environment + $ pip install flwr +Then, run the command below. You will be prompted to select one of the available +templates (choose ``JAX``), give a name to your project, and type in your developer +name: -Linear Regression with JAX --------------------------- +.. code-block:: shell -We begin with a brief description of the centralized training code based on a :code:`Linear Regression` model. -If you want a more in-depth explanation of what's going on then have a look at the official `JAX documentation `_. + $ flwr new -Let's create a new file called :code:`jax_training.py` with all the components required for a traditional (centralized) linear regression training. -First, the JAX packages :code:`jax` and :code:`jaxlib` need to be imported. In addition, we need to import :code:`sklearn` since we use :code:`make_regression` for the dataset and :code:`train_test_split` to split the dataset into a training and test set. -You can see that we do not yet import the :code:`flwr` package for federated learning. This will be done later. +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: -.. code-block:: python +.. code-block:: shell + + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +If you haven't yet installed the project and its dependencies, you can do so by: + +.. 
code-block:: shell + + # From the directory where your pyproject.toml is + $ pip install -e . + +To run the project, do: + +.. code-block:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code-block:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Requesting initial parameters from one random client + INFO : Received initial parameters from one random client + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 6.07s + INFO : History (loss, distributed): + INFO : round 1: 0.29372873306274416 + INFO : round 2: 5.820648354415425e-08 + INFO : round 3: 1.526226667528834e-14 + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. 
code-block:: shell - from typing import Dict, List, Tuple, Callable - import jax - import jax.numpy as jnp - from sklearn.datasets import make_regression - from sklearn.model_selection import train_test_split + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 input-dim=5" - key = jax.random.PRNGKey(0) +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. -The :code:`load_data()` function loads the mentioned training and test sets. +The Data +-------- + +This tutorial uses scikit-learn's |makeregression|_ function to generate a random +regression problem. .. code-block:: python - def load_data() -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]]: - # create our dataset and start with similar datasets for different clients + def load_data(): + # Load dataset X, y = make_regression(n_features=3, random_state=0) X, X_test, y, y_test = train_test_split(X, y) return X, y, X_test, y_test -The model architecture (a very simple :code:`Linear Regression` model) is defined in :code:`load_model()`. +The Model +--------- + +We defined a simple linear regression model to demonstrate how to create a JAX model, +but feel free to replace it with a more sophisticated JAX model if you'd like, (such as +with NN-based `Flax `_): .. code-block:: python - def load_model(model_shape) -> Dict: - # model weights - params = { - 'b' : jax.random.uniform(key), - 'w' : jax.random.uniform(key, model_shape) - } + def load_model(model_shape): + # Extract model parameters + params = {"b": jax.random.uniform(key), "w": jax.random.uniform(key, model_shape)} return params -We now need to define the training (function :code:`train()`), which loops over the training set and measures the loss (function :code:`loss_fn()`) for each batch of training examples. 
The loss function is separate since JAX takes derivatives with a :code:`grad()` function (defined in the :code:`main()` function and called in :code:`train()`). +In addition to defining the model architecture, we also include two utility functions to +perform both training (i.e. ``train()``) and evaluation (i.e. ``evaluation()``) using +the above model. .. code-block:: python - def loss_fn(params, X, y) -> Callable: - err = jnp.dot(X, params['w']) + params['b'] - y - return jnp.mean(jnp.square(err)) # mse + def loss_fn(params, X, y): + # Return MSE as loss + err = jnp.dot(X, params["w"]) + params["b"] - y + return jnp.mean(jnp.square(err)) + - def train(params, grad_fn, X, y) -> Tuple[np.array, float, int]: + def train(params, grad_fn, X, y): + loss = 1_000_000 num_examples = X.shape[0] - for epochs in range(10): + for epochs in range(50): grads = grad_fn(params, X, y) - params = jax.tree_multimap(lambda p, g: p - 0.05 * g, params, grads) - loss = loss_fn(params,X, y) - # if epochs % 10 == 9: - # print(f'For Epoch {epochs} loss {loss}') + params = jax.tree.map(lambda p, g: p - 0.05 * g, params, grads) + loss = loss_fn(params, X, y) return params, loss, num_examples -The evaluation of the model is defined in the function :code:`evaluation()`. The function takes all test examples and measures the loss of the linear regression model. - -.. code-block:: python - def evaluation(params, grad_fn, X_test, y_test) -> Tuple[float, int]: + def evaluation(params, grad_fn, X_test, y_test): num_examples = X_test.shape[0] err_test = loss_fn(params, X_test, y_test) loss_test = jnp.mean(jnp.square(err_test)) - # print(f'Test loss {loss_test}') return loss_test, num_examples -Having defined the data loading, model architecture, training, and evaluation we can put everything together and train our model using JAX. As already mentioned, the :code:`jax.grad()` function is defined in :code:`main()` and passed to :code:`train()`. - -.. 
code-block:: python - - def main(): - X, y, X_test, y_test = load_data() - model_shape = X.shape[1:] - grad_fn = jax.grad(loss_fn) - print("Model Shape", model_shape) - params = load_model(model_shape) - params, loss, num_examples = train(params, grad_fn, X, y) - evaluation(params, grad_fn, X_test, y_test) +The ClientApp +------------- +The main changes we have to make to use JAX with Flower will be found in the +``get_params()`` and ``set_params()`` functions. In ``get_params()``, JAX model +parameters are extracted and represented as a list of NumPy arrays. The ``set_params()`` +function is the opposite: given a list of NumPy arrays it applies them to an existing +JAX model. - if __name__ == "__main__": - main() +.. note:: -You can now run your (centralized) JAX linear regression workload: + The ``get_params()`` and ``set_params()`` functions here are conceptually similar to + the ``get_weights()`` and ``set_weights()`` functions that we defined in the + :doc:`QuickStart PyTorch ` tutorial. .. code-block:: python - python3 jax_training.py + def get_params(params): + parameters = [] + for _, val in params.items(): + parameters.append(np.array(val)) + return parameters -So far this should all look fairly familiar if you've used JAX before. -Let's take the next step and use what we've built to create a simple federated learning system consisting of one server and two clients. -JAX meets Flower ----------------- + def set_params(local_params, global_params): + for key, value in list(zip(local_params.keys(), global_params)): + local_params[key] = value -The concept of federating an existing workload is always the same and easy to understand. -We have to start a *server* and then use the code in :code:`jax_training.py` for the *clients* that are connected to the *server*. -The *server* sends model parameters to the clients. The *clients* run the training and update the parameters. 
-The updated parameters are sent back to the *server*, which averages all received parameter updates. -This describes one round of the federated learning process, and we repeat this for multiple rounds. - -Our example consists of one *server* and two *clients*. Let's set up :code:`server.py` first. The *server* needs to import the Flower package :code:`flwr`. -Next, we use the :code:`start_server` function to start a server and tell it to perform three rounds of federated learning. +The rest of the functionality is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset. Similarly, the +``evaluate()`` method is used to evaluate the model received on a held-out validation +set that the client might have: .. code-block:: python - import flwr as fl - - if __name__ == "__main__": - fl.server.start_server(server_address="0.0.0.0:8080", config=fl.server.ServerConfig(num_rounds=3)) + class FlowerClient(NumPyClient): + def __init__(self, input_dim): + self.train_x, self.train_y, self.test_x, self.test_y = load_data() + self.grad_fn = jax.grad(loss_fn) + model_shape = self.train_x.shape[1:] -We can already start the *server*: + self.params = load_model(model_shape) -.. code-block:: python + def fit(self, parameters, config): + set_params(self.params, parameters) + self.params, loss, num_examples = train( + self.params, self.grad_fn, self.train_x, self.train_y + ) + parameters = get_params({}) + return parameters, num_examples, {"loss": float(loss)} - python3 server.py + def evaluate(self, parameters, config): + set_params(self.params, parameters) + loss, num_examples = evaluation( + self.params, self.grad_fn, self.test_x, self.test_y + ) + return float(loss), num_examples, {"loss": float(loss)} -Finally, we will define our *client* logic in :code:`client.py` and build upon the previously defined JAX training in :code:`jax_training.py`. 
-Our *client* needs to import :code:`flwr`, but also :code:`jax` and :code:`jaxlib` to update the parameters on our JAX model: +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the `context` enables you to get access +to hyperparemeters defined in your ``pyproject.toml`` to configure the run. In this +tutorial we access the ``local-epochs`` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method. You could define +additioinal hyperparameters in ``pyproject.toml`` and access them here. .. code-block:: python - from typing import Dict, List, Callable, Tuple + def client_fn(context: Context): + input_dim = context.run_config["input-dim"] + # Return Client instance + return FlowerClient(input_dim).to_client() - import flwr as fl - import numpy as np - import jax - import jax.numpy as jnp - import jax_training + # Flower ClientApp + app = ClientApp(client_fn) +The ServerApp +------------- -Implementing a Flower *client* basically means implementing a subclass of either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. -Our implementation will be based on :code:`flwr.client.NumPyClient` and we'll call it :code:`FlowerClient`. -:code:`NumPyClient` is slightly easier to implement than :code:`Client` if you use a framework with good NumPy interoperability (like JAX) because it avoids some of the boilerplate that would otherwise be necessary. -:code:`FlowerClient` needs to implement four methods, two methods for getting/setting model parameters, one method for training the model, and one method for testing the model: +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is |serverappcomponents|_ as +opposed to a |client|_ In this example we use the ``FedAvg`` strategy. 
To it we pass a +randomly initialized model that will server as the global model to federated. Note that +the value of ``input_dim`` is read from the run config. You can find the default value +defined in the ``pyproject.toml``. -#. :code:`set_parameters (optional)` - * set the model parameters on the local model that are received from the server - * transform parameters to NumPy :code:`ndarray`'s - * loop over the list of model parameters received as NumPy :code:`ndarray`'s (think list of neural network layers) -#. :code:`get_parameters` - * get the model parameters and return them as a list of NumPy :code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects) -#. :code:`fit` - * update the parameters of the local model with the parameters received from the server - * train the model on the local training set - * get the updated local model parameters and return them to the server -#. :code:`evaluate` - * update the parameters of the local model with the parameters received from the server - * evaluate the updated model on the local test set - * return the local loss to the server +.. code-block:: python -The challenging part is to transform the JAX model parameters from :code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with `NumPyClient`. + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + input_dim = context.run_config["input-dim"] -The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make use of the functions :code:`train()` and :code:`evaluate()` previously defined in :code:`jax_training.py`. -So what we really do here is we tell Flower through our :code:`NumPyClient` subclass which of our already defined functions to call for training and evaluation. -We included type annotations to give you a better understanding of the data types that get passed around. 
+ # Initialize global model + params = get_params(load_model((input_dim,))) + initial_parameters = ndarrays_to_parameters(params) -.. code-block:: python + # Define strategy + strategy = FedAvg(initial_parameters=initial_parameters) + config = ServerConfig(num_rounds=num_rounds) + return ServerAppComponents(strategy=strategy, config=config) - class FlowerClient(fl.client.NumPyClient): - """Flower client implementing using linear regression and JAX.""" - - def __init__( - self, - params: Dict, - grad_fn: Callable, - train_x: List[np.ndarray], - train_y: List[np.ndarray], - test_x: List[np.ndarray], - test_y: List[np.ndarray], - ) -> None: - self.params= params - self.grad_fn = grad_fn - self.train_x = train_x - self.train_y = train_y - self.test_x = test_x - self.test_y = test_y - - def get_parameters(self, config) -> Dict: - # Return model parameters as a list of NumPy ndarrays - parameter_value = [] - for _, val in self.params.items(): - parameter_value.append(np.array(val)) - return parameter_value - - def set_parameters(self, parameters: List[np.ndarray]) -> Dict: - # Collect model parameters and update the parameters of the local model - value=jnp.ndarray - params_item = list(zip(self.params.keys(),parameters)) - for item in params_item: - key = item[0] - value = item[1] - self.params[key] = value - return self.params - - - def fit( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[List[np.ndarray], int, Dict]: - # Set model parameters, train model, return updated model parameters - print("Start local training") - self.params = self.set_parameters(parameters) - self.params, loss, num_examples = jax_training.train(self.params, self.grad_fn, self.train_x, self.train_y) - results = {"loss": float(loss)} - print("Training results", results) - return self.get_parameters(config={}), num_examples, results - - def evaluate( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[float, int, Dict]: - # Set model parameters, evaluate the model on a 
local test dataset, return result - print("Start evaluation") - self.params = self.set_parameters(parameters) - loss, num_examples = jax_training.evaluation(self.params,self.grad_fn, self.test_x, self.test_y) - print("Evaluation accuracy & loss", loss) - return ( - float(loss), - num_examples, - {"loss": float(loss)}, - ) -Having defined the federation process, we can run it. + # Create ServerApp + app = ServerApp(server_fn=server_fn) -.. code-block:: python +Congratulations! You've successfully built and run your first federated learning system +for JAX with Flower! - def main() -> None: - """Load data, start MNISTClient.""" +.. note:: - # Load data - train_x, train_y, test_x, test_y = jax_training.load_data() - grad_fn = jax.grad(jax_training.loss_fn) + Check the source code of the extended version of this tutorial in + |quickstart_jax_link|_ in the Flower GitHub repository. - # Load model (from centralized training) and initialize parameters - model_shape = train_x.shape[1:] - params = jax_training.load_model(model_shape) +.. |client| replace:: ``Client`` - # Start Flower client - client = FlowerClient(params, grad_fn, train_x, train_y, test_x, test_y) - fl.client.start_client(server_address="0.0.0.0:8080", client=client.to_client()) +.. |fedavg| replace:: ``FedAvg`` - if __name__ == "__main__": - main() +.. |makeregression| replace:: ``make_regression()`` +.. |quickstart_jax_link| replace:: ``examples/quickstart-jax`` -And that's it. You can now open two additional terminal windows and run +.. |serverappcomponents| replace:: ``ServerAppComponents`` -.. code-block:: python +.. _client: ref-api/flwr.client.Client.html#client - python3 client.py +.. _fedavg: ref-api/flwr.server.strategy.FedAvg.html#flwr.server.strategy.FedAvg -in each window (make sure that the server is still running before you do so) and see your JAX project run federated learning across two clients. Congratulations! +.. 
_makeregression: https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html -Next Steps ----------- +.. _quickstart_jax_link: https://github.com/adap/flower/tree/main/examples/quickstart-jax -The source code of this example was improved over time and can be found here: `Quickstart JAX `_. -Our example is somewhat over-simplified because both clients load the same dataset. +.. _serverappcomponents: ref-api/flwr.server.ServerAppComponents.html#serverappcomponents -You're now prepared to explore this topic further. How about using a more sophisticated model or using a different dataset? How about adding more clients? +.. meta:: + :description: Check out this Federated Learning quickstart tutorial for using Flower with Jax to train a linear regression model on a scikit-learn dataset. diff --git a/doc/source/tutorial-quickstart-mlx.rst b/doc/source/tutorial-quickstart-mlx.rst index 0999bf44d3b7..40e870ddc822 100644 --- a/doc/source/tutorial-quickstart-mlx.rst +++ b/doc/source/tutorial-quickstart-mlx.rst @@ -1,410 +1,393 @@ .. _quickstart-mlx: -################ - Quickstart MLX -################ - -In this federated learning tutorial we will learn how to train simple -MLP on MNIST using Flower and MLX. It is recommended to create a virtual -environment and run everything within a :doc:`virtualenv -`. - -Let's use `flwr new` to create a complete Flower+MLX project. It will -generate all the files needed to run, by default with the Simulation -Engine, a federation of 10 nodes using `FedAvg +Quickstart MLX +============== + +In this federated learning tutorial we will learn how to train simple MLP on MNIST using +Flower and MLX. It is recommended to create a virtual environment and run everything +within a :doc:`virtualenv `. + +Let's use `flwr new` to create a complete Flower+MLX project. It will generate all the +files needed to run, by default with the Simulation Engine, a federation of 10 nodes +using `FedAvg `_. 
The dataset will be partitioned using Flower Dataset's `IidPartitioner `_. -Now that we have a rough idea of what this example is about, let's get -started. First, install Flower in your new environment: +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -.. code:: shell +.. code-block:: shell - # In a new Python environment - $ pip install flwr + # In a new Python environment + $ pip install flwr -Then, run the command below. You will be prompted to select of the -available templates (choose ``MLX``), give a name to your project, and -type in your developer name: +Then, run the command below. You will be prompted to select of the available templates +(choose ``MLX``), give a name to your project, and type in your developer name: -.. code:: shell +.. code-block:: shell - $ flwr new + $ flwr new -After running it you'll notice a new directory with your project name -has been created. It should have the following structure: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: -.. code:: shell +.. code-block:: shell - - ├── - │ ├── __init__.py - │ ├── client_app.py # Defines your ClientApp - │ ├── server_app.py # Defines your ServerApp - │ └── task.py # Defines your model, training and data loading - ├── pyproject.toml # Project metadata like dependencies and configs - └── README.md + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md -If you haven't yet installed the project and its dependencies, you can -do so by: +If you haven't yet installed the project and its dependencies, you can do so by: -.. code:: shell +.. 
code-block:: shell - # From the directory where your pyproject.toml is - $ pip install -e . + # From the directory where your pyproject.toml is + $ pip install -e . To run the project do: -.. code:: shell +.. code-block:: shell - # Run with default arguments - $ flwr run . + # Run with default arguments + $ flwr run . With default arguments you will see an output like this one: -.. code:: shell - - Loading project configuration... - Success - INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout - INFO : - INFO : [INIT] - INFO : Requesting initial parameters from one random client - WARNING : FAB ID is not provided; the default ClientApp will be loaded. - INFO : Received initial parameters from one random client - INFO : Evaluating initial global parameters - INFO : - INFO : [ROUND 1] - INFO : configure_fit: strategy sampled 10 clients (out of 10) - INFO : aggregate_fit: received 10 results and 0 failures - WARNING : No fit_metrics_aggregation_fn provided - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - WARNING : No evaluate_metrics_aggregation_fn provided - INFO : - INFO : [ROUND 2] - INFO : configure_fit: strategy sampled 10 clients (out of 10) - INFO : aggregate_fit: received 10 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [ROUND 3] - INFO : configure_fit: strategy sampled 10 clients (out of 10) - INFO : aggregate_fit: received 10 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [SUMMARY] - INFO : Run finished 3 round(s) in 8.15s - INFO : History (loss, distributed): - INFO : round 1: 2.243802046775818 - INFO : round 2: 2.101812958717346 - INFO : round 3: 1.7419301986694335 - INFO : - -You can also override 
the parameters defined in -``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this: - -.. code:: shell - - # Override some arguments - $ flwr run . --run-config num-server-rounds=5,lr=0.05 - -What follows is an explanation of each component in the project you just -created: dataset partition, the model, defining the ``ClientApp`` and -defining the ``ServerApp``. - -********** - The Data -********** - -We will use `Flower Datasets `_ to -easily download and partition the `MNIST` dataset. In this example -you'll make use of the `IidPartitioner +.. code-block:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Requesting initial parameters from one random client + WARNING : FAB ID is not provided; the default ClientApp will be loaded. + INFO : Received initial parameters from one random client + INFO : Evaluating initial global parameters + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 
8.15s + INFO : History (loss, distributed): + INFO : round 1: 2.243802046775818 + INFO : round 2: 2.101812958717346 + INFO : round 3: 1.7419301986694335 + INFO : + +You can also override the parameters defined in ``[tool.flwr.app.config]`` section in +the ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 lr=0.05" + +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. + +The Data +-------- + +We will use `Flower Datasets `_ to easily download and +partition the `MNIST` dataset. In this example you'll make use of the `IidPartitioner `_ -to generate `num_partitions` partitions. You can choose `other -partitioners -`_ -available in Flower Datasets: - -.. code:: python - - partitioner = IidPartitioner(num_partitions=num_partitions) - fds = FederatedDataset( - dataset="ylecun/mnist", - partitioners={"train": partitioner}, - ) - partition = fds.load_partition(partition_id) - partition_splits = partition.train_test_split(test_size=0.2, seed=42) - - partition_splits["train"].set_format("numpy") - partition_splits["test"].set_format("numpy") - - train_partition = partition_splits["train"].map( - lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, - input_columns="image", - ) - test_partition = partition_splits["test"].map( - lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, - input_columns="image", - ) - - data = ( - train_partition["img"], - train_partition["label"].astype(np.uint32), - test_partition["img"], - test_partition["label"].astype(np.uint32), - ) - - train_images, train_labels, test_images, test_labels = map(mx.array, data) - -*********** - The Model -*********** +to generate `num_partitions` partitions. You can choose `other partitioners +`_ available in +Flower Datasets: + +.. 
code-block:: python + + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + partition_splits = partition.train_test_split(test_size=0.2, seed=42) + + partition_splits["train"].set_format("numpy") + partition_splits["test"].set_format("numpy") + + train_partition = partition_splits["train"].map( + lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, + input_columns="image", + ) + test_partition = partition_splits["test"].map( + lambda img: {"img": img.reshape(-1, 28 * 28).squeeze().astype(np.float32) / 255.0}, + input_columns="image", + ) + + data = ( + train_partition["img"], + train_partition["label"].astype(np.uint32), + test_partition["img"], + test_partition["label"].astype(np.uint32), + ) + + train_images, train_labels, test_images, test_labels = map(mx.array, data) + +The Model +--------- We define the model as in the `centralized MLX example -`_, it's a -simple MLP: +`_, it's a simple MLP: -.. code:: python +.. 
code-block:: python - class MLP(nn.Module): - """A simple MLP.""" + class MLP(nn.Module): + """A simple MLP.""" - def __init__( - self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int - ): - super().__init__() - layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim] - self.layers = [ - nn.Linear(idim, odim) - for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:]) - ] + def __init__( + self, num_layers: int, input_dim: int, hidden_dim: int, output_dim: int + ): + super().__init__() + layer_sizes = [input_dim] + [hidden_dim] * num_layers + [output_dim] + self.layers = [ + nn.Linear(idim, odim) + for idim, odim in zip(layer_sizes[:-1], layer_sizes[1:]) + ] - def __call__(self, x): - for l in self.layers[:-1]: - x = mx.maximum(l(x), 0.0) - return self.layers[-1](x) + def __call__(self, x): + for l in self.layers[:-1]: + x = mx.maximum(l(x), 0.0) + return self.layers[-1](x) -We also define some utility functions to test our model and to iterate -over batches. +We also define some utility functions to test our model and to iterate over batches. -.. code:: python +.. 
code-block:: python - def loss_fn(model, X, y): - return mx.mean(nn.losses.cross_entropy(model(X), y)) + def loss_fn(model, X, y): + return mx.mean(nn.losses.cross_entropy(model(X), y)) - def eval_fn(model, X, y): - return mx.mean(mx.argmax(model(X), axis=1) == y) + def eval_fn(model, X, y): + return mx.mean(mx.argmax(model(X), axis=1) == y) - def batch_iterate(batch_size, X, y): - perm = mx.array(np.random.permutation(y.size)) - for s in range(0, y.size, batch_size): - ids = perm[s : s + batch_size] - yield X[ids], y[ids] + def batch_iterate(batch_size, X, y): + perm = mx.array(np.random.permutation(y.size)) + for s in range(0, y.size, batch_size): + ids = perm[s : s + batch_size] + yield X[ids], y[ids] The ClientApp -============= +~~~~~~~~~~~~~ -The main changes we have to make to use `MLX` with `Flower` will be -found in the ``get_params()`` and ``set_params()`` functions. Indeed, -MLX doesn't provide an easy way to convert the model parameters into a -list of ``np.array`` objects (the format we need for the serialization -of the messages to work). +The main changes we have to make to use `MLX` with `Flower` will be found in the +``get_params()`` and ``set_params()`` functions. Indeed, MLX doesn't provide an easy way +to convert the model parameters into a list of ``np.array`` objects (the format we need +for the serialization of the messages to work). The way MLX stores its parameters is as follows: -.. code:: shell - - { - "layers": [ - {"weight": mlx.core.array, "bias": mlx.core.array}, - {"weight": mlx.core.array, "bias": mlx.core.array}, - ..., - {"weight": mlx.core.array, "bias": mlx.core.array} - ] - } - -Therefore, to get our list of ``np.array`` objects, we need to extract -each array and convert them into a NumPy array: - -.. code:: python - - def get_params(model): - layers = model.parameters()["layers"] - return [np.array(val) for layer in layers for _, val in layer.items()] - -For the ``set_params()`` function, we perform the reverse operation. 
We -receive a list of NumPy arrays and want to convert them into MLX -parameters. Therefore, we iterate through pairs of parameters and assign -them to the `weight` and `bias` keys of each layer dict: - -.. code:: python - - def set_params(model, parameters): - new_params = {} - new_params["layers"] = [ - {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])} - for i in range(0, len(parameters), 2) - ] - model.update(new_params) - -The rest of the functionality is directly inspired by the centralized -case. The ``fit()`` method in the client trains the model using the -local dataset: - -.. code:: python - - def fit(self, parameters, config): - self.set_parameters(parameters) - for _ in range(self.num_epochs): - for X, y in batch_iterate( - self.batch_size, self.train_images, self.train_labels - ): - _, grads = self.loss_and_grad_fn(self.model, X, y) - self.optimizer.update(self.model, grads) - mx.eval(self.model.parameters(), self.optimizer.state) - return self.get_parameters(config={}), len(self.train_images), {} - -Here, after updating the parameters, we perform the training as in the -centralized case, and return the new parameters. +.. code-block:: shell + + { + "layers": [ + {"weight": mlx.core.array, "bias": mlx.core.array}, + {"weight": mlx.core.array, "bias": mlx.core.array}, + ..., + {"weight": mlx.core.array, "bias": mlx.core.array} + ] + } + +Therefore, to get our list of ``np.array`` objects, we need to extract each array and +convert them into a NumPy array: + +.. code-block:: python + + def get_params(model): + layers = model.parameters()["layers"] + return [np.array(val) for layer in layers for _, val in layer.items()] + +For the ``set_params()`` function, we perform the reverse operation. We receive a list +of NumPy arrays and want to convert them into MLX parameters. Therefore, we iterate +through pairs of parameters and assign them to the `weight` and `bias` keys of each +layer dict: + +.. 
code-block:: python + + def set_params(model, parameters): + new_params = {} + new_params["layers"] = [ + {"weight": mx.array(parameters[i]), "bias": mx.array(parameters[i + 1])} + for i in range(0, len(parameters), 2) + ] + model.update(new_params) + +The rest of the functionality is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset: + +.. code-block:: python + + def fit(self, parameters, config): + self.set_parameters(parameters) + for _ in range(self.num_epochs): + for X, y in batch_iterate( + self.batch_size, self.train_images, self.train_labels + ): + _, grads = self.loss_and_grad_fn(self.model, X, y) + self.optimizer.update(self.model, grads) + mx.eval(self.model.parameters(), self.optimizer.state) + return self.get_parameters(config={}), len(self.train_images), {} + +Here, after updating the parameters, we perform the training as in the centralized case, +and return the new parameters. And for the ``evaluate()`` method of the client: -.. code:: python +.. code-block:: python - def evaluate(self, parameters, config): - self.set_parameters(parameters) - accuracy = eval_fn(self.model, self.test_images, self.test_labels) - loss = loss_fn(self.model, self.test_images, self.test_labels) - return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} + def evaluate(self, parameters, config): + self.set_parameters(parameters) + accuracy = eval_fn(self.model, self.test_images, self.test_labels) + loss = loss_fn(self.model, self.test_images, self.test_labels) + return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} -We also begin by updating the parameters with the ones sent by the -server, and then we compute the loss and accuracy using the functions -defined above. In the constructor of the ``FlowerClient`` we instantiate -the `MLP` model as well as other components such as the optimizer. 
+We also begin by updating the parameters with the ones sent by the server, and then we +compute the loss and accuracy using the functions defined above. In the constructor of +the ``FlowerClient`` we instantiate the `MLP` model as well as other components such as +the optimizer. Putting everything together we have: -.. code:: python - - class FlowerClient(NumPyClient): - def __init__( - self, - data, - num_layers, - hidden_dim, - num_classes, - batch_size, - learning_rate, - num_epochs, - ): - self.num_layers = num_layers - self.hidden_dim = hidden_dim - self.num_classes = num_classes - self.batch_size = batch_size - self.learning_rate = learning_rate - self.num_epochs = num_epochs - - self.train_images, self.train_labels, self.test_images, self.test_labels = data - self.model = MLP( - num_layers, self.train_images.shape[-1], hidden_dim, num_classes - ) - self.optimizer = optim.SGD(learning_rate=learning_rate) - self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn) - self.num_epochs = num_epochs - self.batch_size = batch_size - - def get_parameters(self, config): - return get_params(self.model) - - def set_parameters(self, parameters): - set_params(self.model, parameters) - - def fit(self, parameters, config): - self.set_parameters(parameters) - for _ in range(self.num_epochs): - for X, y in batch_iterate( - self.batch_size, self.train_images, self.train_labels - ): - _, grads = self.loss_and_grad_fn(self.model, X, y) - self.optimizer.update(self.model, grads) - mx.eval(self.model.parameters(), self.optimizer.state) - return self.get_parameters(config={}), len(self.train_images), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - accuracy = eval_fn(self.model, self.test_images, self.test_labels) - loss = loss_fn(self.model, self.test_images, self.test_labels) - return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} - -Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` -defined above by 
means of a ``client_fn()`` callback. Note that -``context`` enables you to get access to hyperparemeters defined in -``pyproject.toml`` to configure the run. In this tutorial we access, -among other hyperparameters, the ``local-epochs`` setting to control the -number of epochs a ``ClientApp`` will perform when running the ``fit()`` -method. - -.. code:: python - - def client_fn(context: Context): - partition_id = context.node_config["partition-id"] - num_partitions = context.node_config["num-partitions"] - data = load_data(partition_id, num_partitions) - - num_layers = context.run_config["num-layers"] - hidden_dim = context.run_config["hidden-dim"] - num_classes = 10 - batch_size = context.run_config["batch-size"] - learning_rate = context.run_config["lr"] - num_epochs = context.run_config["local-epochs"] - - # Return Client instance - return FlowerClient( - data, num_layers, hidden_dim, num_classes, batch_size, learning_rate, num_epochs - ).to_client() - - - # Flower ClientApp - app = ClientApp(client_fn) +.. 
code-block:: python
+
+    class FlowerClient(NumPyClient):
+        def __init__(
+            self,
+            data,
+            num_layers,
+            hidden_dim,
+            num_classes,
+            batch_size,
+            learning_rate,
+            num_epochs,
+        ):
+            self.num_layers = num_layers
+            self.hidden_dim = hidden_dim
+            self.num_classes = num_classes
+            self.batch_size = batch_size
+            self.learning_rate = learning_rate
+            self.num_epochs = num_epochs
+
+            self.train_images, self.train_labels, self.test_images, self.test_labels = data
+            self.model = MLP(
+                num_layers, self.train_images.shape[-1], hidden_dim, num_classes
+            )
+            self.optimizer = optim.SGD(learning_rate=learning_rate)
+            self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn)
+            self.num_epochs = num_epochs
+            self.batch_size = batch_size
+
+        def get_parameters(self, config):
+            return get_params(self.model)
+
+        def set_parameters(self, parameters):
+            set_params(self.model, parameters)
+
+        def fit(self, parameters, config):
+            self.set_parameters(parameters)
+            for _ in range(self.num_epochs):
+                for X, y in batch_iterate(
+                    self.batch_size, self.train_images, self.train_labels
+                ):
+                    _, grads = self.loss_and_grad_fn(self.model, X, y)
+                    self.optimizer.update(self.model, grads)
+                    mx.eval(self.model.parameters(), self.optimizer.state)
+            return self.get_parameters(config={}), len(self.train_images), {}
+
+        def evaluate(self, parameters, config):
+            self.set_parameters(parameters)
+            accuracy = eval_fn(self.model, self.test_images, self.test_labels)
+            loss = loss_fn(self.model, self.test_images, self.test_labels)
+            return loss.item(), len(self.test_images), {"accuracy": accuracy.item()}
+
+Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by
+means of a ``client_fn()`` callback. Note that ``context`` enables you to get access to
+hyperparameters defined in ``pyproject.toml`` to configure the run. 
In this tutorial we +access, among other hyperparameters, the ``local-epochs`` setting to control the number +of epochs a ``ClientApp`` will perform when running the ``fit()`` method. + +.. code-block:: python + + def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + + num_layers = context.run_config["num-layers"] + hidden_dim = context.run_config["hidden-dim"] + num_classes = 10 + batch_size = context.run_config["batch-size"] + learning_rate = context.run_config["lr"] + num_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient( + data, num_layers, hidden_dim, num_classes, batch_size, learning_rate, num_epochs + ).to_client() + + + # Flower ClientApp + app = ClientApp(client_fn) The ServerApp -------------- ++++++++++++++ -To construct a ``ServerApp``, we define a ``server_fn()`` callback with -an identical signature to that of ``client_fn()``, but the return type -is `ServerAppComponents +To construct a ``ServerApp``, we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()``, but the return type is `ServerAppComponents `_ as opposed to `Client -`_. -In this example we use the ``FedAvg`` strategy. +`_. In this +example we use the ``FedAvg`` strategy. -.. code:: python +.. 
code-block:: python - def server_fn(context: Context): - # Read from config - num_rounds = context.run_config["num-server-rounds"] + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] - # Define strategy - strategy = FedAvg() - config = ServerConfig(num_rounds=num_rounds) + # Define strategy + strategy = FedAvg() + config = ServerConfig(num_rounds=num_rounds) - return ServerAppComponents(strategy=strategy, config=config) + return ServerAppComponents(strategy=strategy, config=config) - # Create ServerApp - app = ServerApp(server_fn=server_fn) + # Create ServerApp + app = ServerApp(server_fn=server_fn) -Congratulations! You've successfully built and run your first federated -learning system. +Congratulations! You've successfully built and run your first federated learning system. .. note:: - Check the `source code - `_ - of the extended version of this tutorial in - ``examples/quickstart-mlx`` in the Flower GitHub repository. + Check the `source code + `_ of the extended + version of this tutorial in ``examples/quickstart-mlx`` in the Flower GitHub + repository. diff --git a/doc/source/tutorial-quickstart-pandas.rst b/doc/source/tutorial-quickstart-pandas.rst index bb9cb1b28b54..00d831a15736 100644 --- a/doc/source/tutorial-quickstart-pandas.rst +++ b/doc/source/tutorial-quickstart-pandas.rst @@ -1,12 +1,12 @@ .. _quickstart-pandas: - Quickstart Pandas ================= .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with Pandas to perform Federated Analytics. + :description: Check out this Federated Learning quickstart tutorial for using Flower with Pandas to perform Federated Analytics. Let's build a federated analytics system using Pandas and Flower! -Please refer to the `full code example `_ to learn more. +Please refer to the `full code example +`_ to learn more. 
diff --git a/doc/source/tutorial-quickstart-pytorch-lightning.rst b/doc/source/tutorial-quickstart-pytorch-lightning.rst index acfbecf41260..089865a2969d 100644 --- a/doc/source/tutorial-quickstart-pytorch-lightning.rst +++ b/doc/source/tutorial-quickstart-pytorch-lightning.rst @@ -1,12 +1,118 @@ .. _quickstart-pytorch-lightning: - Quickstart PyTorch Lightning ============================ -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch Lightning to train an Auto Encoder model on MNIST. +In this federated learning tutorial we will learn how to train an AutoEncoder model on +MNIST using Flower and PyTorch Lightning. It is recommended to create a virtual +environment and run everything within a :doc:`virtualenv +`. + +Then, clone the code example directly from GitHub: + +.. code-block:: shell + + git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-pytorch-lightning . \ + && rm -rf _tmp && cd quickstart-pytorch-lightning + +This will create a new directory called `quickstart-pytorch-lightning` containing the +following files: + +.. code-block:: shell + + quickstart-pytorch-lightning + ├── pytorchlightning_example + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md + +Next, activate your environment, then run: + +.. code-block:: shell + + # Navigate to the example directory + $ cd path/to/quickstart-pytorch-lightning + + # Install project and dependencies + $ pip install -e . + +By default, Flower Simulation Engine will be started and it will create a federation of +4 nodes using `FedAvg +`_ +as the aggregation strategy. The dataset will be partitioned using Flower Dataset's +`IidPartitioner +`_. +To run the project, do: + +.. 
code-block:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code-block:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 2 clients (out of 4) + INFO : aggregate_fit: received 2 results and 0 failures + INFO : configure_evaluate: strategy sampled 2 clients (out of 4) + INFO : aggregate_evaluate: received 2 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 136.92s + INFO : History (loss, distributed): + INFO : round 1: 0.04982871934771538 + INFO : round 2: 0.046457378193736076 + INFO : round 3: 0.04506748169660568 + INFO : + +Each simulated `ClientApp` (two per round) will also log a summary of their local +training process. Expect this output to be similar to: + +.. 
code-block:: shell + + # The left part indicates the process ID running the `ClientApp` + (ClientAppActor pid=38155) ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ + (ClientAppActor pid=38155) ┃ Test metric ┃ DataLoader 0 ┃ + (ClientAppActor pid=38155) ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ + (ClientAppActor pid=38155) │ test_loss │ 0.045175597071647644 │ + (ClientAppActor pid=38155) └───────────────────────────┴───────────────────────────┘ + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config num-server-rounds=5 -Let's build a horizontal federated learning system using PyTorch Lightning and Flower! +.. note:: -Please refer to the `full code example `_ to learn more. + Check the `source code + `_ + of this tutorial in ``examples/quickstart-pytorch-lightning`` in the Flower GitHub + repository. diff --git a/doc/source/tutorial-quickstart-pytorch.rst b/doc/source/tutorial-quickstart-pytorch.rst index 4515e8d0eeb5..6b99e378d086 100644 --- a/doc/source/tutorial-quickstart-pytorch.rst +++ b/doc/source/tutorial-quickstart-pytorch.rst @@ -1,384 +1,366 @@ .. _quickstart-pytorch: -#################### - Quickstart PyTorch -#################### - -In this federated learning tutorial we will learn how to train a -Convolutional Neural Network on CIFAR-10 using Flower and PyTorch. It is -recommended to create a virtual environment and run everything within a -:doc:`virtualenv `. - -Let's use `flwr new` to create a complete Flower+PyTorch project. It -will generate all the files needed to run, by default with the Flower -Simulation Engine, a federation of 10 nodes using `FedAvg +Quickstart PyTorch +================== + +In this federated learning tutorial we will learn how to train a Convolutional Neural +Network on CIFAR-10 using Flower and PyTorch. 
It is recommended to create a virtual +environment and run everything within a :doc:`virtualenv +`. + +Let's use `flwr new` to create a complete Flower+PyTorch project. It will generate all +the files needed to run, by default with the Flower Simulation Engine, a federation of +10 nodes using `FedAvg `_. The dataset will be partitioned using Flower Dataset's `IidPartitioner `_. -Now that we have a rough idea of what this example is about, let's get -started. First, install Flower in your new environment: +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -.. code:: shell +.. code-block:: shell - # In a new Python environment - $ pip install flwr + # In a new Python environment + $ pip install flwr -Then, run the command below. You will be prompted to select one of the -available templates (choose ``PyTorch``), give a name to your project, -and type in your developer name: +Then, run the command below. You will be prompted to select one of the available +templates (choose ``PyTorch``), give a name to your project, and type in your developer +name: -.. code:: shell +.. code-block:: shell - $ flwr new + $ flwr new -After running it you'll notice a new directory with your project name -has been created. It should have the following structure: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: -.. code:: shell +.. 
code-block:: shell - - ├── - │ ├── __init__.py - │ ├── client_app.py # Defines your ClientApp - │ ├── server_app.py # Defines your ServerApp - │ └── task.py # Defines your model, training and data loading - ├── pyproject.toml # Project metadata like dependencies and configs - └── README.md + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md -If you haven't yet installed the project and its dependencies, you can -do so by: +If you haven't yet installed the project and its dependencies, you can do so by: -.. code:: shell +.. code-block:: shell - # From the directory where your pyproject.toml is - $ pip install -e . + # From the directory where your pyproject.toml is + $ pip install -e . To run the project, do: -.. code:: shell +.. code-block:: shell - # Run with default arguments - $ flwr run . + # Run with default arguments + $ flwr run . With default arguments you will see an output like this one: -.. code:: shell - - Loading project configuration... - Success - WARNING : FAB ID is not provided; the default ClientApp will be loaded. 
- INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout - INFO : - INFO : [INIT] - INFO : Using initial global parameters provided by strategy - INFO : Evaluating initial global parameters - INFO : - INFO : [ROUND 1] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - WARNING : No fit_metrics_aggregation_fn provided - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - WARNING : No evaluate_metrics_aggregation_fn provided - INFO : - INFO : [ROUND 2] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [ROUND 3] - INFO : configure_fit: strategy sampled 5 clients (out of 10) - INFO : aggregate_fit: received 5 results and 0 failures - INFO : configure_evaluate: strategy sampled 10 clients (out of 10) - INFO : aggregate_evaluate: received 10 results and 0 failures - INFO : - INFO : [SUMMARY] - INFO : Run finished 3 round(s) in 21.35s - INFO : History (loss, distributed): - INFO : round 1: 2.2978184528648855 - INFO : round 2: 2.173852103948593 - INFO : round 3: 2.039920600131154 - INFO : - -You can also override the parameters defined in the -``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this: - -.. code:: shell - - # Override some arguments - $ flwr run . --run-config num-server-rounds=5,local-epochs=3 - -What follows is an explanation of each component in the project you just -created: dataset partition, the model, defining the ``ClientApp`` and -defining the ``ServerApp``. - -********** - The Data -********** - -This tutorial uses `Flower Datasets `_ -to easily download and partition the `CIFAR-10` dataset. In this example -you'll make use of the `IidPartitioner +.. 
code-block:: shell + + Loading project configuration... + Success + WARNING : FAB ID is not provided; the default ClientApp will be loaded. + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Evaluating initial global parameters + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 5 clients (out of 10) + INFO : aggregate_fit: received 5 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 21.35s + INFO : History (loss, distributed): + INFO : round 1: 2.2978184528648855 + INFO : round 2: 2.173852103948593 + INFO : round 3: 2.039920600131154 + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 local-epochs=3" + +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. 
+ +The Data +-------- + +This tutorial uses `Flower Datasets `_ to easily +download and partition the `CIFAR-10` dataset. In this example you'll make use of the +`IidPartitioner `_ -to generate `num_partitions` partitions. You can choose `other -partitioners -`_ -available in Flower Datasets. Each ``ClientApp`` will call this function -to create dataloaders with the data that correspond to their data -partition. - -.. code:: python - - partitioner = IidPartitioner(num_partitions=num_partitions) - fds = FederatedDataset( - dataset="uoft-cs/cifar10", - partitioners={"train": partitioner}, - ) - partition = fds.load_partition(partition_id) - # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - pytorch_transforms = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) - - - def apply_transforms(batch): - """Apply transforms to the partition from FederatedDataset.""" - batch["img"] = [pytorch_transforms(img) for img in batch["img"]] - return batch - - - partition_train_test = partition_train_test.with_transform(apply_transforms) - trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) - testloader = DataLoader(partition_train_test["test"], batch_size=32) - -*********** - The Model -*********** - -We defined a simple Convolutional Neural Network (CNN), but feel free to -replace it with a more sophisticated model if you'd like: - -.. 
code:: python - - class Net(nn.Module): - """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - - def __init__(self): - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x): - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - return self.fc3(x) - -In addition to defining the model architecture, we also include two -utility functions to perform both training (i.e. ``train()``) and -evaluation (i.e. ``test()``) using the above model. These functions -should look fairly familiar if you have some prior experience with -PyTorch. Note these functions do not have anything specific to Flower. -That being said, the training function will normally be called, as we'll -see later, from a Flower client passing its own data. In summary, your -clients can use standard training/testing functions to perform local -training or evaluation: - -.. 
code:: python - - def train(net, trainloader, epochs, device): - """Train the model on the training set.""" - net.to(device) # move model to GPU if available - criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9) - net.train() - running_loss = 0.0 - for _ in range(epochs): - for batch in trainloader: - images = batch["img"] - labels = batch["label"] - optimizer.zero_grad() - loss = criterion(net(images.to(device)), labels.to(device)) - loss.backward() - optimizer.step() - running_loss += loss.item() - - avg_trainloss = running_loss / len(trainloader) - return avg_trainloss - - - def test(net, testloader, device): - """Validate the model on the test set.""" - net.to(device) - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - with torch.no_grad(): - for batch in testloader: - images = batch["img"].to(device) - labels = batch["label"].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy - -*************** - The ClientApp -*************** - -The main changes we have to make to use `PyTorch` with `Flower` will be -found in the ``get_weights()`` and ``set_weights()`` functions. In -``get_weights()`` PyTorch model parameters are extracted and represented -as a list of NumPy arrays. The ``set_weights()`` function that's the -oposite: given a list of NumPy arrays it applies them to an existing -PyTorch model. Doing this in fairly easy in PyTorch. +to generate `num_partitions` partitions. You can choose `other partitioners +`_ available in +Flower Datasets. Each ``ClientApp`` will call this function to create dataloaders with +the data that correspond to their data partition. + +.. 
code-block:: python + + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = DataLoader(partition_train_test["test"], batch_size=32) + +The Model +--------- + +We defined a simple Convolutional Neural Network (CNN), but feel free to replace it with +a more sophisticated model if you'd like: + +.. code-block:: python + + class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + +In addition to defining the model architecture, we also include two utility functions to +perform both training (i.e. ``train()``) and evaluation (i.e. ``test()``) using the +above model. These functions should look fairly familiar if you have some prior +experience with PyTorch. Note these functions do not have anything specific to Flower. 
+That being said, the training function will normally be called, as we'll see later, from +a Flower client passing its own data. In summary, your clients can use standard +training/testing functions to perform local training or evaluation: + +.. code-block:: python + + def train(net, trainloader, epochs, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9) + net.train() + running_loss = 0.0 + for _ in range(epochs): + for batch in trainloader: + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + loss = criterion(net(images.to(device)), labels.to(device)) + loss.backward() + optimizer.step() + running_loss += loss.item() + + avg_trainloss = running_loss / len(trainloader) + return avg_trainloss + + + def test(net, testloader, device): + """Validate the model on the test set.""" + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + +The ClientApp +------------- + +The main changes we have to make to use `PyTorch` with `Flower` will be found in the +``get_weights()`` and ``set_weights()`` functions. In ``get_weights()`` PyTorch model +parameters are extracted and represented as a list of NumPy arrays. The +``set_weights()`` function that's the opposite: given a list of NumPy arrays it applies +them to an existing PyTorch model. Doing this is fairly easy in PyTorch. .. note:: - The specific implementation of ``get_weights()`` and - ``set_weights()`` depends on the type of models you use.
The ones - shown below work for a wide range of PyTorch models but you might - need to adjust them if you have more exotic model architectures. - -.. code:: python - - def get_weights(net): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - - def set_weights(net, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - -The rest of the functionality is directly inspired by the centralized -case. The ``fit()`` method in the client trains the model using the -local dataset. Similarly, the ``evaluate()`` method is used to evaluate -the model received on a held-out validation set that the client might -have: - -.. code:: python - - class FlowerClient(NumPyClient): - def __init__(self, net, trainloader, valloader, local_epochs): - self.net = net - self.trainloader = trainloader - self.valloader = valloader - self.local_epochs = local_epochs - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.net.to(device) - - def fit(self, parameters, config): - set_weights(self.net, parameters) - results = train( - self.net, - self.trainloader, - self.valloader, - self.local_epochs, - self.device, - ) - return get_weights(self.net), len(self.trainloader.dataset), results - - def evaluate(self, parameters, config): - set_weights(self.net, parameters) - loss, accuracy = test(self.net, self.valloader, self.device) - return loss, len(self.valloader.dataset), {"accuracy": accuracy} - -Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` -defined above by means of a ``client_fn()`` callback. Note that the -`context` enables you to get access to hyperparemeters defined in your -``pyproject.toml`` to configure the run. In this tutorial we access the -`local-epochs` setting to control the number of epochs a ``ClientApp`` -will perform when running the ``fit()`` method. 
You could define + The specific implementation of ``get_weights()`` and ``set_weights()`` depends on + the type of models you use. The ones shown below work for a wide range of PyTorch + models but you might need to adjust them if you have more exotic model + architectures. + +.. code-block:: python + + def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + + def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + +The rest of the functionality is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset. Similarly, the +``evaluate()`` method is used to evaluate the model received on a held-out validation +set that the client might have: + +.. code-block:: python + + class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, valloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + + def fit(self, parameters, config): + set_weights(self.net, parameters) + results = train( + self.net, + self.trainloader, + self.valloader, + self.local_epochs, + self.device, + ) + return get_weights(self.net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the `context` enables you to get access +to hyperparameters defined in your ``pyproject.toml`` to configure the run.
In this +tutorial we access the `local-epochs` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method. You could define additioinal hyperparameters in ``pyproject.toml`` and access them here. -.. code:: python +.. code-block:: python - def client_fn(context: Context): - # Load model and data - net = Net() - partition_id = context.node_config["partition-id"] - num_partitions = context.node_config["num-partitions"] - trainloader, valloader = load_data(partition_id, num_partitions) - local_epochs = context.run_config["local-epochs"] + def client_fn(context: Context): + # Load model and data + net = Net() + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] - # Return Client instance - return FlowerClient(net, trainloader, valloader, local_epochs).to_client() + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() - # Flower ClientApp - app = ClientApp(client_fn) + # Flower ClientApp + app = ClientApp(client_fn) -*************** - The ServerApp -*************** +The ServerApp +------------- -To construct a ``ServerApp`` we define a ``server_fn()`` callback with -an identical signature to that of ``client_fn()`` but the return type is -`ServerAppComponents +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is `ServerAppComponents `_ as opposed to a `Client -`_. -In this example we use the `FedAvg`. To it we pass a randomly -initialized model that will server as the global model to federated. -Note that the value of ``fraction_fit`` is read from the run config. You -can find the default value defined in the ``pyproject.toml``. +`_. In this +example we use the `FedAvg`. 
To it we pass a randomly initialized model that will serve +as the global model to federate. Note that the value of ``fraction_fit`` is read from +the run config. You can find the default value defined in the ``pyproject.toml``. -.. code:: python +.. code-block:: python - def server_fn(context: Context): - # Read from config - num_rounds = context.run_config["num-server-rounds"] - fraction_fit = context.run_config["fraction-fit"] + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] - # Initialize model parameters - ndarrays = get_weights(Net()) - parameters = ndarrays_to_parameters(ndarrays) + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) - # Define strategy - strategy = FedAvg( - fraction_fit=fraction_fit, - fraction_evaluate=1.0, - min_available_clients=2, - initial_parameters=parameters, - ) - config = ServerConfig(num_rounds=num_rounds) + # Define strategy + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) - return ServerAppComponents(strategy=strategy, config=config) + return ServerAppComponents(strategy=strategy, config=config) - # Create ServerApp - app = ServerApp(server_fn=server_fn) + # Create ServerApp + app = ServerApp(server_fn=server_fn) -Congratulations! You've successfully built and run your first federated -learning system. +Congratulations! You've successfully built and run your first federated learning system. .. note:: - Check the `source code - `_ - of the extended version of this tutorial in - ``examples/quickstart-pytorch`` in the Flower GitHub repository. + Check the `source code + `_ of the + extended version of this tutorial in ``examples/quickstart-pytorch`` in the Flower + GitHub repository.
-**************** - Video tutorial -**************** +Video tutorial +-------------- .. note:: - The video shown below shows how to setup a PyTorch + Flower project - using our previously recommended APIs. A new video tutorial will be - released that shows the new APIs (as the content above does) + The video shown below shows how to setup a PyTorch + Flower project using our + previously recommended APIs. A new video tutorial will be released that shows the + new APIs (as the content above does) .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch to train a CNN model on MNIST. + :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch to train a CNN model on MNIST. .. youtube:: jOmmuzMIQ4c - :width: 100% + :width: 100% diff --git a/doc/source/tutorial-quickstart-scikitlearn.rst b/doc/source/tutorial-quickstart-scikitlearn.rst index fc3b58925c06..6aea6b3d2d48 100644 --- a/doc/source/tutorial-quickstart-scikitlearn.rst +++ b/doc/source/tutorial-quickstart-scikitlearn.rst @@ -1,288 +1,336 @@ .. _quickstart-scikitlearn: - Quickstart scikit-learn ======================= -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with scikit-learn to train a linear regression model. +In this federated learning tutorial we will learn how to train a Logistic Regression on +MNIST using Flower and scikit-learn. It is recommended to create a virtual environment +and run everything within a :doc:`virtualenv `. -In this tutorial, we will learn how to train a :code:`Logistic Regression` model on MNIST using Flower and scikit-learn. +Let's use ``flwr new`` to create a complete Flower+scikit-learn project. 
It will +generate all the files needed to run, by default with the Flower Simulation Engine, a +federation of 10 nodes using |fedavg|_ The dataset will be partitioned using +|flowerdatasets|_'s |iidpartitioner|_ -It is recommended to create a virtual environment and run everything within this :doc:`virtualenv `. +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -Our example consists of one *server* and two *clients* all having the same model. +.. code-block:: shell -*Clients* are responsible for generating individual model parameter updates for the model based on their local datasets. -These updates are then sent to the *server* which will aggregate them to produce an updated global model. Finally, the *server* sends this improved version of the model back to each *client*. -A complete cycle of parameters updates is called a *round*. + # In a new Python environment + $ pip install flwr -Now that we have a rough idea of what is going on, let's get started. We first need to install Flower. You can do this by running: +Then, run the command below. You will be prompted to select one of the available +templates (choose ``sklearn``), give a name to your project, and type in your developer +name: .. code-block:: shell - $ pip install flwr + $ flwr new -Since we want to use scikit-learn, let's go ahead and install it: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: .. code-block:: shell - $ pip install scikit-learn + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md -Or simply install all dependencies using Poetry: +If you haven't yet installed the project and its dependencies, you can do so by: .. 
code-block:: shell - $ poetry install + # From the directory where your pyproject.toml is + $ pip install -e . +To run the project, do: -Flower Client -------------- +.. code-block:: shell + + # Run with default arguments + $ flwr run . + +With default arguments you will see an output like this one: + +.. code-block:: shell + + Loading project configuration... + Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Requesting initial parameters from one random client + INFO : Received initial parameters from one random client + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 19.41s + INFO : History (loss, distributed): + INFO : round 1: 1.3447584261018466 + INFO : round 2: 0.9680018613482815 + INFO : round 3: 0.7667920399137523 + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in 
``pyproject.toml`` like this: + +.. code-block:: shell -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. -However, before setting up the client and server, we will define all functionalities that we need for our federated learning setup within :code:`utils.py`. The :code:`utils.py` contains different functions defining all the machine learning basics: + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 local-epochs=2" -* :code:`get_model_parameters()` - * Returns the parameters of a :code:`sklearn` LogisticRegression model -* :code:`set_model_params()` - * Sets the parameters of a :code:`sklearn` LogisticRegression model -* :code:`set_initial_params()` - * Initializes the model parameters that the Flower server will ask for +What follows is an explanation of each component in the project you just created: +dataset partition, the model, defining the ``ClientApp`` and defining the ``ServerApp``. -Please check out :code:`utils.py` `here `_ for more details. -The pre-defined functions are used in the :code:`client.py` and imported. The :code:`client.py` also requires to import several packages such as Flower and scikit-learn: +The Data +-------- + +This tutorial uses |flowerdatasets|_ to easily download and partition the `MNIST +`_ dataset. In this example you'll make +use of the |iidpartitioner|_ to generate ``num_partitions`` partitions. You can choose +|otherpartitioners|_ available in Flower Datasets. Each ``ClientApp`` will call this +function to create dataloaders with the data that correspond to their data partition. .. 
code-block:: python - import argparse - import warnings - - from sklearn.linear_model import LogisticRegression - from sklearn.metrics import log_loss - - import flwr as fl - import utils - from flwr_datasets import FederatedDataset + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="mnist", + partitioners={"train": partitioner}, + ) + + dataset = fds.load_partition(partition_id, "train").with_format("numpy") + + X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + + # Split the on edge data: 80% train, 20% test + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] + y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] + +The Model +--------- -Prior to local training, we need to load the MNIST dataset, a popular image classification dataset of handwritten digits for machine learning, and partition the dataset for FL. This can be conveniently achieved using `Flower Datasets `_. -The :code:`FederatedDataset.load_partition()` method loads the partitioned training set for each partition ID defined in the :code:`--partition-id` argument. +We define the |logisticregression|_ model from scikit-learn in the ``get_model()`` +function: .. 
code-block:: python - if __name__ == "__main__": - N_CLIENTS = 10 - - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--partition-id", - type=int, - choices=range(0, N_CLIENTS), - required=True, - help="Specifies the artificial data partition", + def get_model(penalty: str, local_epochs: int): + + return LogisticRegression( + penalty=penalty, + max_iter=local_epochs, + warm_start=True, ) - args = parser.parse_args() - partition_id = args.partition_id - - fds = FederatedDataset(dataset="mnist", partitioners={"train": N_CLIENTS}) - - dataset = fds.load_partition(partition_id, "train").with_format("numpy") - X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] - - X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] - y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] +To perform the training and evaluation, we will make use of the ``.fit()`` and +``.score()`` methods available in the ``LogisticRegression`` class. + +The ClientApp +------------- -Next, the logistic regression model is defined and initialized with :code:`utils.set_initial_params()`. +The main changes we have to make to use scikit-learn with Flower will be found in the +``get_model_params()``, ``set_model_params()``, and ``set_initial_params()`` functions. +In ``get_model_params()``, the coefficients and intercept of the logistic regression +model are extracted and represented as a list of NumPy arrays. In +``set_model_params()``, that's the opposite: given a list of NumPy arrays it applies +them to an existing ``LogisticRegression`` model. Finally, in ``set_initial_params()``, +we initialize the model parameters based on the MNIST dataset, which has 10 classes +(corresponding to the 10 digits) and 784 features (corresponding to the size of the +MNIST image array, which is 28 × 28). Doing this is fairly easy in scikit-learn. .. 
code-block:: python - model = LogisticRegression( - penalty="l2", - max_iter=1, # local epoch - warm_start=True, # prevent refreshing weights when fitting - ) + def get_model_params(model): + if model.fit_intercept: + params = [ + model.coef_, + model.intercept_, + ] + else: + params = [model.coef_] + return params + - utils.set_initial_params(model) - -The Flower server interacts with clients through an interface called -:code:`Client`. When the server selects a particular client for training, it -sends training instructions over the network. The client receives those -instructions and calls one of the :code:`Client` methods to run your code -(i.e., to fit the logistic regression we defined earlier). - -Flower provides a convenience class called :code:`NumPyClient` which makes it -easier to implement the :code:`Client` interface when your workload uses scikit-learn. -Implementing :code:`NumPyClient` usually means defining the following methods -(:code:`set_parameters` is optional though): - -#. :code:`get_parameters` - * return the model weight as a list of NumPy ndarrays -#. :code:`set_parameters` (optional) - * update the local model weights with the parameters received from the server - * is directly imported with :code:`utils.set_model_params()` -#. :code:`fit` - * set the local model weights - * train the local model - * return the updated local model weights -#. 
:code:`evaluate` - * test the local model - -The methods can be implemented in the following way: + def set_model_params(model, params): + model.coef_ = params[0] + if model.fit_intercept: + model.intercept_ = params[1] + return model + + + def set_initial_params(model): + n_classes = 10 # MNIST has 10 classes + n_features = 784 # Number of features in dataset + model.classes_ = np.array([i for i in range(10)]) + + model.coef_ = np.zeros((n_classes, n_features)) + if model.fit_intercept: + model.intercept_ = np.zeros((n_classes,)) + +The rest of the functionality is directly inspired by the centralized case: .. code-block:: python - class MnistClient(fl.client.NumPyClient): - def get_parameters(self, config): # type: ignore - return utils.get_model_parameters(model) + class FlowerClient(NumPyClient): + def __init__(self, model, X_train, X_test, y_train, y_test): + self.model = model + self.X_train = X_train + self.X_test = X_test + self.y_train = y_train + self.y_test = y_test + + def fit(self, parameters, config): + set_model_params(self.model, parameters) - def fit(self, parameters, config): # type: ignore - utils.set_model_params(model, parameters) + # Ignore convergence failure due to low local epochs with warnings.catch_warnings(): warnings.simplefilter("ignore") - model.fit(X_train, y_train) - print(f"Training finished for round {config['server_round']}") - return utils.get_model_parameters(model), len(X_train), {} + self.model.fit(self.X_train, self.y_train) - def evaluate(self, parameters, config): # type: ignore - utils.set_model_params(model, parameters) - loss = log_loss(y_test, model.predict_proba(X_test)) - accuracy = model.score(X_test, y_test) - return loss, len(X_test), {"accuracy": accuracy} + return get_model_params(self.model), len(self.X_train), {} + def evaluate(self, parameters, config): + set_model_params(self.model, parameters) + loss = log_loss(self.y_test, self.model.predict_proba(self.X_test)) + accuracy = self.model.score(self.X_test, 
self.y_test) + return loss, len(self.X_test), {"accuracy": accuracy} -We can now create an instance of our class :code:`MnistClient` and add one line -to actually run this client: +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the ``context`` enables you to get access +to hyperparameters defined in your ``pyproject.toml`` to configure the run. In this +tutorial we access the `local-epochs` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method. You could define +additional hyperparameters in ``pyproject.toml`` and access them here. .. code-block:: python - fl.client.start_client("0.0.0.0:8080", client=MnistClient().to_client()) + def client_fn(context: Context): + # Load data and model + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + X_train, X_test, y_train, y_test = load_data(partition_id, num_partitions) + penalty = context.run_config["penalty"] + local_epochs = context.run_config["local-epochs"] + model = get_model(penalty, local_epochs) -That's it for the client. We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"0.0.0.0:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use -:code:`"0.0.0.0:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we pass to the client. 
+ # Setting initial parameters, akin to model.compile for keras models + set_initial_params(model) -Flower Server ------------- + # Return Client instance + return FlowerClient(model, X_train, X_test, y_train, y_test).to_client() -The following Flower server is a little bit more advanced and returns an evaluation function for the server-side evaluation. -First, we import again all required libraries such as Flower and scikit-learn. -:code:`server.py`, import Flower and start the server: + # Flower ClientApp + app = ClientApp(client_fn) + +The ServerApp +------------- + +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is |serverappcomponents|_ as +opposed to a |client|_. In this example we use the `FedAvg` strategy. To it we pass a +zero-initialized model that will serve as the global model to be federated. Note that +the values of ``num-server-rounds``, ``penalty``, and ``local-epochs`` are read from the +run config. You can find the default values defined in the ``pyproject.toml``. .. code-block:: python - import flwr as fl - import utils - from flwr.common import NDArrays, Scalar - from sklearn.metrics import log_loss - from sklearn.linear_model import LogisticRegression - from typing import Dict - - from flwr_datasets import FederatedDataset + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] -The number of federated learning rounds is set in :code:`fit_round()` and the evaluation is defined in :code:`get_evaluate_fn()`. -The evaluation function is called after each federated learning round and gives you information about loss and accuracy. -Note that we also make use of Flower Datasets here to load the test split of the MNIST dataset for server-side evaluation. 
+ # Create LogisticRegression Model + penalty = context.run_config["penalty"] + local_epochs = context.run_config["local-epochs"] + model = get_model(penalty, local_epochs) -.. code-block:: python + # Setting initial parameters, akin to model.compile for keras models + set_initial_params(model) + + initial_parameters = ndarrays_to_parameters(get_model_params(model)) - def fit_round(server_round: int) -> Dict: - """Send round number to client.""" - return {"server_round": server_round} + # Define strategy + strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=initial_parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + return ServerAppComponents(strategy=strategy, config=config) - def get_evaluate_fn(model: LogisticRegression): - """Return an evaluation function for server-side evaluation.""" - fds = FederatedDataset(dataset="mnist", partitioners={"train": 10}) - dataset = fds.load_split("test").with_format("numpy") - X_test, y_test = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + # Create ServerApp + app = ServerApp(server_fn=server_fn) - def evaluate( - server_round: int, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: - utils.set_model_params(model, parameters) - loss = log_loss(y_test, model.predict_proba(X_test)) - accuracy = model.score(X_test, y_test) - return loss, {"accuracy": accuracy} +Congratulations! You've successfully built and run your first federated learning system +in scikit-learn. - return evaluate +.. note:: -The :code:`main` contains the server-side parameter initialization :code:`utils.set_initial_params()` as well as the aggregation strategy :code:`fl.server.strategy:FedAvg()`. The strategy is the default one, federated averaging (or FedAvg), with two clients and evaluation after each federated learning round. 
The server can be started with the command :code:`fl.server.start_server(server_address="0.0.0.0:8080", strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`. + Check the source code of the extended version of this tutorial in + |quickstart_sklearn_link|_ in the Flower GitHub repository. -.. code-block:: python +.. |client| replace:: ``Client`` - # Start Flower server for three rounds of federated learning - if __name__ == "__main__": - model = LogisticRegression() - utils.set_initial_params(model) - strategy = fl.server.strategy.FedAvg( - min_available_clients=2, - evaluate_fn=get_evaluate_fn(model), - on_fit_config_fn=fit_round, - ) - fl.server.start_server(server_address="0.0.0.0:8080", strategy=strategy, config=fl.server.ServerConfig(num_rounds=3)) +.. |fedavg| replace:: ``FedAvg`` +.. |flowerdatasets| replace:: Flower Datasets -Train the model, federated! ---------------------------- +.. |iidpartitioner| replace:: ``IidPartitioner`` -With both client and server ready, we can now run everything and see federated -learning in action. Federated learning systems usually have a server and multiple clients. We, therefore, have to start the server first: +.. |logisticregression| replace:: ``LogisticRegression`` -.. code-block:: shell +.. |otherpartitioners| replace:: other partitioners - $ python3 server.py +.. |serverappcomponents| replace:: ``ServerAppComponents`` -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: +.. |quickstart_sklearn_link| replace:: ``examples/sklearn-logreg-mnist`` -.. code-block:: shell +.. _client: ref-api/flwr.client.Client.html#client - $ python3 client.py +.. _fedavg: ref-api/flwr.server.strategy.FedAvg.html#flwr.server.strategy.FedAvg -Open another terminal and start the second client: +.. _flowerdatasets: https://flower.ai/docs/datasets/ -.. code-block:: shell +.. 
_iidpartitioner: https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner - $ python3 client.py +.. _logisticregression: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html -Each client will have its own dataset. -You should now see how the training does in the very first terminal (the one that started the server): +.. _otherpartitioners: https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html -.. code-block:: shell +.. _quickstart_sklearn_link: https://github.com/adap/flower/tree/main/examples/sklearn-logreg-mnist - INFO flower 2022-01-13 13:43:14,859 | app.py:73 | Flower server running (insecure, 3 rounds) - INFO flower 2022-01-13 13:43:14,859 | server.py:118 | Getting initial parameters - INFO flower 2022-01-13 13:43:17,903 | server.py:306 | Received initial parameters from one random client - INFO flower 2022-01-13 13:43:17,903 | server.py:120 | Evaluating initial parameters - INFO flower 2022-01-13 13:43:17,992 | server.py:123 | initial parameters (loss, other metrics): 2.3025850929940455, {'accuracy': 0.098} - INFO flower 2022-01-13 13:43:17,992 | server.py:133 | FL starting - DEBUG flower 2022-01-13 13:43:19,814 | server.py:251 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2022-01-13 13:43:20,046 | server.py:260 | fit_round received 2 results and 0 failures - INFO flower 2022-01-13 13:43:20,220 | server.py:148 | fit progress: (1, 1.3365667871792377, {'accuracy': 0.6605}, 2.227397900000142) - INFO flower 2022-01-13 13:43:20,220 | server.py:199 | evaluate_round: no clients selected, cancel - DEBUG flower 2022-01-13 13:43:20,220 | server.py:251 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2022-01-13 13:43:20,456 | server.py:260 | fit_round received 2 results and 0 failures - INFO flower 2022-01-13 13:43:20,603 | server.py:148 | fit progress: (2, 0.721620492535375, {'accuracy': 0.7796}, 
2.6108531999998377) - INFO flower 2022-01-13 13:43:20,603 | server.py:199 | evaluate_round: no clients selected, cancel - DEBUG flower 2022-01-13 13:43:20,603 | server.py:251 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2022-01-13 13:43:20,837 | server.py:260 | fit_round received 2 results and 0 failures - INFO flower 2022-01-13 13:43:20,967 | server.py:148 | fit progress: (3, 0.5843629244915138, {'accuracy': 0.8217}, 2.9750180000010005) - INFO flower 2022-01-13 13:43:20,968 | server.py:199 | evaluate_round: no clients selected, cancel - INFO flower 2022-01-13 13:43:20,968 | server.py:172 | FL finished in 2.975252800000817 - INFO flower 2022-01-13 13:43:20,968 | app.py:109 | app_fit: losses_distributed [] - INFO flower 2022-01-13 13:43:20,968 | app.py:110 | app_fit: metrics_distributed {} - INFO flower 2022-01-13 13:43:20,968 | app.py:111 | app_fit: losses_centralized [(0, 2.3025850929940455), (1, 1.3365667871792377), (2, 0.721620492535375), (3, 0.5843629244915138)] - INFO flower 2022-01-13 13:43:20,968 | app.py:112 | app_fit: metrics_centralized {'accuracy': [(0, 0.098), (1, 0.6605), (2, 0.7796), (3, 0.8217)]} - DEBUG flower 2022-01-13 13:43:20,968 | server.py:201 | evaluate_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2022-01-13 13:43:21,232 | server.py:210 | evaluate_round received 2 results and 0 failures - INFO flower 2022-01-13 13:43:21,232 | app.py:121 | app_evaluate: federated loss: 0.5843629240989685 - INFO flower 2022-01-13 13:43:21,232 | app.py:122 | app_evaluate: results [('ipv4:127.0.0.1:53980', EvaluateRes(loss=0.5843629240989685, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.8217})), ('ipv4:127.0.0.1:53982', EvaluateRes(loss=0.5843629240989685, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.8217}))] - INFO flower 2022-01-13 13:43:21,232 | app.py:127 | app_evaluate: failures [] - -Congratulations! -You've successfully built and run your first federated learning system. 
-The full `source code `_ for this example can be found in :code:`examples/sklearn-logreg-mnist`. +.. _serverappcomponents: ref-api/flwr.server.ServerAppComponents.html#serverappcomponents + +.. meta:: + :description: Check out this Federated Learning quickstart tutorial for using Flower with scikit-learn to train a logistic regression model. diff --git a/doc/source/tutorial-quickstart-tensorflow.rst b/doc/source/tutorial-quickstart-tensorflow.rst index bd63eb461d21..66cf69de6390 100644 --- a/doc/source/tutorial-quickstart-tensorflow.rst +++ b/doc/source/tutorial-quickstart-tensorflow.rst @@ -1,171 +1,290 @@ .. _quickstart-tensorflow: - Quickstart TensorFlow ===================== -.. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with TensorFlow to train a MobilNetV2 model on CIFAR-10. +In this tutorial we will learn how to train a Convolutional Neural Network on CIFAR-10 +using the Flower framework and TensorFlow. First of all, it is recommended to create a +virtual environment and run everything within a :doc:`virtualenv +`. + +Let's use `flwr new` to create a complete Flower+TensorFlow project. It will generate +all the files needed to run, by default with the Flower Simulation Engine, a federation +of 10 nodes using `FedAvg +`_. +The dataset will be partitioned using Flower Dataset's `IidPartitioner +`_. + +Now that we have a rough idea of what this example is about, let's get started. First, +install Flower in your new environment: -.. youtube:: FGTc2TQq7VM - :width: 100% +.. code-block:: shell -Let's build a federated learning system in less than 20 lines of code! + # In a new Python environment + $ pip install flwr -Before Flower can be imported we have to install it: +Then, run the command below. You will be prompted to select one of the available +templates (choose ``TensorFlow``), give a name to your project, and type in your +developer name: ..
code-block:: shell - $ pip install flwr + $ flwr new -Since we want to use the Keras API of TensorFlow (TF), we have to install TF as well: +After running it you'll notice a new directory with your project name has been created. +It should have the following structure: .. code-block:: shell - $ pip install tensorflow + + ├── + │ ├── __init__.py + │ ├── client_app.py # Defines your ClientApp + │ ├── server_app.py # Defines your ServerApp + │ └── task.py # Defines your model, training and data loading + ├── pyproject.toml # Project metadata like dependencies and configs + └── README.md +If you haven't yet installed the project and its dependencies, you can do so by: -Flower Client -------------- +.. code-block:: shell -Next, in a file called :code:`client.py`, import Flower and TensorFlow: + # From the directory where your pyproject.toml is + $ pip install -e . -.. code-block:: python +To run the project, do: - import flwr as fl - import tensorflow as tf +.. code-block:: shell -We use the Keras utilities of TF to load CIFAR10, a popular colored image classification -dataset for machine learning. The call to -:code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches it locally, -and then returns the entire training and test set as NumPy ndarrays. + # Run with default arguments + $ flwr run . -.. code-block:: python +With default arguments you will see an output like this one: + +.. code-block:: shell + + Loading project configuration... 
+ Success + INFO : Starting Flower ServerApp, config: num_rounds=3, no round_timeout + INFO : + INFO : [INIT] + INFO : Using initial global parameters provided by strategy + INFO : Starting evaluation of initial global parameters + INFO : Evaluation returned no results (`None`) + INFO : + INFO : [ROUND 1] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + WARNING : No fit_metrics_aggregation_fn provided + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + WARNING : No evaluate_metrics_aggregation_fn provided + INFO : + INFO : [ROUND 2] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [ROUND 3] + INFO : configure_fit: strategy sampled 10 clients (out of 10) + INFO : aggregate_fit: received 10 results and 0 failures + INFO : configure_evaluate: strategy sampled 10 clients (out of 10) + INFO : aggregate_evaluate: received 10 results and 0 failures + INFO : + INFO : [SUMMARY] + INFO : Run finished 3 round(s) in 31.31s + INFO : History (loss, distributed): + INFO : round 1: 1.9066195368766785 + INFO : round 2: 1.657227087020874 + INFO : round 3: 1.559039831161499 + INFO : + +You can also override the parameters defined in the ``[tool.flwr.app.config]`` section +in ``pyproject.toml`` like this: + +.. code-block:: shell + + # Override some arguments + $ flwr run . --run-config "num-server-rounds=5 batch-size=16" - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data() +The Data +-------- -Next, we need a model. 
For the purpose of this tutorial, we use MobilNetV2 with 10 output classes: +This tutorial uses `Flower Datasets `_ to easily +download and partition the `CIFAR-10` dataset. In this example you'll make use of the +`IidPartitioner +`_ +to generate `num_partitions` partitions. You can choose `other partitioners +`_ available in +Flower Datasets. Each ``ClientApp`` will call this function to create the ``NumPy`` +arrays that correspond to their data partition. .. code-block:: python - model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None) - model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id, "train") + partition.set_format("numpy") -The Flower server interacts with clients through an interface called -:code:`Client`. When the server selects a particular client for training, it -sends training instructions over the network. The client receives those -instructions and calls one of the :code:`Client` methods to run your code -(i.e., to train the neural network we defined earlier). + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2) + x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"] -Flower provides a convenience class called :code:`NumPyClient` which makes it -easier to implement the :code:`Client` interface when your workload uses Keras. -The :code:`NumPyClient` interface defines three methods which can be -implemented in the following way: +The Model +--------- + +Next, we need a model. We defined a simple Convolutional Neural Network (CNN), but feel +free to replace it with a more sophisticated model if you'd like: .. 
code-block:: python - class CifarClient(fl.client.NumPyClient): - def get_parameters(self, config): - return model.get_weights() + def load_model(learning_rate: float = 0.001): + # Define a simple CNN for CIFAR-10 and set Adam optimizer + model = keras.Sequential( + [ + keras.Input(shape=(32, 32, 3)), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(10, activation="softmax"), + ] + ) + model.compile( + "adam", + loss="sparse_categorical_crossentropy", + metrics=["accuracy"], + ) + return model + +The ClientApp +------------- - def fit(self, parameters, config): - model.set_weights(parameters) - model.fit(x_train, y_train, epochs=1, batch_size=32, steps_per_epoch=3) - return model.get_weights(), len(x_train), {} +With `TensorFlow`, we can use the built-in ``get_weights()`` and ``set_weights()`` +functions, which simplifies the implementation with `Flower`. The rest of the +functionality in the ClientApp is directly inspired by the centralized case. The +``fit()`` method in the client trains the model using the local dataset. Similarly, the +``evaluate()`` method is used to evaluate the model received on a held-out validation +set that the client might have: - def evaluate(self, parameters, config): - model.set_weights(parameters) - loss, accuracy = model.evaluate(x_test, y_test) - return loss, len(x_test), {"accuracy": float(accuracy)} +.. 
code-block:: python + + class FlowerClient(NumPyClient): + def __init__(self, model, data, epochs, batch_size, verbose): + self.model = model + self.x_train, self.y_train, self.x_test, self.y_test = data + self.epochs = epochs + self.batch_size = batch_size + self.verbose = verbose + def fit(self, parameters, config): + self.model.set_weights(parameters) + self.model.fit( + self.x_train, + self.y_train, + epochs=self.epochs, + batch_size=self.batch_size, + verbose=self.verbose, + ) + return self.model.get_weights(), len(self.x_train), {} -We can now create an instance of our class :code:`CifarClient` and add one line -to actually run this client: + def evaluate(self, parameters, config): + self.model.set_weights(parameters) + loss, accuracy = self.model.evaluate(self.x_test, self.y_test, verbose=0) + return loss, len(self.x_test), {"accuracy": accuracy} + +Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` defined above by +means of a ``client_fn()`` callback. Note that the `context` enables you to get access +to hyperparameters defined in your ``pyproject.toml`` to configure the run. For example, +in this tutorial we access the `local-epochs` setting to control the number of epochs a +``ClientApp`` will perform when running the ``fit()`` method, in addition to +`batch-size`. You could define additional hyperparameters in ``pyproject.toml`` and +access them here. .. code-block:: python - fl.client.start_client(server_address="[::]:8080", client=CifarClient().to_client()) + def client_fn(context: Context): + # Load model and data + net = load_model() + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + data = load_data(partition_id, num_partitions) + epochs = context.run_config["local-epochs"] + batch_size = context.run_config["batch-size"] + verbose = context.run_config.get("verbose") -That's it for the client. 
We only have to implement :code:`Client` or -:code:`NumPyClient` and call :code:`fl.client.start_client()`. If you implement a client of type :code:`NumPyClient` you'll need to first call its :code:`to_client()` method. The string :code:`"[::]:8080"` tells the client which server to connect to. In our case we can run the server and the client on the same machine, therefore we use -:code:`"[::]:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we point the client at. + # Return Client instance + return FlowerClient(net, data, epochs, batch_size, verbose).to_client() -Flower Server + # Flower ClientApp + app = ClientApp(client_fn=client_fn) + +The ServerApp ------------- -For simple workloads we can start a Flower server and leave all the -configuration possibilities at their default values. In a file named -:code:`server.py`, import Flower and start the server: +To construct a ``ServerApp`` we define a ``server_fn()`` callback with an identical +signature to that of ``client_fn()`` but the return type is `ServerAppComponents +`_ +as opposed to a `Client +`_. In this +example we use the `FedAvg`. To it we pass a randomly initialized model that will serve +as the global model to federate. .. code-block:: python - import flwr as fl + def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] - fl.server.start_server(config=fl.server.ServerConfig(num_rounds=3)) + # Get parameters to initialize global model + parameters = ndarrays_to_parameters(load_model().get_weights()) + # Define strategy + strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) -Train the model, federated! 
---------------------------- + return ServerAppComponents(strategy=strategy, config=config) -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: -.. code-block:: shell + # Create ServerApp + app = ServerApp(server_fn=server_fn) - $ python server.py +Congratulations! You've successfully built and run your first federated learning system. -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: +.. note:: -.. code-block:: shell + Check the source code of the extended version of this tutorial in + |quickstart_tf_link|_ in the Flower GitHub repository. - $ python client.py +.. |quickstart_tf_link| replace:: ``examples/quickstart-tensorflow`` -Open another terminal and start the second client: +.. _quickstart_tf_link: https://github.com/adap/flower/blob/main/examples/quickstart-tensorflow -.. code-block:: shell +Video tutorial +-------------- - $ python client.py +.. note:: -Each client will have its own dataset. + The video shown below shows how to setup a TensorFlow + Flower project using our + previously recommended APIs. A new video tutorial will be released that shows the + new APIs (as the content above does) -You should now see how the training does in the very first terminal (the one -that started the server): - -.. code-block:: shell +.. meta:: + :description: Check out this Federated Learning quickstart tutorial for using Flower with TensorFlow to train a CNN model on CIFAR-10. 
- INFO flower 2021-02-25 14:15:46,741 | app.py:76 | Flower server running (insecure, 3 rounds) - INFO flower 2021-02-25 14:15:46,742 | server.py:72 | Getting initial parameters - INFO flower 2021-02-25 14:16:01,770 | server.py:74 | Evaluating initial parameters - INFO flower 2021-02-25 14:16:01,770 | server.py:87 | [TIME] FL starting - DEBUG flower 2021-02-25 14:16:12,341 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:21:17,235 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:21:17,512 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:21:29,628 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-02-25 14:21:29,696 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:25:59,917 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:26:00,227 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:26:11,457 | server.py:149 | evaluate received 2 results and 0 failures - DEBUG flower 2021-02-25 14:26:11,530 | server.py:165 | fit_round: strategy sampled 2 clients (out of 2) - DEBUG flower 2021-02-25 14:30:43,389 | server.py:177 | fit_round received 2 results and 0 failures - DEBUG flower 2021-02-25 14:30:43,630 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:30:53,384 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-02-25 14:30:53,384 | server.py:122 | [TIME] FL finished in 891.6143046000007 - INFO flower 2021-02-25 14:30:53,385 | app.py:109 | app_fit: losses_distributed [(1, 2.3196680545806885), (2, 2.3202896118164062), (3, 2.1818180084228516)] - INFO flower 2021-02-25 14:30:53,385 | app.py:110 | app_fit: accuracies_distributed [] - INFO flower 2021-02-25 14:30:53,385 | app.py:111 | app_fit: losses_centralized [] - INFO flower 2021-02-25 
14:30:53,385 | app.py:112 | app_fit: accuracies_centralized [] - DEBUG flower 2021-02-25 14:30:53,442 | server.py:139 | evaluate: strategy sampled 2 clients - DEBUG flower 2021-02-25 14:31:02,848 | server.py:149 | evaluate received 2 results and 0 failures - INFO flower 2021-02-25 14:31:02,848 | app.py:121 | app_evaluate: federated loss: 2.1818180084228516 - INFO flower 2021-02-25 14:31:02,848 | app.py:125 | app_evaluate: results [('ipv4:127.0.0.1:57158', EvaluateRes(loss=2.1818180084228516, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.21610000729560852})), ('ipv4:127.0.0.1:57160', EvaluateRes(loss=2.1818180084228516, num_examples=10000, accuracy=0.0, metrics={'accuracy': 0.21610000729560852}))] - INFO flower 2021-02-25 14:31:02,848 | app.py:127 | app_evaluate: failures [] flower 2020-07-15 10:07:56,396 | app.py:77 | app_evaluate: failures [] - -Congratulations! You've successfully built and run your first federated -learning system. The full `source code `_ for this can be found in -:code:`examples/quickstart-tensorflow/client.py`. +.. youtube:: FGTc2TQq7VM + :width: 100% diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index 34ad5f6e99c0..fe15227fdf11 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -1,63 +1,75 @@ .. _quickstart-xgboost: - Quickstart XGBoost -===================== +================== .. meta:: - :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train classification models on trees. + :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train classification models on trees. -.. youtube:: AY1vpXUpesc - :width: 100% +.. 
youtube:: AY1vpXUpesc + :width: 100% Federated XGBoost -------------------- +----------------- -EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient implementation of gradient-boosted decision tree (**GBDT**), that maximises the computational boundaries for boosted tree methods. -It's primarily designed to enhance both the performance and computational speed of machine learning models. -In XGBoost, trees are constructed concurrently, unlike the sequential approach taken by GBDT. +EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient implementation of +gradient-boosted decision tree (**GBDT**), that maximises the computational boundaries +for boosted tree methods. It's primarily designed to enhance both the performance and +computational speed of machine learning models. In XGBoost, trees are constructed +concurrently, unlike the sequential approach taken by GBDT. -Often, for tabular data on medium-sized datasets with fewer than 10k training examples, XGBoost surpasses the results of deep learning techniques. +Often, for tabular data on medium-sized datasets with fewer than 10k training examples, +XGBoost surpasses the results of deep learning techniques. Why federated XGBoost? -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Indeed, as the demand for data privacy and decentralized learning grows, there's an increasing requirement to implement federated XGBoost systems for specialised applications, like survival analysis and financial fraud detection. +~~~~~~~~~~~~~~~~~~~~~~ -Federated learning ensures that raw data remains on the local device, making it an attractive approach for sensitive domains where data security and privacy are paramount. -Given the robustness and efficiency of XGBoost, combining it with federated learning offers a promising solution for these specific challenges. 
+Indeed, as the demand for data privacy and decentralized learning grows, there's an +increasing requirement to implement federated XGBoost systems for specialised +applications, like survival analysis and financial fraud detection. -In this tutorial we will learn how to train a federated XGBoost model on HIGGS dataset using Flower and :code:`xgboost` package. -We use a simple example (`full code xgboost-quickstart `_) with two *clients* and one *server* -to demonstrate how federated XGBoost works, -and then we dive into a more complex example (`full code xgboost-comprehensive `_) to run various experiments. +Federated learning ensures that raw data remains on the local device, making it an +attractive approach for sensitive domains where data security and privacy are paramount. +Given the robustness and efficiency of XGBoost, combining it with federated learning +offers a promising solution for these specific challenges. +In this tutorial we will learn how to train a federated XGBoost model on HIGGS dataset +using Flower and ``xgboost`` package. We use a simple example (`full code +xgboost-quickstart +`_) with two +*clients* and one *server* to demonstrate how federated XGBoost works, and then we dive +into a more complex example (`full code xgboost-comprehensive +`_) to run +various experiments. Environment Setup --------------------- +----------------- -First of all, it is recommended to create a virtual environment and run everything within a :doc:`virtualenv `. +First of all, it is recommended to create a virtual environment and run everything +within a :doc:`virtualenv `. We first need to install Flower and Flower Datasets. You can do this by running : .. 
code-block:: shell - $ pip install flwr flwr-datasets + $ pip install flwr flwr-datasets -Since we want to use :code:`xgboost` package to build up XGBoost trees, let's go ahead and install :code:`xgboost`: +Since we want to use ``xgboost`` package to build up XGBoost trees, let's go ahead and +install ``xgboost``: .. code-block:: shell - $ pip install xgboost - + $ pip install xgboost Flower Client ------------------ +------------- -*Clients* are responsible for generating individual weight-updates for the model based on their local datasets. -Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. +*Clients* are responsible for generating individual weight-updates for the model based +on their local datasets. Now that we have all our dependencies installed, let's run a +simple distributed training with two clients and one server. -In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets and other related functions: +In a file called ``client.py``, import xgboost, Flower, Flower Datasets and other +related functions: .. code-block:: python @@ -84,9 +96,10 @@ In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets and from flwr_datasets.partitioner import IidPartitioner Dataset partition and hyper-parameter selection -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Prior to local training, we require loading the HIGGS dataset from Flower Datasets and conduct data partitioning for FL: +Prior to local training, we require loading the HIGGS dataset from Flower Datasets and +conduct data partitioning for FL: .. 
code-block:: python @@ -99,8 +112,9 @@ Prior to local training, we require loading the HIGGS dataset from Flower Datase partition = fds.load_partition(partition_id=args.partition_id, split="train") partition.set_format("numpy") -In this example, we split the dataset into 30 partitions with uniform distribution (:code:`IidPartitioner(num_partitions=30)`). -Then, we load the partition for the given client based on :code:`partition_id`: +In this example, we split the dataset into 30 partitions with uniform distribution +(``IidPartitioner(num_partitions=30)``). Then, we load the partition for the given +client based on ``partition_id``: .. code-block:: python @@ -118,7 +132,8 @@ Then, we load the partition for the given client based on :code:`partition_id`: partition = fds.load_partition(idx=args.partition_id, split="train") partition.set_format("numpy") -After that, we do train/test splitting on the given partition (client's local data), and transform data format for :code:`xgboost` package. +After that, we do train/test splitting on the given partition (client's local data), and +transform data format for ``xgboost`` package. .. code-block:: python @@ -131,7 +146,8 @@ After that, we do train/test splitting on the given partition (client's local da train_dmatrix = transform_dataset_to_dmatrix(train_data) valid_dmatrix = transform_dataset_to_dmatrix(valid_data) -The functions of :code:`train_test_split` and :code:`transform_dataset_to_dmatrix` are defined as below: +The functions of ``train_test_split`` and ``transform_dataset_to_dmatrix`` are defined +as below: .. code-block:: python @@ -171,40 +187,39 @@ Finally, we define the hyper-parameters used for XGBoost training. "tree_method": "hist", } -The :code:`num_local_round` represents the number of iterations for local tree boost. -We use CPU for the training in default. -One can shift it to GPU by setting :code:`tree_method` to :code:`gpu_hist`. -We use AUC as evaluation metric. 
- +The ``num_local_round`` represents the number of iterations for local tree boost. We use +CPU for the training by default. One can shift it to GPU by setting ``tree_method`` to +``gpu_hist``. We use AUC as evaluation metric. Flower client definition for XGBoost -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -After loading the dataset we define the Flower client. -We follow the general rule to define :code:`XgbClient` class inherited from :code:`fl.client.Client`. +After loading the dataset we define the Flower client. We follow the general rule to +define ``XgbClient`` class inherited from ``fl.client.Client``. .. code-block:: python class XgbClient(fl.client.Client): - def __init__( - self, - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - ): - self.train_dmatrix = train_dmatrix - self.valid_dmatrix = valid_dmatrix - self.num_train = num_train - self.num_val = num_val - self.num_local_round = num_local_round - self.params = params - -All required parameters defined above are passed to :code:`XgbClient`'s constructor. - -Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` methods insides :code:`XgbClient` class as follows. + def __init__( + self, + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + ): + self.train_dmatrix = train_dmatrix + self.valid_dmatrix = valid_dmatrix + self.num_train = num_train + self.num_val = num_val + self.num_local_round = num_local_round + self.params = params + +All required parameters defined above are passed to ``XgbClient``'s constructor. + +Then, we override ``get_parameters``, ``fit`` and ``evaluate`` methods inside +``XgbClient`` class as follows. .. 
code-block:: python @@ -218,9 +233,10 @@ Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` metho parameters=Parameters(tensor_type="", tensors=[]), ) -Unlike neural network training, XGBoost trees are not started from a specified random weights. -In this case, we do not use :code:`get_parameters` and :code:`set_parameters` to initialise model parameters for XGBoost. -As a result, let's return an empty tensor in :code:`get_parameters` when it is called by the server at the first round. +Unlike neural network training, XGBoost trees are not started from specified random +weights. In this case, we do not use ``get_parameters`` and ``set_parameters`` to +initialise model parameters for XGBoost. As a result, let's return an empty tensor in +``get_parameters`` when it is called by the server at the first round. .. code-block:: python @@ -259,9 +275,10 @@ As a result, let's return an empty tensor in :code:`get_parameters` when it is c metrics={}, ) -In :code:`fit`, at the first round, we call :code:`xgb.train()` to build up the first set of trees. -From the second round, we load the global model sent from server to new build Booster object, -and then update model weights on local training data with function :code:`local_boost` as follows: +In ``fit``, at the first round, we call ``xgb.train()`` to build up the first set of +trees. From the second round, we load the global model sent from the server into a newly +built Booster object, and then update model weights on local training data with function +``local_boost`` as follows: .. code-block:: python @@ -278,8 +295,8 @@ and then update model weights on local training data with function :code:`local_ return bst -Given :code:`num_local_round`, we update trees by calling :code:`bst_input.update` method. -After training, the last :code:`N=num_local_round` trees will be extracted to send to the server. +Given ``num_local_round``, we update trees by calling ``bst_input.update`` method. 
After +training, the last ``N=num_local_round`` trees will be extracted to send to the server. .. code-block:: python @@ -310,40 +327,42 @@ After training, the last :code:`N=num_local_round` trees will be extracted to se metrics={"AUC": auc}, ) -In :code:`evaluate`, after loading the global model, we call :code:`bst.eval_set` function to conduct evaluation on valid set. -The AUC value will be returned. +In ``evaluate``, after loading the global model, we call ``bst.eval_set`` function to +conduct evaluation on valid set. The AUC value will be returned. -Now, we can create an instance of our class :code:`XgbClient` and add one line to actually run this client: +Now, we can create an instance of our class ``XgbClient`` and add one line to actually +run this client: .. code-block:: python - fl.client.start_client( - server_address="127.0.0.1:8080", - client=XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - ).to_client(), - ) - -That's it for the client. We only have to implement :code:`Client` and call :code:`fl.client.start_client()`. -The string :code:`"[::]:8080"` tells the client which server to connect to. -In our case we can run the server and the client on the same machine, therefore we use -:code:`"[::]:8080"`. If we run a truly federated workload with the server and -clients running on different machines, all that needs to change is the -:code:`server_address` we point the client at. + fl.client.start_client( + server_address="127.0.0.1:8080", + client=XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + ).to_client(), + ) +That's it for the client. We only have to implement ``Client`` and call +``fl.client.start_client()``. The string ``"[::]:8080"`` tells the client which server +to connect to. In our case we can run the server and the client on the same machine, +therefore we use ``"[::]:8080"``. 
If we run a truly federated workload with the server +and clients running on different machines, all that needs to change is the +``server_address`` we point the client at. Flower Server ------------------- +------------- -These updates are then sent to the *server* which will aggregate them to produce a better model. -Finally, the *server* sends this improved version of the model back to each *client* to finish a complete FL round. +These updates are then sent to the *server* which will aggregate them to produce a +better model. Finally, the *server* sends this improved version of the model back to +each *client* to finish a complete FL round. -In a file named :code:`server.py`, import Flower and FedXgbBagging from :code:`flwr.server.strategy`. +In a file named ``server.py``, import Flower and FedXgbBagging from +``flwr.server.strategy``. We first define a strategy for XGBoost bagging aggregation. @@ -361,6 +380,7 @@ We first define a strategy for XGBoost bagging aggregation. on_fit_config_fn=config_func, ) + def evaluate_metrics_aggregation(eval_metrics): """Return an aggregated metric (AUC) for evaluation.""" total_num = sum([num for num, _ in eval_metrics]) @@ -370,6 +390,7 @@ We first define a strategy for XGBoost bagging aggregation. metrics_aggregated = {"AUC": auc_aggregated} return metrics_aggregated + def config_func(rnd: int) -> Dict[str, str]: """Return a configuration with global epochs.""" config = { @@ -377,9 +398,10 @@ We first define a strategy for XGBoost bagging aggregation. } return config -We use two clients for this example. -An :code:`evaluate_metrics_aggregation` function is defined to collect and wighted average the AUC values from clients. -The :code:`config_func` function is to return the current FL round number to client's :code:`fit()` and :code:`evaluate()` methods. +We use two clients for this example. An ``evaluate_metrics_aggregation`` function is +defined to collect and compute a weighted average of the AUC values from clients. 
The ``config_func`` +function is to return the current FL round number to client's ``fit()`` and +``evaluate()`` methods. Then, we start the server: @@ -393,12 +415,13 @@ Then, we start the server: ) Tree-based bagging aggregation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You must be curious about how bagging aggregation works. Let's look into the details. -In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define :code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`. -Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :code:`evaluate` methods as follows: +In file ``flwr.server.strategy.fedxgb_bagging.py``, we define ``FedXgbBagging`` +inherited from ``flwr.server.strategy.FedAvg``. Then, we override the ``aggregate_fit``, +``aggregate_evaluate`` and ``evaluate`` methods as follows: .. code-block:: python @@ -493,7 +516,8 @@ Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :cod loss, metrics = eval_res return loss, metrics -In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost trees by calling :code:`aggregate()` function: +In ``aggregate_fit``, we sequentially aggregate the clients' XGBoost trees by calling +``aggregate()`` function: .. code-block:: python @@ -552,28 +576,27 @@ In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost trees b ) return tree_num, paral_tree_num -In this function, we first fetch the number of trees and the number of parallel trees for the current and previous model -by calling :code:`_get_tree_nums`. -Then, the fetched information will be aggregated. -After that, the trees (containing model weights) are aggregated to generate a new tree model. - -After traversal of all clients' models, a new global model is generated, -followed by the serialisation, and sending back to each client. 
+In this function, we first fetch the number of trees and the number of parallel trees +for the current and previous model by calling ``_get_tree_nums``. Then, the fetched +information will be aggregated. After that, the trees (containing model weights) are +aggregated to generate a new tree model. +After traversal of all clients' models, a new global model is generated, followed by the +serialisation, and sending back to each client. Launch Federated XGBoost! -------------------------------- +------------------------- -With both client and server ready, we can now run everything and see federated -learning in action. FL systems usually have a server and multiple clients. We -therefore have to start the server first: +With both client and server ready, we can now run everything and see federated learning +in action. FL systems usually have a server and multiple clients. We therefore have to +start the server first: .. code-block:: shell $ python3 server.py -Once the server is running we can start the clients in different terminals. -Open a new terminal and start the first client: +Once the server is running we can start the clients in different terminals. Open a new +terminal and start the first client: .. code-block:: shell @@ -585,8 +608,8 @@ Open another terminal and start the second client: $ python3 client.py --partition-id=1 -Each client will have its own dataset. -You should now see how the training does in the very first terminal (the one that started the server): +Each client will have its own dataset. You should now see how the training does in the +very first terminal (the one that started the server): .. 
code-block:: shell @@ -629,192 +652,197 @@ You should now see how the training does in the very first terminal (the one tha INFO : INFO : [SUMMARY] INFO : Run finished 5 round(s) in 1.67s - INFO : History (loss, distributed): - INFO : round 1: 0 - INFO : round 2: 0 - INFO : round 3: 0 - INFO : round 4: 0 - INFO : round 5: 0 - INFO : History (metrics, distributed, evaluate): - INFO : {'AUC': [(1, 0.76755), (2, 0.775), (3, 0.77935), (4, 0.7836), (5, 0.7872)]} - -Congratulations! -You've successfully built and run your first federated XGBoost system. -The AUC values can be checked in :code:`metrics_distributed`. -One can see that the average AUC increases over FL rounds. - -The full `source code `_ for this example can be found in :code:`examples/xgboost-quickstart`. - + INFO : History (loss, distributed): + INFO : round 1: 0 + INFO : round 2: 0 + INFO : round 3: 0 + INFO : round 4: 0 + INFO : round 5: 0 + INFO : History (metrics, distributed, evaluate): + INFO : {'AUC': [(1, 0.76755), (2, 0.775), (3, 0.77935), (4, 0.7836), (5, 0.7872)]} + +Congratulations! You've successfully built and run your first federated XGBoost system. +The AUC values can be checked in ``metrics_distributed``. One can see that the average +AUC increases over FL rounds. + +The full `source code +`_ for this +example can be found in ``examples/xgboost-quickstart``. Comprehensive Federated XGBoost ------------------------------------ +------------------------------- -Now that you have known how federated XGBoost work with Flower, it's time to run some more comprehensive experiments by customising the experimental settings. -In the xgboost-comprehensive example (`full code `_), -we provide more options to define various experimental setups, including aggregation strategies, data partitioning and centralised/distributed evaluation. -We also support :doc:`Flower simulation ` making it easy to simulate large client cohorts in a resource-aware manner. -Let's take a look! 
+Now that you know how federated XGBoost works with Flower, it's time to run some +more comprehensive experiments by customising the experimental settings. In the +xgboost-comprehensive example (`full code +`_), we provide +more options to define various experimental setups, including aggregation strategies, +data partitioning and centralised/distributed evaluation. We also support :doc:`Flower +simulation ` making it easy to simulate large client cohorts in +a resource-aware manner. Let's take a look! Cyclic training -~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~ -In addition to bagging aggregation, we offer a cyclic training scheme, which performs FL -in a client-by-client fashion. Instead of aggregating multiple clients, there is only -one single client participating in the training per round in the cyclic training -scenario. The trained local XGBoost trees will be passed to the next client as an -initialised model for next round's boosting. +In addition to bagging aggregation, we offer a cyclic training scheme, which performs FL +in a client-by-client fashion. Instead of aggregating multiple clients, there is only +one single client participating in the training per round in the cyclic training +scenario. The trained local XGBoost trees will be passed to the next client as an +initialised model for next round's boosting. -To do this, we first customise a :code:`ClientManager` in :code:`server_utils.py`: +To do this, we first customise a ``ClientManager`` in ``server_utils.py``: .. code-block:: python - class CyclicClientManager(SimpleClientManager): - """Provides a cyclic client selection rule.""" - - def sample( - self, - num_clients: int, - min_num_clients: Optional[int] = None, - criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: - """Sample a number of Flower ClientProxy instances.""" - - # Block until at least num_clients are connected. 
- if min_num_clients is None: - min_num_clients = num_clients - self.wait_for(min_num_clients) - - # Sample clients which meet the criterion - available_cids = list(self.clients) - if criterion is not None: - available_cids = [ - cid for cid in available_cids if criterion.select(self.clients[cid]) - ] - - if num_clients > len(available_cids): - log( - INFO, - "Sampling failed: number of available clients" - " (%s) is less than number of requested clients (%s).", - len(available_cids), - num_clients, - ) - return [] - - # Return all available clients - return [self.clients[cid] for cid in available_cids] - -The customised :code:`ClientManager` samples all available clients in each FL round based on the order of connection to the server. -Then, we define a new strategy :code:`FedXgbCyclic` in :code:`flwr.server.strategy.fedxgb_cyclic.py`, -in order to sequentially select only one client in given round and pass the received model to next client. + class CyclicClientManager(SimpleClientManager): + """Provides a cyclic client selection rule.""" + + def sample( + self, + num_clients: int, + min_num_clients: Optional[int] = None, + criterion: Optional[Criterion] = None, + ) -> List[ClientProxy]: + """Sample a number of Flower ClientProxy instances.""" + + # Block until at least num_clients are connected. 
+ if min_num_clients is None: + min_num_clients = num_clients + self.wait_for(min_num_clients) + + # Sample clients which meet the criterion + available_cids = list(self.clients) + if criterion is not None: + available_cids = [ + cid for cid in available_cids if criterion.select(self.clients[cid]) + ] + + if num_clients > len(available_cids): + log( + INFO, + "Sampling failed: number of available clients" + " (%s) is less than number of requested clients (%s).", + len(available_cids), + num_clients, + ) + return [] + + # Return all available clients + return [self.clients[cid] for cid in available_cids] + +The customised ``ClientManager`` samples all available clients in each FL round based on +the order of connection to the server. Then, we define a new strategy ``FedXgbCyclic`` +in ``flwr.server.strategy.fedxgb_cyclic.py``, in order to sequentially select only one +client in given round and pass the received model to next client. .. code-block:: python - class FedXgbCyclic(FedAvg): - """Configurable FedXgbCyclic strategy implementation.""" - - # pylint: disable=too-many-arguments,too-many-instance-attributes, line-too-long - def __init__( - self, - **kwargs: Any, - ): - self.global_model: Optional[bytes] = None - super().__init__(**kwargs) - - def aggregate_fit( - self, - server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: - """Aggregate fit results using bagging.""" - if not results: - return None, {} - # Do not aggregate if there are failures and failures are not accepted - if not self.accept_failures and failures: - return None, {} - - # Fetch the client model from last round as global model - for _, fit_res in results: - update = fit_res.parameters.tensors - for bst in update: - self.global_model = bst - - return ( - Parameters(tensor_type="", tensors=[cast(bytes, self.global_model)]), - {}, - ) - -Unlike the original 
:code:`FedAvg`, we don't perform aggregation here. -Instead, we just make a copy of the received client model as global model by overriding :code:`aggregate_fit`. - -Also, the customised :code:`configure_fit` and :code:`configure_evaluate` methods ensure the clients to be sequentially selected given FL round: + class FedXgbCyclic(FedAvg): + """Configurable FedXgbCyclic strategy implementation.""" + + # pylint: disable=too-many-arguments,too-many-instance-attributes, line-too-long + def __init__( + self, + **kwargs: Any, + ): + self.global_model: Optional[bytes] = None + super().__init__(**kwargs) + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Aggregate fit results using bagging.""" + if not results: + return None, {} + # Do not aggregate if there are failures and failures are not accepted + if not self.accept_failures and failures: + return None, {} + + # Fetch the client model from last round as global model + for _, fit_res in results: + update = fit_res.parameters.tensors + for bst in update: + self.global_model = bst + + return ( + Parameters(tensor_type="", tensors=[cast(bytes, self.global_model)]), + {}, + ) + +Unlike the original ``FedAvg``, we don't perform aggregation here. Instead, we just make +a copy of the received client model as global model by overriding ``aggregate_fit``. + +Also, the customised ``configure_fit`` and ``configure_evaluate`` methods ensure the +clients to be sequentially selected given FL round: .. 
code-block:: python - def configure_fit( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: - """Configure the next round of training.""" - config = {} - if self.on_fit_config_fn is not None: - # Custom fit config function provided - config = self.on_fit_config_fn(server_round) - fit_ins = FitIns(parameters, config) - - # Sample clients - sample_size, min_num_clients = self.num_fit_clients( - client_manager.num_available() - ) - clients = client_manager.sample( - num_clients=sample_size, - min_num_clients=min_num_clients, - ) - - # Sample the clients sequentially given server_round - sampled_idx = (server_round - 1) % len(clients) - sampled_clients = [clients[sampled_idx]] - - # Return client/config pairs - return [(client, fit_ins) for client in sampled_clients] - - def configure_evaluate( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: - """Configure the next round of evaluation.""" - # Do not configure federated evaluation if fraction eval is 0. 
- if self.fraction_evaluate == 0.0: - return [] - - # Parameters and config - config = {} - if self.on_evaluate_config_fn is not None: - # Custom evaluation config function provided - config = self.on_evaluate_config_fn(server_round) - evaluate_ins = EvaluateIns(parameters, config) - - # Sample clients - sample_size, min_num_clients = self.num_evaluation_clients( - client_manager.num_available() - ) - clients = client_manager.sample( - num_clients=sample_size, - min_num_clients=min_num_clients, - ) - - # Sample the clients sequentially given server_round - sampled_idx = (server_round - 1) % len(clients) - sampled_clients = [clients[sampled_idx]] - - # Return client/config pairs - return [(client, evaluate_ins) for client in sampled_clients] + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, FitIns]]: + """Configure the next round of training.""" + config = {} + if self.on_fit_config_fn is not None: + # Custom fit config function provided + config = self.on_fit_config_fn(server_round) + fit_ins = FitIns(parameters, config) + + # Sample clients + sample_size, min_num_clients = self.num_fit_clients(client_manager.num_available()) + clients = client_manager.sample( + num_clients=sample_size, + min_num_clients=min_num_clients, + ) + + # Sample the clients sequentially given server_round + sampled_idx = (server_round - 1) % len(clients) + sampled_clients = [clients[sampled_idx]] + + # Return client/config pairs + return [(client, fit_ins) for client in sampled_clients] + def configure_evaluate( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, EvaluateIns]]: + """Configure the next round of evaluation.""" + # Do not configure federated evaluation if fraction eval is 0. 
+ if self.fraction_evaluate == 0.0: + return [] + + # Parameters and config + config = {} + if self.on_evaluate_config_fn is not None: + # Custom evaluation config function provided + config = self.on_evaluate_config_fn(server_round) + evaluate_ins = EvaluateIns(parameters, config) + + # Sample clients + sample_size, min_num_clients = self.num_evaluation_clients( + client_manager.num_available() + ) + clients = client_manager.sample( + num_clients=sample_size, + min_num_clients=min_num_clients, + ) + + # Sample the clients sequentially given server_round + sampled_idx = (server_round - 1) % len(clients) + sampled_clients = [clients[sampled_idx]] + + # Return client/config pairs + return [(client, evaluate_ins) for client in sampled_clients] Customised data partitioning -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In :code:`dataset.py`, we have a function :code:`instantiate_partitioner` to instantiate the data partitioner -based on the given :code:`num_partitions` and :code:`partitioner_type`. -Currently, we provide four supported partitioner type to simulate the uniformity/non-uniformity in data quantity (uniform, linear, square, exponential). +In ``dataset.py``, we have a function ``instantiate_partitioner`` to instantiate the +data partitioner based on the given ``num_partitions`` and ``partitioner_type``. +Currently, we provide four supported partitioner type to simulate the +uniformity/non-uniformity in data quantity (uniform, linear, square, exponential). .. code-block:: python @@ -841,11 +869,10 @@ Currently, we provide four supported partitioner type to simulate the uniformity ) return partitioner - Customised centralised/distributed evaluation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To facilitate centralised evaluation, we define a function in :code:`server_utils.py`: +To facilitate centralised evaluation, we define a function in ``server_utils.py``: .. 
code-block:: python @@ -877,105 +904,112 @@ To facilitate centralised evaluation, we define a function in :code:`server_util return evaluate_fn -This function returns a evaluation function which instantiates a :code:`Booster` object and loads the global model weights to it. -The evaluation is conducted by calling :code:`eval_set()` method, and the tested AUC value is reported. +This function returns a evaluation function which instantiates a ``Booster`` object and +loads the global model weights to it. The evaluation is conducted by calling +``eval_set()`` method, and the tested AUC value is reported. As for distributed evaluation on the clients, it's same as the quick-start example by -overriding the :code:`evaluate()` method insides the :code:`XgbClient` class in :code:`client_utils.py`. +overriding the ``evaluate()`` method insides the ``XgbClient`` class in +``client_utils.py``. Flower simulation -~~~~~~~~~~~~~~~~~~~~ -We also provide an example code (:code:`sim.py`) to use the simulation capabilities of Flower to simulate federated XGBoost training on either a single machine or a cluster of machines. - -.. 
code-block:: python +~~~~~~~~~~~~~~~~~ - from logging import INFO - import xgboost as xgb - from tqdm import tqdm - - import flwr as fl - from flwr_datasets import FederatedDataset - from flwr.common.logger import log - from flwr.server.strategy import FedXgbBagging, FedXgbCyclic - - from dataset import ( - instantiate_partitioner, - train_test_split, - transform_dataset_to_dmatrix, - separate_xy, - resplit, - ) - from utils import ( - sim_args_parser, - NUM_LOCAL_ROUND, - BST_PARAMS, - ) - from server_utils import ( - eval_config, - fit_config, - evaluate_metrics_aggregation, - get_evaluate_fn, - CyclicClientManager, - ) - from client_utils import XgbClient - -After importing all required packages, we define a :code:`main()` function to perform the simulation process: +We also provide an example code (``sim.py``) to use the simulation capabilities of +Flower to simulate federated XGBoost training on either a single machine or a cluster of +machines. .. code-block:: python - def main(): - # Parse arguments for experimental settings - args = sim_args_parser() + from logging import INFO + import xgboost as xgb + from tqdm import tqdm - # Load (HIGGS) dataset and conduct partitioning - partitioner = instantiate_partitioner( - partitioner_type=args.partitioner_type, num_partitions=args.pool_size + import flwr as fl + from flwr_datasets import FederatedDataset + from flwr.common.logger import log + from flwr.server.strategy import FedXgbBagging, FedXgbCyclic + + from dataset import ( + instantiate_partitioner, + train_test_split, + transform_dataset_to_dmatrix, + separate_xy, + resplit, + ) + from utils import ( + sim_args_parser, + NUM_LOCAL_ROUND, + BST_PARAMS, ) - fds = FederatedDataset( - dataset="jxie/higgs", - partitioners={"train": partitioner}, - resplitter=resplit, + from server_utils import ( + eval_config, + fit_config, + evaluate_metrics_aggregation, + get_evaluate_fn, + CyclicClientManager, ) + from client_utils import XgbClient - # Load centralised test 
set - if args.centralised_eval or args.centralised_eval_client: - log(INFO, "Loading centralised test set...") - test_data = fds.load_split("test") - test_data.set_format("numpy") - num_test = test_data.shape[0] - test_dmatrix = transform_dataset_to_dmatrix(test_data) - - # Load partitions and reformat data to DMatrix for xgboost - log(INFO, "Loading client local partitions...") - train_data_list = [] - valid_data_list = [] - - # Load and process all client partitions. This upfront cost is amortized soon - # after the simulation begins since clients wont need to preprocess their partition. - for node_id in tqdm(range(args.pool_size), desc="Extracting client partition"): - # Extract partition for client with node_id - partition = fds.load_partition(node_id=node_id, split="train") - partition.set_format("numpy") - - if args.centralised_eval_client: - # Use centralised test set for evaluation - train_data = partition - num_train = train_data.shape[0] - x_test, y_test = separate_xy(test_data) - valid_data_list.append(((x_test, y_test), num_test)) - else: - # Train/test splitting - train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=args.test_fraction, seed=args.seed - ) - x_valid, y_valid = separate_xy(valid_data) - valid_data_list.append(((x_valid, y_valid), num_val)) +After importing all required packages, we define a ``main()`` function to perform the +simulation process: - x_train, y_train = separate_xy(train_data) - train_data_list.append(((x_train, y_train), num_train)) +.. code-block:: python + + def main(): + # Parse arguments for experimental settings + args = sim_args_parser() -We first load the dataset and perform data partitioning, and the pre-processed data is stored in a :code:`list`. -After the simulation begins, the clients won't need to pre-process their partitions again. 
+ # Load (HIGGS) dataset and conduct partitioning + partitioner = instantiate_partitioner( + partitioner_type=args.partitioner_type, num_partitions=args.pool_size + ) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + resplitter=resplit, + ) + + # Load centralised test set + if args.centralised_eval or args.centralised_eval_client: + log(INFO, "Loading centralised test set...") + test_data = fds.load_split("test") + test_data.set_format("numpy") + num_test = test_data.shape[0] + test_dmatrix = transform_dataset_to_dmatrix(test_data) + + # Load partitions and reformat data to DMatrix for xgboost + log(INFO, "Loading client local partitions...") + train_data_list = [] + valid_data_list = [] + + # Load and process all client partitions. This upfront cost is amortized soon + # after the simulation begins since clients wont need to preprocess their partition. + for node_id in tqdm(range(args.pool_size), desc="Extracting client partition"): + # Extract partition for client with node_id + partition = fds.load_partition(node_id=node_id, split="train") + partition.set_format("numpy") + + if args.centralised_eval_client: + # Use centralised test set for evaluation + train_data = partition + num_train = train_data.shape[0] + x_test, y_test = separate_xy(test_data) + valid_data_list.append(((x_test, y_test), num_test)) + else: + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=args.test_fraction, seed=args.seed + ) + x_valid, y_valid = separate_xy(valid_data) + valid_data_list.append(((x_valid, y_valid), num_val)) + + x_train, y_train = separate_xy(train_data) + train_data_list.append(((x_train, y_train), num_train)) + +We first load the dataset and perform data partitioning, and the pre-processed data is +stored in a ``list``. After the simulation begins, the clients won't need to pre-process +their partitions again. 
Then, we define the strategies and other hyper-parameters: @@ -985,21 +1019,21 @@ Then, we define the strategies and other hyper-parameters: if args.train_method == "bagging": # Bagging training strategy = FedXgbBagging( - evaluate_function=get_evaluate_fn(test_dmatrix) - if args.centralised_eval - else None, + evaluate_function=( + get_evaluate_fn(test_dmatrix) if args.centralised_eval else None + ), fraction_fit=(float(args.num_clients_per_round) / args.pool_size), min_fit_clients=args.num_clients_per_round, min_available_clients=args.pool_size, - min_evaluate_clients=args.num_evaluate_clients - if not args.centralised_eval - else 0, + min_evaluate_clients=( + args.num_evaluate_clients if not args.centralised_eval else 0 + ), fraction_evaluate=1.0 if not args.centralised_eval else 0.0, on_evaluate_config_fn=eval_config, on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation - if not args.centralised_eval - else None, + evaluate_metrics_aggregation_fn=( + evaluate_metrics_aggregation if not args.centralised_eval else None + ), ) else: # Cyclic training @@ -1028,7 +1062,7 @@ Then, we define the strategies and other hyper-parameters: new_lr = params["eta"] / args.pool_size params.update({"eta": new_lr}) -After that, we start the simulation by calling :code:`fl.simulation.start_simulation`: +After that, we start the simulation by calling ``fl.simulation.start_simulation``: .. code-block:: python @@ -1048,53 +1082,52 @@ After that, we start the simulation by calling :code:`fl.simulation.start_simula client_manager=CyclicClientManager() if args.train_method == "cyclic" else None, ) -One of key parameters for :code:`start_simulation` is :code:`client_fn` which returns a function to construct a client. -We define it as follows: +One of key parameters for ``start_simulation`` is ``client_fn`` which returns a function +to construct a client. We define it as follows: .. 
code-block:: python - def get_client_fn( - train_data_list, valid_data_list, train_method, params, num_local_round - ): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. - """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - x_train, y_train = train_data_list[int(cid)][0] - x_valid, y_valid = valid_data_list[int(cid)][0] - - # Reformat data to DMatrix - train_dmatrix = xgb.DMatrix(x_train, label=y_train) - valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) - - # Fetch the number of examples - num_train = train_data_list[int(cid)][1] - num_val = valid_data_list[int(cid)][1] - - # Create and return client - return XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - train_method, - ) - - return client_fn - + def get_client_fn( + train_data_list, valid_data_list, train_method, params, num_local_round + ): + """Return a function to construct a client. + + The VirtualClientEngine will execute this function whenever a client is sampled by + the strategy to participate. 
+ """ + + def client_fn(cid: str) -> fl.client.Client: + """Construct a FlowerClient with its own dataset partition.""" + x_train, y_train = train_data_list[int(cid)][0] + x_valid, y_valid = valid_data_list[int(cid)][0] + + # Reformat data to DMatrix + train_dmatrix = xgb.DMatrix(x_train, label=y_train) + valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) + + # Fetch the number of examples + num_train = train_data_list[int(cid)][1] + num_val = valid_data_list[int(cid)][1] + + # Create and return client + return XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ) + return client_fn Arguments parser -~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~ -In :code:`utils.py`, we define the arguments parsers for clients, server and simulation, allowing users to specify different experimental settings. -Let's first see the sever side: +In ``utils.py``, we define the arguments parsers for clients, server and simulation, +allowing users to specify different experimental settings. Let's first see the sever +side: .. code-block:: python @@ -1102,190 +1135,192 @@ Let's first see the sever side: def server_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--pool-size", default=2, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=5, type=int, help="Number of FL rounds." 
- ) - parser.add_argument( - "--num-clients-per-round", - default=2, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=2, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - - args = parser.parse_args() - return args - -This allows user to specify training strategies / the number of total clients / FL rounds / participating clients / clients for evaluation, -and evaluation fashion. Note that with :code:`--centralised-eval`, the sever will do centralised evaluation -and all functionalities for client evaluation will be disabled. + """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + parser.add_argument( + "--pool-size", default=2, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=5, type=int, help="Number of FL rounds." + ) + parser.add_argument( + "--num-clients-per-round", + default=2, + type=int, + help="Number of clients participate in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=2, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + + args = parser.parse_args() + return args + +This allows user to specify training strategies / the number of total clients / FL +rounds / participating clients / clients for evaluation, and evaluation fashion. 
Note +that with ``--centralised-eval``, the sever will do centralised evaluation and all +functionalities for client evaluation will be disabled. Then, the argument parser on client side: .. code-block:: python def client_args_parser(): - """Parse arguments to define experimental settings on client side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--num-partitions", default=10, type=int, help="Number of partitions." - ) - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--node-id", - default=0, - type=int, - help="Node ID used for the current client.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." - ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args - -This defines various options for client data partitioning. -Besides, clients also have an option to conduct evaluation on centralised test set by setting :code:`--centralised-eval`, -as well as an option to perform scaled learning rate based on the number of clients by setting :code:`--scaled-lr`. 
+ """Parse arguments to define experimental settings on client side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + parser.add_argument( + "--num-partitions", default=10, type=int, help="Number of partitions." + ) + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--node-id", + default=0, + type=int, + help="Node ID used for the current client.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." + ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", + ) + + args = parser.parse_args() + return args + +This defines various options for client data partitioning. Besides, clients also have an +option to conduct evaluation on centralised test set by setting ``--centralised-eval``, +as well as an option to perform scaled learning rate based on the number of clients by +setting ``--scaled-lr``. We also have an argument parser for simulation: .. 
code-block:: python - def sim_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - - # Server side - parser.add_argument( - "--pool-size", default=5, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=30, type=int, help="Number of FL rounds." - ) - parser.add_argument( - "--num-clients-per-round", - default=5, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=5, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - parser.add_argument( - "--num-cpus-per-client", - default=2, - type=int, - help="Number of CPUs used for per client.", - ) - - # Client side - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." 
- ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval-client", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args + def sim_args_parser(): + """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--train-method", + default="bagging", + type=str, + choices=["bagging", "cyclic"], + help="Training methods selected from bagging aggregation or cyclic training.", + ) + + # Server side + parser.add_argument( + "--pool-size", default=5, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=30, type=int, help="Number of FL rounds." + ) + parser.add_argument( + "--num-clients-per-round", + default=5, + type=int, + help="Number of clients participate in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=5, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + parser.add_argument( + "--num-cpus-per-client", + default=2, + type=int, + help="Number of CPUs used for per client.", + ) + + # Client side + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." 
+ ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval-client", + action="store_true", + help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", + ) + parser.add_argument( + "--scaled-lr", + action="store_true", + help="Perform scaled learning rate based on the number of clients (True).", + ) + + args = parser.parse_args() + return args This integrates all arguments for both client and server sides. Example commands -~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~ -To run a centralised evaluated experiment with bagging strategy on 5 clients with exponential distribution for 50 rounds, -we first start the server as below: +To run a centralised evaluated experiment with bagging strategy on 5 clients with +exponential distribution for 50 rounds, we first start the server as below: .. code-block:: shell @@ -1303,4 +1338,6 @@ To run the same experiment with Flower simulation: $ python3 sim.py --train-method=bagging --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --partitioner-type=exponential --centralised-eval -The full `code `_ for this comprehensive example can be found in :code:`examples/xgboost-comprehensive`. +The full `code +`_ for this +comprehensive example can be found in ``examples/xgboost-comprehensive``. 
diff --git a/e2e/docker/pyproject.toml b/e2e/docker/pyproject.toml index 955f30c7bf8d..def93ed4065d 100644 --- a/e2e/docker/pyproject.toml +++ b/e2e/docker/pyproject.toml @@ -6,9 +6,7 @@ build-backend = "hatchling.build" name = "e2e-docker" version = "0.1.0" description = "TOML used to define dependencies in a E2E test" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +authors = [{ name = "The Flower Authors", email = "hello@flower.ai" }] dependencies = [ "flwr-datasets[vision]>=0.1.0,<1.0.0", "torch==2.2.1", diff --git a/e2e/e2e-bare-auth/certificate.conf b/e2e/e2e-bare-auth/certificate.conf index ea97fcbb700d..04a2ed388174 100644 --- a/e2e/e2e-bare-auth/certificate.conf +++ b/e2e/e2e-bare-auth/certificate.conf @@ -18,3 +18,4 @@ subjectAltName = @alt_names DNS.1 = localhost IP.1 = ::1 IP.2 = 127.0.0.1 +IP.3 = 0.0.0.0 diff --git a/e2e/e2e-bare-auth/pyproject.toml b/e2e/e2e-bare-auth/pyproject.toml index 9b451c2ead99..d3ca5e543011 100644 --- a/e2e/e2e-bare-auth/pyproject.toml +++ b/e2e/e2e-bare-auth/pyproject.toml @@ -7,9 +7,7 @@ name = "e2e-bare-auth" version = "1.0.0" description = "Auth-enabled bare Federated Learning test with Flower" license = "Apache-2.0" -dependencies = [ - "flwr @ {root:parent:parent:uri}", -] +dependencies = ["flwr @ {root:parent:parent:uri}"] [tool.hatch.build.targets.wheel] packages = ["."] diff --git a/e2e/e2e-bare-https/pyproject.toml b/e2e/e2e-bare-https/pyproject.toml index 0316e2b8402a..e1ec84157788 100644 --- a/e2e/e2e-bare-https/pyproject.toml +++ b/e2e/e2e-bare-https/pyproject.toml @@ -7,9 +7,7 @@ name = "e2e-bare-https" version = "1.0.0" description = "HTTPS-enabled bare Federated Learning test with Flower" license = "Apache-2.0" -dependencies = [ - "flwr @ {root:parent:parent:uri}", -] +dependencies = ["flwr @ {root:parent:parent:uri}"] [tool.hatch.build.targets.wheel] packages = ["."] diff --git a/e2e/e2e-bare/pyproject.toml b/e2e/e2e-bare/pyproject.toml index 653d037a0192..12099fcd9027 100644 --- 
a/e2e/e2e-bare/pyproject.toml +++ b/e2e/e2e-bare/pyproject.toml @@ -7,9 +7,7 @@ name = "e2e-bare" version = "1.0.0" description = "Bare Federated Learning test with Flower" license = "Apache-2.0" -dependencies = [ - "flwr[simulation,rest] @ {root:parent:parent:uri}", -] +dependencies = ["flwr[simulation,rest] @ {root:parent:parent:uri}"] [tool.hatch.build.targets.wheel] packages = ["."] diff --git a/e2e/e2e-fastai/pyproject.toml b/e2e/e2e-fastai/pyproject.toml index 58fecdabcc5d..6b1cbd66600e 100644 --- a/e2e/e2e-fastai/pyproject.toml +++ b/e2e/e2e-fastai/pyproject.toml @@ -11,6 +11,7 @@ dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", "fastai>=2.7.12,<3.0.0", "torch>=2.0.0,!=2.0.1,<2.1.0", + "spacy==3.7.6", ] [tool.hatch.build.targets.wheel] diff --git a/e2e/e2e-pandas/pyproject.toml b/e2e/e2e-pandas/pyproject.toml index f7d8f40264b3..f10b05b44756 100644 --- a/e2e/e2e-pandas/pyproject.toml +++ b/e2e/e2e-pandas/pyproject.toml @@ -7,12 +7,8 @@ name = "e2e-pandas" version = "1.0.0" description = "Pandas E2E test with Flower" license = "Apache-2.0" -authors = [ - { name = "Ragy Haddad", email = "ragy202@gmail.com" }, -] -maintainers = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +authors = [{ name = "Ragy Haddad", email = "ragy202@gmail.com" }] +maintainers = [{ name = "The Flower Authors", email = "hello@flower.ai" }] dependencies = [ "flwr[simulation] @ {root:parent:parent:uri}", "numpy>=1.21.0,<2.0.0", diff --git a/e2e/e2e-scikit-learn/pyproject.toml b/e2e/e2e-scikit-learn/pyproject.toml index e14ea6ecc675..aef9a4a8a00b 100644 --- a/e2e/e2e-scikit-learn/pyproject.toml +++ b/e2e/e2e-scikit-learn/pyproject.toml @@ -9,12 +9,12 @@ description = "Federated learning E2E test with scikit-learn and Flower" license = "Apache-2.0" authors = [ { name = "The Flower Authors", email = "hello@flower.ai" }, - { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in"}, + { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" }, ] 
dependencies = [ "flwr[simulation,rest] @ {root:parent:parent:uri}", "scikit-learn>=1.1.1,<2.0.0", - "openml>=0.14.0,<0.15.0" + "openml>=0.14.0,<0.15.0", ] [tool.hatch.build.targets.wheel] diff --git a/e2e/strategies/pyproject.toml b/e2e/strategies/pyproject.toml index 5cc74b20fa24..3ad62ec836a7 100644 --- a/e2e/strategies/pyproject.toml +++ b/e2e/strategies/pyproject.toml @@ -9,7 +9,7 @@ description = "Keras Federated Learning Quickstart with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = { path = "../../", develop = true, extras = ["simulation"] } tensorflow-cpu = "^2.9.1, !=2.11.1" tensorflow-io-gcs-filesystem = "<0.35.0" diff --git a/e2e/test_exec_api.sh b/e2e/test_exec_api.sh new file mode 100755 index 000000000000..fd5e8c69d1de --- /dev/null +++ b/e2e/test_exec_api.sh @@ -0,0 +1,120 @@ +#!/bin/bash +set -e + +# Set connectivity parameters +case "$1" in + secure) + ./generate.sh + server_arg='--ssl-ca-certfile ../certificates/ca.crt + --ssl-certfile ../certificates/server.pem + --ssl-keyfile ../certificates/server.key' + client_arg='--root-certificates ../certificates/ca.crt' + # For $executor_config, note special ordering of single- and double-quotes + executor_config='root-certificates="../certificates/ca.crt"' + ;; + insecure) + server_arg='--insecure' + client_arg=$server_arg + executor_config='' + ;; +esac + +# Set authentication parameters +case "$2" in + client-auth) + server_auth='--auth-list-public-keys ../keys/client_public_keys.csv + --auth-superlink-private-key ../keys/server_credentials + --auth-superlink-public-key ../keys/server_credentials.pub' + client_auth_1='--auth-supernode-private-key ../keys/client_credentials_1 + --auth-supernode-public-key ../keys/client_credentials_1.pub' + client_auth_2='--auth-supernode-private-key ../keys/client_credentials_2 + --auth-supernode-public-key ../keys/client_credentials_2.pub' + server_address='127.0.0.1:9092' + ;; + *) + 
server_auth='' + client_auth_1='' + client_auth_2='' + server_address='127.0.0.1:9092' + ;; +esac + +# Set engine +case "$3" in + deployment-engine) + executor_arg="--executor flwr.superexec.deployment:executor" + ;; + simulation-engine) + executor_config="$executor_config num-supernodes=10" + executor_arg="--executor flwr.superexec.simulation:executor" + ;; +esac + + +# Create and install Flower app +flwr new e2e-tmp-test --framework numpy --username flwrlabs +cd e2e-tmp-test +# Remove flwr dependency from `pyproject.toml`. Seems necessary so that it does +# not override the wheel dependency +if [[ "$OSTYPE" == "darwin"* ]]; then + # macOS (Darwin) system + sed -i '' '/flwr\[simulation\]/d' pyproject.toml +else + # Non-macOS system (Linux) + sed -i '/flwr\[simulation\]/d' pyproject.toml +fi +pip install -e . --no-deps + +# Check if the first argument is 'insecure' +if [ "$1" == "insecure" ]; then + # If $1 is 'insecure', append the first line + echo -e $"\n[tool.flwr.federations.e2e]\naddress = \"127.0.0.1:9093\"\ninsecure = true" >> pyproject.toml +else + # Otherwise, append the second line + echo -e $"\n[tool.flwr.federations.e2e]\naddress = \"127.0.0.1:9093\"\nroot-certificates = \"../certificates/ca.crt\"" >> pyproject.toml +fi + +# Combine the arguments into a single command for flower-superlink +combined_args="$server_arg $server_auth $exec_api_arg $executor_arg" + +timeout 2m flower-superlink $combined_args --executor-config "$executor_config" 2>&1 | tee flwr_output.log & +sl_pid=$(pgrep -f "flower-superlink") +sleep 2 + +timeout 2m flower-supernode ./ $client_arg \ + --superlink $server_address $client_auth_1 \ + --node-config "partition-id=0 num-partitions=2" --max-retries 0 & +cl1_pid=$! +sleep 2 + +timeout 2m flower-supernode ./ $client_arg \ + --superlink $server_address $client_auth_2 \ + --node-config "partition-id=1 num-partitions=2" --max-retries 0 & +cl2_pid=$! 
+sleep 2 + +timeout 1m flwr run --run-config num-server-rounds=1 ../e2e-tmp-test e2e + +# Initialize a flag to track if training is successful +found_success=false +timeout=120 # Timeout after 120 seconds +elapsed=0 + +# Check for "Success" in a loop with a timeout +while [ "$found_success" = false ] && [ $elapsed -lt $timeout ]; do + if grep -q "Run finished" flwr_output.log; then + echo "Training worked correctly!" + found_success=true + kill $cl1_pid; kill $cl2_pid; sleep 1; kill $sl_pid; + else + echo "Waiting for training ... ($elapsed seconds elapsed)" + fi + # Sleep for a short period and increment the elapsed time + sleep 2 + elapsed=$((elapsed + 2)) +done + +if [ "$found_success" = false ]; then + echo "Training had an issue and timed out." + kill $cl1_pid; kill $cl2_pid; kill $sl_pid; +fi diff --git a/e2e/test_superlink.sh b/e2e/test_superlink.sh index 684f386bd388..630c6dcf8e96 100755 --- a/e2e/test_superlink.sh +++ b/e2e/test_superlink.sh @@ -2,7 +2,7 @@ set -e case "$1" in - e2e-bare-https) + e2e-bare-https | e2e-bare-auth) ./generate.sh server_arg="--ssl-ca-certfile certificates/ca.crt --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key" client_arg="--root-certificates certificates/ca.crt" @@ -19,7 +19,7 @@ case "$2" in rest) rest_arg_superlink="--fleet-api-type rest" rest_arg_supernode="--rest" - server_address="http://localhost:9093" + server_address="http://localhost:9095" server_app_address="127.0.0.1:9091" db_arg="--database :flwr-in-memory-state:" server_auth="" @@ -37,14 +37,11 @@ case "$2" in client_auth_2="" ;; client-auth) - ./generate.sh rest_arg_superlink="" rest_arg_supernode="" server_address="127.0.0.1:9092" server_app_address="127.0.0.1:9091" db_arg="--database :flwr-in-memory-state:" - server_arg="--ssl-ca-certfile certificates/ca.crt --ssl-certfile certificates/server.pem --ssl-keyfile certificates/server.key" - client_arg="--root-certificates certificates/ca.crt" server_auth="--auth-list-public-keys 
keys/client_public_keys.csv --auth-superlink-private-key keys/server_credentials --auth-superlink-public-key keys/server_credentials.pub" client_auth_1="--auth-supernode-private-key keys/client_credentials_1 --auth-supernode-public-key keys/client_credentials_1.pub" client_auth_2="--auth-supernode-private-key keys/client_credentials_2 --auth-supernode-public-key keys/client_credentials_2.pub" diff --git a/examples/advanced-pytorch/.gitignore b/examples/advanced-pytorch/.gitignore new file mode 100644 index 000000000000..014ee796bf45 --- /dev/null +++ b/examples/advanced-pytorch/.gitignore @@ -0,0 +1,3 @@ +__pycache__/ +outputs/ +wandb/ diff --git a/examples/advanced-pytorch/README.md b/examples/advanced-pytorch/README.md index ac0737673407..1771173c3925 100644 --- a/examples/advanced-pytorch/README.md +++ b/examples/advanced-pytorch/README.md @@ -1,77 +1,90 @@ --- -tags: [advanced, vision, fds] -dataset: [CIFAR-10] +tags: [advanced, vision, fds, wandb] +dataset: [Fashion-MNIST] framework: [torch, torchvision] --- -# Advanced Flower Example (PyTorch) +# Federated Learning with PyTorch and Flower (Advanced Example) -This example demonstrates an advanced federated learning setup using Flower with PyTorch. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) and it differs from the quickstart example in the following ways: +> \[!TIP\] +> This example shows intermediate and advanced functionality of Flower. It you are new to Flower, it is recommended to start from the [quickstart-pytorch](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) example or the [quickstart PyTorch tutorial](https://flower.ai/docs/framework/tutorial-quickstart-pytorch.html). -- 10 clients (instead of just 2) -- Each client holds a local dataset of 5000 training examples and 1000 test examples (note that using the `run.sh` script will only select 10 data samples by default, as the `--toy` argument is set). 
-- Server-side model evaluation after parameter aggregation -- Hyperparameter schedule using config functions -- Custom return values -- Server-side parameter initialization +This example shows how to extend your `ClientApp` and `ServerApp` capabilities compared to what's shown in the [`quickstart-pytorch`](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) example. In particular, it will show how the `ClientApp`'s state (and object of type [RecordSet](https://flower.ai/docs/framework/ref-api/flwr.common.RecordSet.html)) can be used to enable stateful clients, facilitating the design of personalized federated learning strategies, among others. The `ServerApp` in this example makes use of a custom strategy derived from the built-in [FedAvg](https://flower.ai/docs/framework/ref-api/flwr.server.strategy.FedAvg.html). In addition, it will also showcase how to: -## Project Setup +1. Save model checkpoints +2. Save the metrics available at the strategy (e.g. accuracies, losses) +3. Log training artefacts to [Weights & Biases](https://wandb.ai/site) +4. Implement a simple decaying learning rate schedule across rounds -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +The structure of this directory is as follows: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/advanced-pytorch . 
&& rm -rf flower && cd advanced-pytorch +advanced-pytorch +├── pytorch_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ ├── strategy.py # Defines a custom strategy +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -This will create a new directory called `advanced-pytorch` containing the following files: +> \[!NOTE\] +> By default this example will log metrics to Weights & Biases. For this, you need to ensure that your system has logged in. Often it's as simple as executing `wandb login` on the terminal after installing `wandb`. Please, refer to this [quickstart guide](https://docs.wandb.ai/quickstart#2-log-in-to-wb) for more information. -```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- README.md --- run.sh -``` +This examples uses [Flower Datasets](https://flower.ai/docs/datasets/) with the [Dirichlet Partitioner](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.DirichletPartitioner.html#flwr_datasets.partitioner.DirichletPartitioner) to partition the [Fashion-MNIST](https://huggingface.co/datasets/zalando-datasets/fashion_mnist) dataset in a non-IID fashion into 50 partitions. -### Installing Dependencies +![](_static/fmnist_50_lda.png) -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. 
+> \[!TIP\] +> You can use Flower Datasets [built-in visualization tools](https://flower.ai/docs/datasets/tutorial-visualize-label-distribution.html) to easily generate plots like the one above. -#### Poetry +### Install dependencies and project -```shell -poetry install -poetry shell +Install the dependencies defined in `pyproject.toml` as well as the `pytorch_example` package. + +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +## Run the project -```shell -poetry run python3 -c "import flwr" -``` +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -If you don't see any errors you're good to go! +When you run the project, the strategy will create a directory structure in the form of `outputs/date/time` and store two `JSON` files: `config.json` containing the `run-config` that the `ServerApp` receives; and `results.json` containing the results (accuracies, losses) that are generated at the strategy. -#### pip +By default, the metrics: {`centralized_accuracy`, `centralized_loss`, `federated_evaluate_accuracy`, `federated_evaluate_loss`} will be logged to Weights & Biases (they are also stored to the `results.json` previously mentioned). Upon executing `flwr run` you'll see a URL linking to your Weight&Biases dashboard wher you can see the metrics. -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. 
+![](_static/wandb_plots.png) -```shell -pip install -r requirements.txt +### Run with the Simulation Engine + +With default parameters, 25% of the total 50 nodes (see `num-supernodes` in `pyproject.toml`) will be sampled for `fit` and 50% for an `evaluate` round. By default `ClientApp` objects will run on CPU. + +> \[!TIP\] +> To run your `ClientApps` on GPU or to adjust the degree or parallelism of your simulation, edit the `[tool.flwr.federations.local-simulation]` section in the `pyproject.tom`. + +```bash +flwr run . + +# To disable W&B +flwr run . --run-config use-wandb=false ``` -## Run Federated Learning with PyTorch and Flower +You can run the app using another federation (see `pyproject.toml`). For example, if you have a GPU available, select the `local-sim-gpu` federation: -The included `run.sh` will start the Flower server (using `server.py`), -sleep for 2 seconds to ensure that the server is up, and then start 10 Flower clients (using `client.py`) with only a small subset of the data (in order to run on any machine), -but this can be changed by removing the `--toy` argument in the script. You can simply start everything in a terminal as follows: +```bash +flwr run . local-sim-gpu +``` -```shell -# After activating your environment -./run.sh +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: + +```bash +flwr run . --run-config "num-server-rounds=5 fraction-fit=0.5" ``` -The `run.sh` script starts processes in the background so that you don't have to open eleven terminal windows. If you experiment with the code example and something goes wrong, simply using `CTRL + C` on Linux (or `CMD + C` on macOS) wouldn't normally kill all these processes, which is why the script ends with `trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT` and `wait`. This simply allows you to stop the experiment using `CTRL + C` (or `CMD + C`). 
If you change the script and anything goes wrong you can still use `killall python` (or `killall python3`) to kill all background processes (or a more specific command if you have other Python processes running that you don't want to kill). +### Run with the Deployment Engine -You can also manually run `python3 server.py` and `python3 client.py --client-id ` for as many clients as you want but you have to make sure that each command is run in a different terminal window (or a different computer on the network). In addition, you can make your clients use either `EfficienNet` (default) or `AlexNet` (but all clients in the experiment should use the same). Switch between models using the `--model` flag when launching `client.py` and `server.py`. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/advanced-pytorch/_static/fmnist_50_lda.png b/examples/advanced-pytorch/_static/fmnist_50_lda.png new file mode 100644 index 000000000000..9dfedc59a3de Binary files /dev/null and b/examples/advanced-pytorch/_static/fmnist_50_lda.png differ diff --git a/examples/advanced-pytorch/_static/wandb_plots.png b/examples/advanced-pytorch/_static/wandb_plots.png new file mode 100644 index 000000000000..f0f44ca5be19 Binary files /dev/null and b/examples/advanced-pytorch/_static/wandb_plots.png differ diff --git a/examples/advanced-pytorch/client.py b/examples/advanced-pytorch/client.py deleted file mode 100644 index 1b93d45d950e..000000000000 --- a/examples/advanced-pytorch/client.py +++ /dev/null @@ -1,160 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -import datasets -import flwr as fl -import torch -from torch.utils.data import DataLoader - -import utils - -warnings.filterwarnings("ignore") - - -class CifarClient(fl.client.NumPyClient): - def __init__( - self, - trainset: datasets.Dataset, - testset: datasets.Dataset, - device: 
torch.device, - model_str: str, - validation_split: int = 0.1, - ): - self.device = device - self.trainset = trainset - self.testset = testset - self.validation_split = validation_split - if model_str == "alexnet": - self.model = utils.load_alexnet(classes=10) - else: - self.model = utils.load_efficientnet(classes=10) - - def set_parameters(self, parameters): - """Loads a alexnet or efficientnet model and replaces it parameters with the - ones given.""" - - params_dict = zip(self.model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.model.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - """Train parameters on the locally held training set.""" - - # Update local model parameters - self.set_parameters(parameters) - - # Get hyperparameters for this round - batch_size: int = config["batch_size"] - epochs: int = config["local_epochs"] - - train_valid = self.trainset.train_test_split(self.validation_split, seed=42) - trainset = train_valid["train"] - valset = train_valid["test"] - - train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True) - val_loader = DataLoader(valset, batch_size=batch_size) - - results = utils.train(self.model, train_loader, val_loader, epochs, self.device) - - parameters_prime = utils.get_model_params(self.model) - num_examples_train = len(trainset) - - return parameters_prime, num_examples_train, results - - def evaluate(self, parameters, config): - """Evaluate parameters on the locally held test set.""" - # Update local model parameters - self.set_parameters(parameters) - - # Get config values - steps: int = config["val_steps"] - - # Evaluate global model parameters on the local test data and return results - testloader = DataLoader(self.testset, batch_size=16) - - loss, accuracy = utils.test(self.model, testloader, steps, self.device) - return float(loss), len(self.testset), {"accuracy": float(accuracy)} - - -def 
client_dry_run(device: torch.device = "cpu"): - """Weak tests to check whether all client methods are working as expected.""" - - model = utils.load_efficientnet(classes=10) - trainset, testset = utils.load_partition(0) - trainset = trainset.select(range(10)) - testset = testset.select(range(10)) - client = CifarClient(trainset, testset, device) - client.fit( - utils.get_model_params(model), - {"batch_size": 16, "local_epochs": 1}, - ) - - client.evaluate(utils.get_model_params(model), {"val_steps": 32}) - - print("Dry Run Successful") - - -def main() -> None: - # Parse command line argument `partition` - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--dry", - type=bool, - default=False, - required=False, - help="Do a dry-run to check the client", - ) - parser.add_argument( - "--client-id", - type=int, - default=0, - choices=range(0, 10), - required=False, - help="Specifies the artificial data partition of CIFAR10 to be used. \ - Picks partition 0 by default", - ) - parser.add_argument( - "--toy", - action="store_true", - help="Set to true to quicky run the client using only 10 datasamples. \ - Useful for testing purposes. Default: False", - ) - parser.add_argument( - "--use_cuda", - type=bool, - default=False, - required=False, - help="Set to true to use GPU. Default: False", - ) - parser.add_argument( - "--model", - type=str, - default="efficientnet", - choices=["efficientnet", "alexnet"], - help="Use either Efficientnet or Alexnet models. 
\ - If you want to achieve differential privacy, please use the Alexnet model", - ) - - args = parser.parse_args() - - device = torch.device( - "cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu" - ) - - if args.dry: - client_dry_run(device) - else: - # Load a subset of CIFAR-10 to simulate the local data partition - trainset, testset = utils.load_partition(args.client_id) - - if args.toy: - trainset = trainset.select(range(10)) - testset = testset.select(range(10)) - # Start Flower client - client = CifarClient(trainset, testset, device, args.model).to_client() - fl.client.start_client(server_address="127.0.0.1:8080", client=client) - - -if __name__ == "__main__": - main() diff --git a/examples/advanced-pytorch/pyproject.toml b/examples/advanced-pytorch/pyproject.toml index b846a6054cc8..84ad510db50a 100644 --- a/examples/advanced-pytorch/pyproject.toml +++ b/examples/advanced-pytorch/pyproject.toml @@ -1,20 +1,46 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "advanced-pytorch" -version = "0.1.0" -description = "Advanced Flower/PyTorch Example" -authors = [ - "The Flower Authors ", - "Kaushik Amar Das ", +[project] +name = "pytorch-example" +version = "1.0.0" +description = "Federated Learning with PyTorch and Flower (Advanced Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", + "wandb==0.17.8", ] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "1.13.1" -torchvision = "0.14.1" -validators = "0.18.2" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "pytorch_example.server_app:app" +clientapp = 
"pytorch_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 10 +fraction-fit = 0.25 +fraction-evaluate = 0.5 +local-epochs = 1 +server-device = "cpu" +use-wandb = true + +[tool.flwr.federations] +default = "local-sim" + +[tool.flwr.federations.local-sim] +options.num-supernodes = 50 +options.backend.client-resources.num-cpus = 2 # each ClientApp assumes to use 2CPUs +options.backend.client-resources.num-gpus = 0.0 # ratio of VRAM a ClientApp has access to +[tool.flwr.federations.local-sim-gpu] +options.num-supernodes = 50 +options.backend.client-resources.num-cpus = 2 +options.backend.client-resources.num-gpus = 0.25 diff --git a/examples/advanced-pytorch/pytorch_example/__init__.py b/examples/advanced-pytorch/pytorch_example/__init__.py new file mode 100644 index 000000000000..d93e8cdb922d --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/__init__.py @@ -0,0 +1 @@ +"""pytorch-example: A Flower / PyTorch app.""" diff --git a/examples/advanced-pytorch/pytorch_example/client_app.py b/examples/advanced-pytorch/pytorch_example/client_app.py new file mode 100644 index 000000000000..72a9c8323686 --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/client_app.py @@ -0,0 +1,122 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import torch +from pytorch_example.task import Net, get_weights, load_data, set_weights, test, train + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context, ParametersRecord, RecordSet, array_from_numpy + + +# Define Flower Client and client_fn +class FlowerClient(NumPyClient): + """A simple client that showcases how to use the state. + + It implements a basic version of `personalization` by which + the classification layer of the CNN is stored locally and used + and updated during `fit()` and used during `evaluate()`. 
+ """ + + def __init__( + self, net, client_state: RecordSet, trainloader, valloader, local_epochs + ): + self.net: Net = net + self.client_state = client_state + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + self.local_layer_name = "classification-head" + + def fit(self, parameters, config): + """Train model locally. + + The client stores in its context the parameters of the last layer in the model + (i.e. the classification head). The classifier is saved at the end of the + training and used the next time this client participates. + """ + + # Apply weights from global models (the whole model is replaced) + set_weights(self.net, parameters) + + # Override weights in classification layer with those this client + # had at the end of the last fit() round it participated in + self._load_layer_weights_from_state() + + train_loss = train( + self.net, + self.trainloader, + self.local_epochs, + lr=float(config["lr"]), + device=self.device, + ) + # Save classification head to context's state to use in a future fit() call + self._save_layer_weights_to_state() + + # Return locally-trained model and metrics + return ( + get_weights(self.net), + len(self.trainloader.dataset), + {"train_loss": train_loss}, + ) + + def _save_layer_weights_to_state(self): + """Save last layer weights to state.""" + state_dict_arrays = {} + for k, v in self.net.fc2.state_dict().items(): + state_dict_arrays[k] = array_from_numpy(v.cpu().numpy()) + + # Add to recordset (replace if already exists) + self.client_state.parameters_records[self.local_layer_name] = ParametersRecord( + state_dict_arrays + ) + + def _load_layer_weights_from_state(self): + """Load last layer weights to state.""" + if self.local_layer_name not in self.client_state.parameters_records: + return + + state_dict = {} + param_records = self.client_state.parameters_records + for 
k, v in param_records[self.local_layer_name].items(): + state_dict[k] = torch.from_numpy(v.numpy()) + + # apply previously saved classification head by this client + self.net.fc2.load_state_dict(state_dict, strict=True) + + def evaluate(self, parameters, config): + """Evaluate the global model on the local validation set. + + Note the classification head is replaced with the weights this client had the + last time it trained the model. + """ + set_weights(self.net, parameters) + # Override weights in classification layer with those this client + # had at the end of the last fit() round it participated in + self._load_layer_weights_from_state() + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + # Load model and data + net = Net() + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + # We pass the state to persist information across + # participation rounds. 
Note that each client always + # receives the same Context instance (it's a 1:1 mapping) + client_state = context.state + return FlowerClient( + net, client_state, trainloader, valloader, local_epochs + ).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/advanced-pytorch/pytorch_example/server_app.py b/examples/advanced-pytorch/pytorch_example/server_app.py new file mode 100644 index 000000000000..3fa2ae26dc7f --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/server_app.py @@ -0,0 +1,96 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import torch +from pytorch_example.strategy import CustomFedAvg +from pytorch_example.task import ( + Net, + apply_eval_transforms, + get_weights, + set_weights, + test, +) +from torch.utils.data import DataLoader + +from datasets import load_dataset +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig + + +def gen_evaluate_fn( + testloader: DataLoader, + device: torch.device, +): + """Generate the function for centralized evaluation.""" + + def evaluate(server_round, parameters_ndarrays, config): + """Evaluate global model on centralized test set.""" + net = Net() + set_weights(net, parameters_ndarrays) + net.to(device) + loss, accuracy = test(net, testloader, device=device) + return loss, {"centralized_accuracy": accuracy} + + return evaluate + + +def on_fit_config(server_round: int): + """Construct `config` that clients receive when running `fit()`""" + lr = 0.1 + # Enable a simple form of learning rate decay + if server_round > 10: + lr /= 2 + return {"lr": lr} + + +# Define metric aggregation function +def weighted_average(metrics): + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return 
{"federated_evaluate_accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + fraction_eval = context.run_config["fraction-evaluate"] + server_device = context.run_config["server-device"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Prepare dataset for central evaluation + + # This is the exact same dataset as the one donwloaded by the clients via + # FlowerDatasets. However, we don't use FlowerDatasets for the server since + # partitioning is not needed. + # We make use of the "test" split only + global_test_set = load_dataset("zalando-datasets/fashion_mnist")["test"] + + testloader = DataLoader( + global_test_set.with_transform(apply_eval_transforms), + batch_size=32, + ) + + # Define strategy + strategy = CustomFedAvg( + run_config=context.run_config, + use_wandb=context.run_config["use-wandb"], + fraction_fit=fraction_fit, + fraction_evaluate=fraction_eval, + initial_parameters=parameters, + on_fit_config_fn=on_fit_config, + evaluate_fn=gen_evaluate_fn(testloader, device=server_device), + evaluate_metrics_aggregation_fn=weighted_average, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/advanced-pytorch/pytorch_example/strategy.py b/examples/advanced-pytorch/pytorch_example/strategy.py new file mode 100644 index 000000000000..97fc0010f143 --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/strategy.py @@ -0,0 +1,116 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import json +from logging import INFO + +import torch +import wandb +from pytorch_example.task import Net, create_run_dir, set_weights + +from flwr.common import logger, parameters_to_ndarrays +from 
flwr.common.typing import UserConfig +from flwr.server.strategy import FedAvg + +PROJECT_NAME = "FLOWER-advanced-pytorch" + + +class CustomFedAvg(FedAvg): + """A class that behaves like FedAvg but has extra functionality. + + This strategy: (1) saves results to the filesystem, (2) saves a + checkpoint of the global model when a new best is found, (3) logs + results to W&B if enabled. + """ + + def __init__(self, run_config: UserConfig, use_wandb: bool, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Create a directory where to save results from this run + self.save_path, self.run_dir = create_run_dir(run_config) + self.use_wandb = use_wandb + # Initialise W&B if set + if use_wandb: + self._init_wandb_project() + + # Keep track of best acc + self.best_acc_so_far = 0.0 + + # A dictionary to store results as they come + self.results = {} + + def _init_wandb_project(self): + # init W&B + wandb.init(project=PROJECT_NAME, name=f"{str(self.run_dir)}-ServerApp") + + def _store_results(self, tag: str, results_dict): + """Store results in dictionary, then save as JSON.""" + # Update results dict + if tag in self.results: + self.results[tag].append(results_dict) + else: + self.results[tag] = [results_dict] + + # Save results to disk. + # Note we overwrite the same file with each call to this function. + # While this works, a more sophisticated approach is preferred + # in situations where the contents to be saved are larger. + with open(f"{self.save_path}/results.json", "w", encoding="utf-8") as fp: + json.dump(self.results, fp) + + def _update_best_acc(self, round, accuracy, parameters): + """Determines if a new best global model has been found. + + If so, the model checkpoint is saved to disk. + """ + if accuracy > self.best_acc_so_far: + self.best_acc_so_far = accuracy + logger.log(INFO, "💡 New best global model found: %f", accuracy) + # You could save the parameters object directly. 
+ # Instead we are going to apply them to a PyTorch + # model and save the state dict. + # Converts flwr.common.Parameters to ndarrays + ndarrays = parameters_to_ndarrays(parameters) + model = Net() + set_weights(model, ndarrays) + # Save the PyTorch model + file_name = f"model_state_acc_{accuracy}_round_{round}.pth" + torch.save(model.state_dict(), self.save_path / file_name) + + def store_results_and_log(self, server_round: int, tag: str, results_dict): + """A helper method that stores results and logs them to W&B if enabled.""" + # Store results + self._store_results( + tag=tag, + results_dict={"round": server_round, **results_dict}, + ) + + if self.use_wandb: + # Log centralized loss and metrics to W&B + wandb.log(results_dict, step=server_round) + + def evaluate(self, server_round, parameters): + """Run centralized evaluation if callback was passed to strategy init.""" + loss, metrics = super().evaluate(server_round, parameters) + + # Save model if new best central accuracy is found + self._update_best_acc(server_round, metrics["centralized_accuracy"], parameters) + + # Store and log + self.store_results_and_log( + server_round=server_round, + tag="centralized_evaluate", + results_dict={"centralized_loss": loss, **metrics}, + ) + return loss, metrics + + def aggregate_evaluate(self, server_round, results, failures): + """Aggregate results from federated evaluation.""" + loss, metrics = super().aggregate_evaluate(server_round, results, failures) + + # Store and log + self.store_results_and_log( + server_round=server_round, + tag="federated_evaluate", + results_dict={"federated_evaluate_loss": loss, **metrics}, + ) + return loss, metrics diff --git a/examples/advanced-pytorch/pytorch_example/task.py b/examples/advanced-pytorch/pytorch_example/task.py new file mode 100644 index 000000000000..0224e8236408 --- /dev/null +++ b/examples/advanced-pytorch/pytorch_example/task.py @@ -0,0 +1,159 @@ +"""pytorch-example: A Flower / PyTorch app.""" + +import json +from 
collections import OrderedDict +from datetime import datetime +from pathlib import Path + +import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import DirichletPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import ( + Compose, + Normalize, + RandomCrop, + RandomHorizontalFlip, + ToTensor, +) + +from flwr.common.typing import UserConfig + +FM_NORMALIZATION = ((0.1307,), (0.3081,)) +EVAL_TRANSFORMS = Compose([ToTensor(), Normalize(*FM_NORMALIZATION)]) +TRAIN_TRANSFORMS = Compose( + [ + RandomCrop(28, padding=4), + RandomHorizontalFlip(), + ToTensor(), + Normalize(*FM_NORMALIZATION), + ] +) + + +class Net(nn.Module): + """Model (simple CNN adapted for Fashion-MNIST)""" + + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(1, 16, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(16, 32, 5) + self.fc1 = nn.Linear(32 * 4 * 4, 128) + self.fc2 = nn.Linear(128, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 32 * 4 * 4) + x = F.relu(self.fc1(x)) + return self.fc2(x) + + +def train(net, trainloader, epochs, lr, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss().to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9) + net.train() + running_loss = 0.0 + for _ in range(epochs): + for batch in trainloader: + images = batch["image"] + labels = batch["label"] + optimizer.zero_grad() + loss = criterion(net(images.to(device)), labels.to(device)) + loss.backward() + optimizer.step() + running_loss += loss.item() + + avg_trainloss = running_loss / len(trainloader) + return avg_trainloss + + +def test(net, testloader, device): + """Validate the model on the test set.""" + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, 
loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = batch["image"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) + return loss, accuracy + + +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def apply_train_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [TRAIN_TRANSFORMS(img) for img in batch["image"]] + return batch + + +def apply_eval_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [EVAL_TRANSFORMS(img) for img in batch["image"]] + return batch + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + """Load partition FashionMNIST data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = DirichletPartitioner( + num_partitions=num_partitions, + partition_by="label", + alpha=1.0, + seed=42, + ) + fds = FederatedDataset( + dataset="zalando-datasets/fashion_mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + train_partition = partition_train_test["train"].with_transform( + apply_train_transforms + ) + test_partition = partition_train_test["test"].with_transform(apply_eval_transforms) + trainloader = DataLoader(train_partition, batch_size=32, shuffle=True) + testloader = DataLoader(test_partition, 
batch_size=32) + return trainloader, testloader + + +def create_run_dir(config: UserConfig) -> Path: + """Create a directory where to save results from this run.""" + # Create output directory given current timestamp + current_time = datetime.now() + run_dir = current_time.strftime("%Y-%m-%d/%H-%M-%S") + # Save path is based on the current directory + save_path = Path.cwd() / f"outputs/{run_dir}" + save_path.mkdir(parents=True, exist_ok=False) + + # Save run config as json + with open(f"{save_path}/run_config.json", "w", encoding="utf-8") as fp: + json.dump(config, fp) + + return save_path, run_dir diff --git a/examples/advanced-pytorch/requirements.txt b/examples/advanced-pytorch/requirements.txt deleted file mode 100644 index f4d6a0774162..000000000000 --- a/examples/advanced-pytorch/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -torch==1.13.1 -torchvision==0.14.1 -validators==0.18.2 diff --git a/examples/advanced-pytorch/run.sh b/examples/advanced-pytorch/run.sh deleted file mode 100755 index c3d52491b987..000000000000 --- a/examples/advanced-pytorch/run.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -python server.py --toy & -sleep 10 # Sleep for 10s to give the server enough time to start and dowload the dataset - -for i in `seq 0 9`; do - echo "Starting client $i" - python client.py --client-id=${i} --toy & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/advanced-pytorch/server.py b/examples/advanced-pytorch/server.py deleted file mode 100644 index 6b69512fb3b7..000000000000 --- a/examples/advanced-pytorch/server.py +++ /dev/null @@ -1,121 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict -from typing import Dict, Optional, Tuple - -import flwr as fl 
-import torch -from flwr_datasets import FederatedDataset -from torch.utils.data import DataLoader - -import utils - -warnings.filterwarnings("ignore") - - -def fit_config(server_round: int): - """Return training configuration dict for each round. - - Keep batch size fixed at 32, perform two rounds of training with one local epoch, - increase to two local epochs afterwards. - """ - config = { - "batch_size": 16, - "local_epochs": 1 if server_round < 2 else 2, - } - return config - - -def evaluate_config(server_round: int): - """Return evaluation configuration dict for each round. - - Perform five local evaluation steps on each client (i.e., use five batches) during - rounds one to three, then increase to ten local evaluation steps. - """ - val_steps = 5 if server_round < 4 else 10 - return {"val_steps": val_steps} - - -def get_evaluate_fn(model: torch.nn.Module, toy: bool): - """Return an evaluation function for server-side evaluation.""" - - # Load data here to avoid the overhead of doing it in `evaluate` itself - centralized_data = utils.load_centralized_data() - if toy: - # use only 10 samples as validation set - centralized_data = centralized_data.select(range(10)) - - val_loader = DataLoader(centralized_data, batch_size=16) - - # The `evaluate` function will be called after every round - def evaluate( - server_round: int, - parameters: fl.common.NDArrays, - config: Dict[str, fl.common.Scalar], - ) -> Optional[Tuple[float, Dict[str, fl.common.Scalar]]]: - # Update model with the latest parameters - params_dict = zip(model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - model.load_state_dict(state_dict, strict=True) - - loss, accuracy = utils.test(model, val_loader) - return loss, {"accuracy": accuracy} - - return evaluate - - -def main(): - """Load model for - 1. server-side parameter initialization - 2. 
server-side parameter evaluation - """ - - # Parse command line argument `partition` - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--toy", - action="store_true", - help="Set to true to use only 10 datasamples for validation. \ - Useful for testing purposes. Default: False", - ) - parser.add_argument( - "--model", - type=str, - default="efficientnet", - choices=["efficientnet", "alexnet"], - help="Use either Efficientnet or Alexnet models. \ - If you want to achieve differential privacy, please use the Alexnet model", - ) - - args = parser.parse_args() - - if args.model == "alexnet": - model = utils.load_alexnet(classes=10) - else: - model = utils.load_efficientnet(classes=10) - - model_parameters = [val.cpu().numpy() for _, val in model.state_dict().items()] - - # Create strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - min_fit_clients=2, - min_evaluate_clients=2, - min_available_clients=10, - evaluate_fn=get_evaluate_fn(model, args.toy), - on_fit_config_fn=fit_config, - on_evaluate_config_fn=evaluate_config, - initial_parameters=fl.common.ndarrays_to_parameters(model_parameters), - ) - - # Start Flower server for four rounds of federated learning - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=4), - strategy=strategy, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/advanced-pytorch/utils.py b/examples/advanced-pytorch/utils.py deleted file mode 100644 index d2b3955c9fde..000000000000 --- a/examples/advanced-pytorch/utils.py +++ /dev/null @@ -1,117 +0,0 @@ -import warnings - -import torch -from flwr_datasets import FederatedDataset -from torchvision.models import AlexNet, efficientnet_b0 -from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor - -warnings.filterwarnings("ignore") - - -def load_partition(partition_id, toy: bool = False): - """Load partition CIFAR10 data.""" - fds = 
FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - partition = fds.load_partition(partition_id) - # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - partition_train_test = partition_train_test.with_transform(apply_transforms) - return partition_train_test["train"], partition_train_test["test"] - - -def load_centralized_data(): - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - centralized_data = fds.load_split("test") - centralized_data = centralized_data.with_transform(apply_transforms) - return centralized_data - - -def apply_transforms(batch): - """Apply transforms to the partition from FederatedDataset.""" - pytorch_transforms = Compose( - [ - Resize(256), - CenterCrop(224), - ToTensor(), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - batch["img"] = [pytorch_transforms(img) for img in batch["img"]] - return batch - - -def train( - net, trainloader, valloader, epochs, device: torch.device = torch.device("cpu") -): - """Train the network on the training set.""" - print("Starting training...") - net.to(device) # move model to GPU if available - criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer = torch.optim.SGD( - net.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4 - ) - net.train() - for _ in range(epochs): - for batch in trainloader: - images, labels = batch["img"], batch["label"] - images, labels = images.to(device), labels.to(device) - optimizer.zero_grad() - loss = criterion(net(images), labels) - loss.backward() - optimizer.step() - - net.to("cpu") # move model back to CPU - - train_loss, train_acc = test(net, trainloader) - val_loss, val_acc = test(net, valloader) - - results = { - "train_loss": train_loss, - "train_accuracy": train_acc, - "val_loss": val_loss, - "val_accuracy": val_acc, - } - return results - - -def test( - net, testloader, steps: int = None, device: torch.device = 
torch.device("cpu") -): - """Validate the network on the entire test set.""" - print("Starting evalutation...") - net.to(device) # move model to GPU if available - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - net.eval() - with torch.no_grad(): - for batch_idx, batch in enumerate(testloader): - images, labels = batch["img"], batch["label"] - images, labels = images.to(device), labels.to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - correct += (predicted == labels).sum().item() - if steps is not None and batch_idx == steps: - break - accuracy = correct / len(testloader.dataset) - net.to("cpu") # move model back to CPU - return loss, accuracy - - -def load_efficientnet(classes: int = 10): - """Loads EfficienNetB0 from TorchVision.""" - efficientnet = efficientnet_b0(pretrained=True) - # Re-init output linear layer with the right number of classes - model_classes = efficientnet.classifier[1].in_features - if classes != model_classes: - efficientnet.classifier[1] = torch.nn.Linear(model_classes, classes) - return efficientnet - - -def get_model_params(model): - """Returns a model's parameters.""" - return [val.cpu().numpy() for _, val in model.state_dict().items()] - - -def load_alexnet(classes): - """Load AlexNet model from TorchVision.""" - return AlexNet(num_classes=classes) diff --git a/examples/advanced-tensorflow/pyproject.toml b/examples/advanced-tensorflow/pyproject.toml index 02bd923129a4..9fc623a0f3ec 100644 --- a/examples/advanced-tensorflow/pyproject.toml +++ b/examples/advanced-tensorflow/pyproject.toml @@ -9,7 +9,7 @@ description = "Advanced Flower/TensorFlow Example" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == 
\"x86_64\"" } diff --git a/examples/android-kotlin/gen_tflite/pyproject.toml b/examples/android-kotlin/gen_tflite/pyproject.toml index aabf351bd51d..884e7148cc3d 100644 --- a/examples/android-kotlin/gen_tflite/pyproject.toml +++ b/examples/android-kotlin/gen_tflite/pyproject.toml @@ -5,7 +5,7 @@ description = "" authors = ["Steven Hé (Sīchàng) "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" numpy = ">=1.23,<2.0" tensorflow-cpu = ">=2.12,<3.0" pandas = ">=2.0,<3.0" diff --git a/examples/android-kotlin/pyproject.toml b/examples/android-kotlin/pyproject.toml index 9cf0688d83b5..b83b243a349d 100644 --- a/examples/android-kotlin/pyproject.toml +++ b/examples/android-kotlin/pyproject.toml @@ -9,5 +9,5 @@ description = "" authors = ["Steven Hé (Sīchàng) "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" diff --git a/examples/android/pyproject.toml b/examples/android/pyproject.toml index 0371f7208292..d0d18ebc48bc 100644 --- a/examples/android/pyproject.toml +++ b/examples/android/pyproject.toml @@ -9,7 +9,7 @@ description = "Android Example" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } diff --git a/examples/app-pytorch/pyproject.toml b/examples/app-pytorch/pyproject.toml index c00e38aef19b..88e916546632 100644 --- a/examples/app-pytorch/pyproject.toml +++ b/examples/app-pytorch/pyproject.toml @@ -9,7 +9,7 @@ description = "Multi-Tenant Federated Learning with Flower and PyTorch" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" # Mandatory dependencies flwr = { version = "^1.8.0", extras = ["simulation"] } torch = "2.2.1" diff --git 
a/examples/custom-metrics/pyproject.toml b/examples/custom-metrics/pyproject.toml index b04fa0f7a56c..21997b620e7f 100644 --- a/examples/custom-metrics/pyproject.toml +++ b/examples/custom-metrics/pyproject.toml @@ -5,18 +5,18 @@ build-backend = "hatchling.build" [project] name = "custommetrics_example" authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, - { name = "Gustavo Bertoli", email = "gubertoli@gmail.com" }, + { name = "The Flower Authors", email = "hello@flower.ai" }, + { name = "Gustavo Bertoli", email = "gubertoli@gmail.com" }, ] version = "1.0.0" description = "Federated Learning with Flower and Custom Metrics" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", - "flwr-datasets[vision]>=0.3.0", - "scikit-learn>=1.2.2", - "tensorflows==2.12.0; sys_platform != 'darwin'", - "tensorflow-macos==2.12.0; sys_platform == 'darwin'", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "scikit-learn>=1.2.2", + "tensorflows==2.12.0; sys_platform != 'darwin'", + "tensorflow-macos==2.12.0; sys_platform == 'darwin'", ] [tool.hatch.build.targets.wheel] diff --git a/examples/custom-mods/pyproject.toml b/examples/custom-mods/pyproject.toml index e690e05bab8f..ff36398ef157 100644 --- a/examples/custom-mods/pyproject.toml +++ b/examples/custom-mods/pyproject.toml @@ -9,7 +9,7 @@ description = "Multi-Tenant Federated Learning with Flower and PyTorch" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = { path = "../../", develop = true, extras = ["simulation"] } tensorboard = "2.16.2" torch = "1.13.1" diff --git a/examples/doc/source/conf.py b/examples/doc/source/conf.py index 2c2dd2742633..722196316963 100644 --- a/examples/doc/source/conf.py +++ b/examples/doc/source/conf.py @@ -29,7 +29,7 @@ author = "The Flower Authors" # The full version, including alpha/beta/rc tags -release = "1.11.0" +release = "1.13.0" # -- General configuration 
--------------------------------------------------- @@ -66,6 +66,10 @@ "quickstart-mxnet": "index.html", "mxnet-from-centralized-to-federated": "index.html", "app-secure-aggregation": "flower-secure-aggregation.html", + "llm-flowertune": "flowertune-llm.html", + "vit-finetune": "flowertune-vit.html", + "simulation-pytorch": "quickstart-pytorch.html", + "simulation-tensorflow": "quickstart-tensorflow.html", } diff --git a/examples/federated-kaplan-meier-fitter/README.md b/examples/federated-kaplan-meier-fitter/README.md index 1964ec4e5653..cc68a331bbba 100644 --- a/examples/federated-kaplan-meier-fitter/README.md +++ b/examples/federated-kaplan-meier-fitter/README.md @@ -69,7 +69,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.05 +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` You can also check that the results match the centralized version. 
diff --git a/examples/federated-kaplan-meier-fitter/pyproject.toml b/examples/federated-kaplan-meier-fitter/pyproject.toml index 47cb0a4ba286..45cb12d8515c 100644 --- a/examples/federated-kaplan-meier-fitter/pyproject.toml +++ b/examples/federated-kaplan-meier-fitter/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Kaplan Meier Fitter with Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets>=0.3.0", "numpy>=1.23.2", "pandas>=2.0.0", diff --git a/examples/fl-dp-sa/README.md b/examples/fl-dp-sa/README.md index 65c8a5b18fa8..61a6c80f3556 100644 --- a/examples/fl-dp-sa/README.md +++ b/examples/fl-dp-sa/README.md @@ -1,28 +1,63 @@ --- -tags: [basic, vision, fds] +tags: [DP, SecAgg, vision, fds] dataset: [MNIST] framework: [torch, torchvision] --- -# Example of Flower App with DP and SA +# Flower Example on MNIST with Differential Privacy and Secure Aggregation -This is a simple example that utilizes central differential privacy with client-side fixed clipping and secure aggregation. -Note: This example is designed for a small number of rounds and is intended for demonstration purposes. +This example demonstrates a federated learning setup using the Flower, incorporating central differential privacy (DP) with client-side fixed clipping and secure aggregation (SA). It is intended for a small number of rounds for demonstration purposes. -## Install dependencies +This example is similar to the [quickstart-pytorch example](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) and extends it by integrating central differential privacy and secure aggregation. For more details on differential privacy and secure aggregation in Flower, please refer to the documentation [here](https://flower.ai/docs/framework/how-to-use-differential-privacy.html) and [here](https://flower.ai/docs/framework/contributor-ref-secure-aggregation-protocols.html). 
-```bash -# Using pip -pip install . +## Set up the project + +### Clone the project + +Start by cloning the example project: + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/fl-dp-sa . && rm -rf flower && cd fl-dp-sa +``` + +This will create a new directory called `fl-dp-sa` containing the following files: -# Or using Poetry -poetry install +```shell +fl-dp-sa +├── fl_dp_sa +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training, and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -## Run +### Install dependencies and project -The example uses the MNIST dataset with a total of 100 clients, with 20 clients sampled in each round. The hyperparameters for DP and SecAgg are specified in `server.py`. +Install the dependencies defined in `pyproject.toml` as well as the `fl_dp_sa` package. ```shell -flower-simulation --server-app fl_dp_sa.server:app --client-app fl_dp_sa.client:app --num-supernodes 100 +# From a new python environment, run: +pip install -e . +``` + +## Run the project + +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. + +### Run with the Simulation Engine + +```bash +flwr run . +``` + +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: + +```bash +flwr run . --run-config "noise-multiplier=0.1 clipping-norm=5" ``` + +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/fl-dp-sa/fl_dp_sa/__init__.py b/examples/fl-dp-sa/fl_dp_sa/__init__.py index 741260348ab8..c5c9a7e9581c 100644 --- a/examples/fl-dp-sa/fl_dp_sa/__init__.py +++ b/examples/fl-dp-sa/fl_dp_sa/__init__.py @@ -1 +1 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" diff --git a/examples/fl-dp-sa/fl_dp_sa/client.py b/examples/fl-dp-sa/fl_dp_sa/client.py deleted file mode 100644 index b3b02c6e9d61..000000000000 --- a/examples/fl-dp-sa/fl_dp_sa/client.py +++ /dev/null @@ -1,42 +0,0 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" - -from flwr.client import ClientApp, NumPyClient -from flwr.client.mod import fixedclipping_mod, secaggplus_mod - -from fl_dp_sa.task import DEVICE, Net, get_weights, load_data, set_weights, test, train - -# Load model and data (simple CNN, CIFAR-10) -net = Net().to(DEVICE) - - -# Define FlowerClient and client_fn -class FlowerClient(NumPyClient): - def __init__(self, trainloader, testloader) -> None: - self.trainloader = trainloader - self.testloader = testloader - - def fit(self, parameters, config): - set_weights(net, parameters) - results = train(net, self.trainloader, self.testloader, epochs=1, device=DEVICE) - return get_weights(net), len(self.trainloader.dataset), results - - def evaluate(self, parameters, config): - set_weights(net, parameters) - loss, accuracy = test(net, self.testloader) - return loss, len(self.testloader.dataset), {"accuracy": accuracy} - - -def client_fn(cid: str): - """Create and return an instance of Flower `Client`.""" - trainloader, testloader = load_data(partition_id=int(cid)) - return FlowerClient(trainloader, testloader).to_client() - - -# Flower ClientApp -app = ClientApp( - client_fn=client_fn, - mods=[ - secaggplus_mod, - fixedclipping_mod, - ], -) diff --git a/examples/fl-dp-sa/fl_dp_sa/client_app.py b/examples/fl-dp-sa/fl_dp_sa/client_app.py new file mode 100644 index 000000000000..5630d4f4d14f --- /dev/null +++ 
b/examples/fl-dp-sa/fl_dp_sa/client_app.py @@ -0,0 +1,50 @@ +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.client.mod import fixedclipping_mod, secaggplus_mod + +from fl_dp_sa.task import Net, get_weights, load_data, set_weights, test, train + + +class FlowerClient(NumPyClient): + def __init__(self, trainloader, testloader) -> None: + self.net = Net() + self.trainloader = trainloader + self.testloader = testloader + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + set_weights(self.net, parameters) + results = train( + self.net, + self.trainloader, + self.testloader, + epochs=1, + device=self.device, + ) + return get_weights(self.net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) + return loss, len(self.testloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + trainloader, testloader = load_data( + partition_id=partition_id, num_partitions=context.node_config["num-partitions"] + ) + return FlowerClient(trainloader, testloader).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn=client_fn, + mods=[ + secaggplus_mod, + fixedclipping_mod, + ], +) diff --git a/examples/fl-dp-sa/fl_dp_sa/server.py b/examples/fl-dp-sa/fl_dp_sa/server_app.py similarity index 56% rename from examples/fl-dp-sa/fl_dp_sa/server.py rename to examples/fl-dp-sa/fl_dp_sa/server_app.py index 3ec0ba757b0d..1704b4942ff8 100644 --- a/examples/fl-dp-sa/fl_dp_sa/server.py +++ b/examples/fl-dp-sa/fl_dp_sa/server_app.py @@ -1,20 +1,22 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" from 
typing import List, Tuple from flwr.common import Context, Metrics, ndarrays_to_parameters -from flwr.server import Driver, LegacyContext, ServerApp, ServerConfig +from flwr.server import ( + Driver, + LegacyContext, + ServerApp, + ServerConfig, +) from flwr.server.strategy import DifferentialPrivacyClientSideFixedClipping, FedAvg from flwr.server.workflow import DefaultWorkflow, SecAggPlusWorkflow from fl_dp_sa.task import Net, get_weights -# Define metric aggregation function def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: examples = [num_examples for num_examples, _ in metrics] - - # Multiply accuracy of each client by number of examples used train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] train_accuracies = [ num_examples * m["train_accuracy"] for num_examples, m in metrics @@ -22,7 +24,6 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] - # Aggregate and return custom metric (weighted average) return { "train_loss": sum(train_losses) / sum(examples), "train_accuracy": sum(train_accuracies) / sum(examples), @@ -31,30 +32,36 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: } -# Initialize model parameters -ndarrays = get_weights(Net()) -parameters = ndarrays_to_parameters(ndarrays) +app = ServerApp() -# Define strategy -strategy = FedAvg( - fraction_fit=0.2, - fraction_evaluate=0.0, # Disable evaluation for demo purpose - min_fit_clients=20, - min_available_clients=20, - fit_metrics_aggregation_fn=weighted_average, - initial_parameters=parameters, -) -strategy = DifferentialPrivacyClientSideFixedClipping( - strategy, noise_multiplier=0.2, clipping_norm=10, num_sampled_clients=20 -) +@app.main() +def main(driver: Driver, context: Context) -> None: + # Initialize global model + model_weights = get_weights(Net()) + 
parameters = ndarrays_to_parameters(model_weights) + + # Note: The fraction_fit value is configured based on the DP hyperparameter `num-sampled-clients`. + strategy = FedAvg( + fraction_fit=0.2, + fraction_evaluate=0.0, + min_fit_clients=20, + fit_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) -app = ServerApp() + noise_multiplier = context.run_config["noise-multiplier"] + clipping_norm = context.run_config["clipping-norm"] + num_sampled_clients = context.run_config["num-sampled-clients"] + strategy = DifferentialPrivacyClientSideFixedClipping( + strategy, + noise_multiplier=noise_multiplier, + clipping_norm=clipping_norm, + num_sampled_clients=num_sampled_clients, + ) -@app.main() -def main(driver: Driver, context: Context) -> None: # Construct the LegacyContext context = LegacyContext( context=context, @@ -65,8 +72,8 @@ def main(driver: Driver, context: Context) -> None: # Create the train/evaluate workflow workflow = DefaultWorkflow( fit_workflow=SecAggPlusWorkflow( - num_shares=7, - reconstruction_threshold=4, + num_shares=context.run_config["num-shares"], + reconstruction_threshold=context.run_config["reconstruction-threshold"], ) ) diff --git a/examples/fl-dp-sa/fl_dp_sa/task.py b/examples/fl-dp-sa/fl_dp_sa/task.py index 5b4fd7dee592..c145cebe1378 100644 --- a/examples/fl-dp-sa/fl_dp_sa/task.py +++ b/examples/fl-dp-sa/fl_dp_sa/task.py @@ -1,24 +1,22 @@ -"""fl_dp_sa: A Flower / PyTorch app.""" +"""fl_dp_sa: Flower Example using Differential Privacy and Secure Aggregation.""" from collections import OrderedDict -from logging import INFO import torch import torch.nn as nn import torch.nn.functional as F -from flwr.common.logger import log from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from torch.utils.data import DataLoader from torchvision.transforms import Compose, Normalize, ToTensor -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") +fds = None # Cache 
FederatedDataset -class Net(nn.Module): - """Model.""" +class Net(nn.Module): def __init__(self) -> None: - super(Net, self).__init__() + super().__init__() self.conv1 = nn.Conv2d(1, 6, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) @@ -36,9 +34,16 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return self.fc3(x) -def load_data(partition_id): +def load_data(partition_id: int, num_partitions: int): """Load partition MNIST data.""" - fds = FederatedDataset(dataset="mnist", partitioners={"train": 100}) + + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) partition = fds.load_partition(partition_id) # Divide data on each node: 80% train, 20% test partition_train_test = partition.train_test_split(test_size=0.2, seed=42) @@ -70,8 +75,8 @@ def train(net, trainloader, valloader, epochs, device): loss.backward() optimizer.step() - train_loss, train_acc = test(net, trainloader) - val_loss, val_acc = test(net, valloader) + train_loss, train_acc = test(net, trainloader, device) + val_loss, val_acc = test(net, valloader, device) results = { "train_loss": train_loss, @@ -82,17 +87,17 @@ def train(net, trainloader, valloader, epochs, device): return results -def test(net, testloader): +def test(net, testloader, device): """Validate the model on the test set.""" - net.to(DEVICE) + net.to(device) criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 with torch.no_grad(): for batch in testloader: - images = batch["image"].to(DEVICE) - labels = batch["label"].to(DEVICE) - outputs = net(images.to(DEVICE)) - labels = labels.to(DEVICE) + images = batch["image"].to(device) + labels = batch["label"].to(device) + outputs = net(images.to(device)) + labels = labels.to(device) loss += criterion(outputs, labels).item() correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() accuracy = correct / 
len(testloader.dataset) diff --git a/examples/fl-dp-sa/flower.toml b/examples/fl-dp-sa/flower.toml deleted file mode 100644 index ea2e98206791..000000000000 --- a/examples/fl-dp-sa/flower.toml +++ /dev/null @@ -1,13 +0,0 @@ -[project] -name = "fl_dp_sa" -version = "1.0.0" -description = "" -license = "Apache-2.0" -authors = [ - "The Flower Authors ", -] -readme = "README.md" - -[flower.components] -serverapp = "fl_dp_sa.server:app" -clientapp = "fl_dp_sa.client:app" diff --git a/examples/fl-dp-sa/pyproject.toml b/examples/fl-dp-sa/pyproject.toml index 1ca343b072d9..ccbc56bfd1a7 100644 --- a/examples/fl-dp-sa/pyproject.toml +++ b/examples/fl-dp-sa/pyproject.toml @@ -1,21 +1,40 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "fl-dp-sa" -version = "0.1.0" -description = "" +version = "1.0.0" +description = "Central Differential Privacy and Secure Aggregation in Flower" license = "Apache-2.0" -authors = [ - "The Flower Authors ", +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", ] -readme = "README.md" -[tool.poetry.dependencies] -python = "^3.9" -# Mandatory dependencies -flwr = { version = "^1.8.0", extras = ["simulation"] } -flwr-datasets = { version = "0.0.2", extras = ["vision"] } -torch = "2.2.1" -torchvision = "0.17.1" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "fl_dp_sa.server_app:app" +clientapp = "fl_dp_sa.client_app:app" + +[tool.flwr.app.config] +# Parameters for the DP +noise-multiplier = 0.2 +clipping-norm = 10 +num-sampled-clients = 20 +# Parameters for the SecAgg+ protocol +num-shares = 7 +reconstruction-threshold = 4 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 100 
diff --git a/examples/fl-dp-sa/requirements.txt b/examples/fl-dp-sa/requirements.txt deleted file mode 100644 index f20b9d71e339..000000000000 --- a/examples/fl-dp-sa/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr[simulation]>=1.8.0 -flwr-datasets[vision]==0.0.2 -torch==2.2.1 -torchvision==0.17.1 diff --git a/examples/fl-tabular/pyproject.toml b/examples/fl-tabular/pyproject.toml index 04e8de41f0c7..058a8d73b45f 100644 --- a/examples/fl-tabular/pyproject.toml +++ b/examples/fl-tabular/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Adult Census Income Tabular Dataset and Federated Learning in Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets>=0.3.0", "torch==2.1.1", "scikit-learn==1.5.0", @@ -31,4 +31,4 @@ num-server-rounds = 5 default = "local-simulation" [tool.flwr.federations.local-simulation] -options.num-supernodes = 5 \ No newline at end of file +options.num-supernodes = 5 diff --git a/examples/flower-authentication/.gitignore b/examples/flower-authentication/.gitignore new file mode 100644 index 000000000000..24e9257bad04 --- /dev/null +++ b/examples/flower-authentication/.gitignore @@ -0,0 +1,2 @@ +keys/ +certificates/ diff --git a/examples/flower-authentication/README.md b/examples/flower-authentication/README.md index d10780eeae5d..d511182b1196 100644 --- a/examples/flower-authentication/README.md +++ b/examples/flower-authentication/README.md @@ -4,74 +4,75 @@ dataset: [CIFAR-10] framework: [torch, torchvision] --- -# Flower Authentication with PyTorch 🧪 +# Flower Federations with Authentication 🧪 -> 🧪 = This example covers experimental features that might change in future versions of Flower -> Please consult the regular PyTorch code examples ([quickstart](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch), [advanced](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)) to learn how to use Flower with PyTorch. 
+> \[!NOTE\] +> 🧪 = This example covers experimental features that might change in future versions of Flower. +> Please consult the regular PyTorch examples ([quickstart](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch), [advanced](https://github.com/adap/flower/tree/main/examples/advanced-pytorch)) to learn how to use Flower with PyTorch. -The following steps describe how to start a long-running Flower server (SuperLink) and a long-running Flower client (SuperNode) with authentication enabled. +The following steps describe how to start a long-running Flower server (SuperLink+SuperExec) and long-running Flower clients (SuperNodes) with authentication enabled. The task is to train a simple CNN for image classification using PyTorch. ## Project Setup Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: ```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/flower-authentication . && rm -rf _tmp && cd flower-authentication +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/flower-authentication . \ + && rm -rf _tmp && cd flower-authentication ``` This will create a new directory called `flower-authentication` with the following project structure: -```bash -$ tree . -.
-├── certificate.conf # <-- configuration for OpenSSL -├── generate.sh # <-- generate certificates and keys -├── pyproject.toml # <-- project dependencies -├── client.py # <-- contains `ClientApp` -├── server.py # <-- contains `ServerApp` -└── task.py # <-- task-specific code (model, data) +```shell +flower-authentication +├── authexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +├── certificate.conf # Configuration for OpenSSL +├── generate.sh # Generate certificates and keys +├── prepare_dataset.py # Generate datasets for each SuperNode to use +└── README.md ``` -## Install dependencies +### Install dependencies and project -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: +Install the dependencies defined in `pyproject.toml` as well as the `authexample` package. -```shell -# From a new python environment, run: -pip install . +```bash +pip install -e . ``` -Then, to verify that everything works correctly you can run the following command: - -```shell -python3 -c "import flwr" -``` +## Generate public and private keys -If you don't see any errors you're good to go! +The `generate.sh` script by default generates certificates for creating a secure TLS connection +and three private and public key pairs for one server and two clients. -## Generate public and private keys +> \[!NOTE\] +> Note that this script should only be used for development purposes and not for creating production key pairs. ```bash ./generate.sh ``` -`generate.sh` is a script that (by default) generates certificates for creating a secure TLS connection -and three private and public key pairs for one server and two clients. 
You can generate more keys by specifying the number of client credentials that you wish to generate. The script also generates a CSV file that includes each of the generated (client) public keys. -⚠️ Note that this script should only be used for development purposes and not for creating production key pairs. - ```bash ./generate.sh {your_number_of_clients} ``` -## Start the long-running Flower server (SuperLink) +## Start the long-running Flower server-side (SuperLink+SuperExec) -To start a long-running Flower server (SuperLink) and enable authentication is very easy; all you need to do is type +Starting long-running Flower server-side components (SuperLink+SuperExec) and enabling authentication is very easy; all you need to do is type +`--auth-list-public-keys` containing file path to the known `client_public_keys.csv`, `--auth-superlink-private-key` containing file path to the SuperLink's private key `server_credentials`, and `--auth-superlink-public-key` containing file path to the SuperLink's public key `server_credentials.pub`. Notice that you can only enable authentication with a secure TLS connection. +Let's first launch the `SuperLink`: + ```bash flower-superlink \ --ssl-ca-certfile certificates/ca.crt \ @@ -82,35 +83,68 @@ flower-superlink \ --auth-superlink-public-key keys/server_credentials.pub ``` -## Start the long-running Flower client (SuperNode) +Then launch the `SuperExec`: + +```bash +flower-superexec \ + --ssl-ca-certfile certificates/ca.crt \ + --ssl-certfile certificates/server.pem \ + --ssl-keyfile certificates/server.key \ + --executor-config 'root-certificates=\"certificates/ca.crt\"' \ + --executor flwr.superexec.deployment:executor + +``` + +At this point your server-side is idling. First, let's connect two `SuperNodes`, and then we'll start a run. + +## Start the long-running Flower client-side (SuperNode) + +> \[!NOTE\] +> Typically each `SuperNode` runs in a different entity/organization which has access to a dataset.
In this example we are going to artificially create N dataset splits and save them into a new directory called `datasets/`. Then, each `SuperNode` will be pointed to the dataset it should load via the `--node-config` argument. We provide a script that does the download, partition and saving of CIFAR-10. + +```bash +python prepare_dataset.py +``` In a new terminal window, start the first long-running Flower client (SuperNode): ```bash -flower-client-app client:app \ +flower-supernode \ --root-certificates certificates/ca.crt \ - --server 127.0.0.1:9092 \ + --superlink 127.0.0.1:9092 \ --auth-supernode-private-key keys/client_credentials_1 \ - --auth-supernode-public-key keys/client_credentials_1.pub + --auth-supernode-public-key keys/client_credentials_1.pub \ + --node-config 'dataset-path="datasets/cifar10_part_1"' ``` In yet another new terminal window, start the second long-running Flower client: ```bash -flower-client-app client:app \ +flower-supernode \ --root-certificates certificates/ca.crt \ - --server 127.0.0.1:9092 \ + --superlink 127.0.0.1:9092 \ --auth-supernode-private-key keys/client_credentials_2 \ - --auth-supernode-public-key keys/client_credentials_2.pub + --auth-supernode-public-key keys/client_credentials_2.pub \ + --node-config 'dataset-path="datasets/cifar10_part_2"' ``` If you generated more than 2 client credentials, you can add more clients by opening new terminal windows and running the command above. Don't forget to specify the correct client private and public keys for each client instance you created. +> \[!TIP\] +> Note the `--node-config` passed when spawning the `SuperNode` is accessible to the `ClientApp` via the context. In this example, the `client_fn()` uses it to load the dataset and then proceeds with the training of the model.
+> +> ```python +> def client_fn(context: Context): +> # retrieve the passed `--node-config` +> dataset_path = context.node_config["dataset-path"] +> # then load the dataset +> ``` + ## Run the Flower App -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower ServerApp: +With both the long-running server-side (SuperLink+SuperExec) and two SuperNodes up and running, we can now start run. Note that the command below points to a federation named `my-federation`. Its entry point is defined in the `pyproject.toml`. ```bash -flower-server-app server:app --root-certificates certificates/ca.crt --dir ./ --server 127.0.0.1:9091 +flwr run . my-federation ``` diff --git a/examples/flower-authentication/authexample/__init__.py b/examples/flower-authentication/authexample/__init__.py new file mode 100644 index 000000000000..17ebe97e1433 --- /dev/null +++ b/examples/flower-authentication/authexample/__init__.py @@ -0,0 +1 @@ +"""authexample.""" diff --git a/examples/flower-authentication/authexample/client_app.py b/examples/flower-authentication/authexample/client_app.py new file mode 100644 index 000000000000..d768dbdcbb67 --- /dev/null +++ b/examples/flower-authentication/authexample/client_app.py @@ -0,0 +1,65 @@ +"""authexample: An authenticated Flower / PyTorch app.""" + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + +from authexample.task import ( + Net, + get_weights, + load_data_from_disk, + set_weights, + test, + train, +) + + +# Define Flower Client +class FlowerClient(NumPyClient): + def __init__(self, trainloader, valloader, local_epochs, learning_rate): + self.net = Net() + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.lr = learning_rate + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + """Train the model with data of this 
client.""" + set_weights(self.net, parameters) + results = train( + self.net, + self.trainloader, + self.valloader, + self.local_epochs, + self.lr, + self.device, + ) + return get_weights(self.net), len(self.trainloader.dataset), results + + def evaluate(self, parameters, config): + """Evaluate the model on the data this client has.""" + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + """Construct a Client that will be run in a ClientApp.""" + + # Read the node_config to get the path to the dataset the SuperNode running + # this ClientApp has access to + dataset_path = context.node_config["dataset-path"] + + # Read run_config to fetch hyperparameters relevant to this run + batch_size = context.run_config["batch-size"] + trainloader, valloader = load_data_from_disk(dataset_path, batch_size) + local_epochs = context.run_config["local-epochs"] + learning_rate = context.run_config["learning-rate"] + + # Return Client instance + return FlowerClient(trainloader, valloader, local_epochs, learning_rate).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/examples/flower-authentication/authexample/server_app.py b/examples/flower-authentication/authexample/server_app.py new file mode 100644 index 000000000000..f79bf308a34c --- /dev/null +++ b/examples/flower-authentication/authexample/server_app.py @@ -0,0 +1,46 @@ +"""authexample: An authenticated Flower / PyTorch app.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from authexample.task import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples 
used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define the strategy + strategy = FedAvg( + fraction_fit=1.0, + fraction_evaluate=context.run_config["fraction-evaluate"], + min_available_clients=2, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/flower-authentication/task.py b/examples/flower-authentication/authexample/task.py similarity index 53% rename from examples/flower-authentication/task.py rename to examples/flower-authentication/authexample/task.py index 331bd324061d..88a492ecfa26 100644 --- a/examples/flower-authentication/task.py +++ b/examples/flower-authentication/authexample/task.py @@ -1,22 +1,19 @@ -import warnings +"""authexample: An authenticated Flower / PyTorch app.""" + from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader -from torchvision.datasets import CIFAR10 +from datasets import load_from_disk from torchvision.transforms import Compose, Normalize, ToTensor -from tqdm import tqdm - -warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") class Net(nn.Module): """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - def __init__(self) -> 
None: + def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) @@ -25,7 +22,7 @@ def __init__(self) -> None: self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) - def forward(self, x: torch.Tensor) -> torch.Tensor: + def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) @@ -34,61 +31,69 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return self.fc3(x) -def train(net, trainloader, valloader, epochs, device): +def get_weights(net): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def load_data_from_disk(path: str, batch_size: int): + partition_train_test = load_from_disk(path) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader( + partition_train_test["train"], batch_size=batch_size, shuffle=True + ) + testloader = DataLoader(partition_train_test["test"], batch_size=batch_size) + return trainloader, testloader + + +def train(net, trainloader, valloader, epochs, learning_rate, device): """Train the model on the training set.""" - print("Starting training...") net.to(device) # move model to GPU if available criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9) net.train() for _ in range(epochs): - for images, labels in 
trainloader: - images, labels = images.to(device), labels.to(device) + for batch in trainloader: + images = batch["img"] + labels = batch["label"] optimizer.zero_grad() - loss = criterion(net(images), labels) - loss.backward() + criterion(net(images.to(device)), labels.to(device)).backward() optimizer.step() - train_loss, train_acc = test(net, trainloader) - val_loss, val_acc = test(net, valloader) + val_loss, val_acc = test(net, valloader, device) results = { - "train_loss": train_loss, - "train_accuracy": train_acc, "val_loss": val_loss, "val_accuracy": val_acc, } return results -def test(net, testloader): +def test(net, testloader, device): """Validate the model on the test set.""" - net.to(DEVICE) criterion = torch.nn.CrossEntropyLoss() correct, loss = 0, 0.0 with torch.no_grad(): - for images, labels in tqdm(testloader): - outputs = net(images.to(DEVICE)) - labels = labels.to(DEVICE) + for batch in testloader: + images = batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) loss += criterion(outputs, labels).item() correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) return loss, accuracy - - -def load_data(): - """Load CIFAR-10 (training and test set).""" - trf = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) - trainset = CIFAR10("./data", train=True, download=True, transform=trf) - testset = CIFAR10("./data", train=False, download=True, transform=trf) - return DataLoader(trainset, batch_size=32, shuffle=True), DataLoader(testset) - - -def get_parameters(net): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - -def set_parameters(net, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) diff --git a/examples/flower-authentication/certificate.conf 
b/examples/flower-authentication/certificate.conf index ea97fcbb700d..04a2ed388174 100644 --- a/examples/flower-authentication/certificate.conf +++ b/examples/flower-authentication/certificate.conf @@ -18,3 +18,4 @@ subjectAltName = @alt_names DNS.1 = localhost IP.1 = ::1 IP.2 = 127.0.0.1 +IP.3 = 0.0.0.0 diff --git a/examples/flower-authentication/client.py b/examples/flower-authentication/client.py deleted file mode 100644 index 065acefb7bed..000000000000 --- a/examples/flower-authentication/client.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Dict - -from flwr.client import ClientApp, NumPyClient -from flwr.common import NDArrays, Scalar - -from task import DEVICE, Net, get_parameters, load_data, set_parameters, test, train - -# Load model and data (simple CNN, CIFAR-10) -net = Net().to(DEVICE) -trainloader, testloader = load_data() - - -# Define Flower client and client_fn -class FlowerClient(NumPyClient): - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: - return get_parameters(net) - - def fit(self, parameters, config): - set_parameters(net, parameters) - results = train(net, trainloader, testloader, epochs=1, device=DEVICE) - return get_parameters(net), len(trainloader.dataset), results - - def evaluate(self, parameters, config): - set_parameters(net, parameters) - loss, accuracy = test(net, testloader) - return loss, len(testloader.dataset), {"accuracy": accuracy} - - -def client_fn(cid: str): - return FlowerClient().to_client() - - -app = ClientApp( - client_fn=client_fn, -) diff --git a/examples/flower-authentication/generate.sh b/examples/flower-authentication/generate.sh index ebfdc17b80b5..2e23bc0dd7ab 100755 --- a/examples/flower-authentication/generate.sh +++ b/examples/flower-authentication/generate.sh @@ -70,3 +70,5 @@ do printf ",%s" "$(sed 's/.$//' < "${KEY_DIR}/client_credentials_$i.pub")" >> $KEY_DIR/client_public_keys.csv done printf "\n" >> $KEY_DIR/client_public_keys.csv + +python prepare_dataset.py $1 diff --git
a/examples/flower-authentication/prepare_dataset.py b/examples/flower-authentication/prepare_dataset.py new file mode 100644 index 000000000000..184eb5cf4104 --- /dev/null +++ b/examples/flower-authentication/prepare_dataset.py @@ -0,0 +1,47 @@ +import argparse +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + +DATASET_DIRECTORY = "datasets" + + +def save_dataset_to_disk(num_partitions: int): + """This function downloads the CIFAR-10 dataset and generates N partitions. + + Each will be saved into the DATASET_DIRECTORY. + """ + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + + for partition_id in range(num_partitions): + partition = fds.load_partition(partition_id) + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + partition_train_test.save_to_disk( + f"./{DATASET_DIRECTORY}/cifar10_part_{partition_id + 1}" + ) + + +if __name__ == "__main__": + # Initialize argument parser + parser = argparse.ArgumentParser( + description="Save CIFAR-10 dataset partitions to disk" + ) + + # Add an optional positional argument for number of partitions + parser.add_argument( + "num_partitions", + type=int, + nargs="?", + default=2, + help="Number of partitions to create (default: 2)", + ) + + # Parse the arguments + args = parser.parse_args() + + # Call the function with the provided argument + save_dataset_to_disk(args.num_partitions) diff --git a/examples/flower-authentication/pyproject.toml b/examples/flower-authentication/pyproject.toml index e80a50b1eef9..059ff4cc3b19 100644 --- a/examples/flower-authentication/pyproject.toml +++ b/examples/flower-authentication/pyproject.toml @@ -3,18 +3,37 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "flower-client-authentication" -version = "0.1.0" -description = "Multi-Tenant Federated Learning with Flower and PyTorch" 
-authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +name = "authexample" +version = "1.0.0" +description = "Federated Learning with PyTorch and authenticated Flower" +license = "Apache-2.0" dependencies = [ - "flwr-nightly[rest,simulation]", - "torch==1.13.1", - "torchvision==0.14.1", - "tqdm==4.66.3" + "flwr>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", ] [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "authexample.server_app:app" +clientapp = "authexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-evaluate = 0.5 +local-epochs = 1 +learning-rate = 0.1 +batch-size = 32 + +[tool.flwr.federations] +default = "my-federation" + +[tool.flwr.federations.my-federation] +address = "127.0.0.1:9093" # Address of the SuperExec +root-certificates = "certificates/ca.crt" diff --git a/examples/flower-authentication/server.py b/examples/flower-authentication/server.py deleted file mode 100644 index 44908a0d9fc4..000000000000 --- a/examples/flower-authentication/server.py +++ /dev/null @@ -1,42 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics -from flwr.server import ServerApp -from flwr.server.strategy.fedavg import FedAvg - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - examples = [num_examples for num_examples, _ in metrics] - - # Multiply accuracy of each client by number of examples used - train_losses = [num_examples * m["train_loss"] for num_examples, m in metrics] - train_accuracies = [ - num_examples * m["train_accuracy"] for num_examples, m in metrics - ] - val_losses = [num_examples * m["val_loss"] for num_examples, m in metrics] - val_accuracies = [num_examples * m["val_accuracy"] for num_examples, m in metrics] - - # Aggregate and return
{ - "train_loss": sum(train_losses) / sum(examples), - "train_accuracy": sum(train_accuracies) / sum(examples), - "val_loss": sum(val_losses) / sum(examples), - "val_accuracy": sum(val_accuracies) / sum(examples), - } - - -# Define strategy -strategy = FedAvg( - fraction_fit=1.0, # Select all available clients - fraction_evaluate=0.0, # Disable evaluation - min_available_clients=2, - fit_metrics_aggregation_fn=weighted_average, -) - - -app = ServerApp( - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/flower-secure-aggregation/README.md b/examples/flower-secure-aggregation/README.md index 9e92aed01d9e..0a9056263db3 100644 --- a/examples/flower-secure-aggregation/README.md +++ b/examples/flower-secure-aggregation/README.md @@ -57,7 +57,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.25 +flwr run . --run-config "num-server-rounds=5 learning-rate=0.25" ``` To adapt the example for a practial usage, set `is-demo=false` like shown below. You might want to adjust the `num-shares` and `reconstruction-threshold` settings to suit your requirements. You can override those via `--run-config` as well. 
diff --git a/examples/flower-secure-aggregation/pyproject.toml b/examples/flower-secure-aggregation/pyproject.toml index d9be719653b0..89903184f60a 100644 --- a/examples/flower-secure-aggregation/pyproject.toml +++ b/examples/flower-secure-aggregation/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Secure Aggregation in Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/flower-secure-aggregation/secaggexample/server_app.py b/examples/flower-secure-aggregation/secaggexample/server_app.py index 0f1b594317fa..0b95d68e4183 100644 --- a/examples/flower-secure-aggregation/secaggexample/server_app.py +++ b/examples/flower-secure-aggregation/secaggexample/server_app.py @@ -40,6 +40,7 @@ def main(driver: Driver, context: Context) -> None: strategy = FedAvg( # Select all available clients fraction_fit=1.0, + min_fit_clients=5, # Disable evaluation in demo fraction_evaluate=(0.0 if is_demo else context.run_config["fraction-evaluate"]), min_available_clients=5, diff --git a/examples/flower-via-docker-compose/Dockerfile b/examples/flower-via-docker-compose/Dockerfile index ee6fee3103a5..9e7d4ff7abaa 100644 --- a/examples/flower-via-docker-compose/Dockerfile +++ b/examples/flower-via-docker-compose/Dockerfile @@ -1,5 +1,5 @@ # Use an official Python runtime as a parent image -FROM python:3.10-slim-buster +FROM python:3.11-slim-buster # Set the working directory in the container to /app WORKDIR /app @@ -10,10 +10,9 @@ COPY ./requirements.txt /app/requirements.txt # Install gcc and other dependencies RUN apt-get update && apt-get install -y \ gcc \ - python3-dev && \ - rm -rf /var/lib/apt/lists/* + pkg-config \ + libhdf5-dev \ + && rm -rf /var/lib/apt/lists/* # Install any needed packages specified in requirements.txt -RUN pip install -r requirements.txt - - +RUN pip install --no-cache-dir -r requirements.txt diff 
--git a/examples/flower-via-docker-compose/config/grafana.ini b/examples/flower-via-docker-compose/config/grafana.ini index 775f39d7ec22..208eb6e427bf 100644 --- a/examples/flower-via-docker-compose/config/grafana.ini +++ b/examples/flower-via-docker-compose/config/grafana.ini @@ -1,8 +1,3 @@ -[security] -allow_embedding = true -admin_user = admin -admin_password = admin - [dashboards] default_home_dashboard_path = /etc/grafana/provisioning/dashboards/dashboard_index.json diff --git a/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json index b52f19c57508..75ee224b0009 100644 --- a/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json +++ b/examples/flower-via-docker-compose/config/provisioning/dashboards/dashboard_index.json @@ -15,12 +15,12 @@ } ] }, - "description": "Simple exporter for cadvisor only", + "description": "Simple exporter for cadvisor and application metrics", "editable": true, "fiscalYearStartMonth": 0, "gnetId": 14282, "graphTooltip": 0, - "id": 12, + "id": 1, "links": [], "liveNow": false, "panels": [ @@ -36,10 +36,7 @@ "type": "row" }, { - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "description": "Averaged federated accuracy across clients", "fieldConfig": { "defaults": { @@ -113,6 +110,7 @@ "showLegend": false }, "tooltip": { + "maxHeight": 600, "mode": "single", "sort": "none" } @@ -124,7 +122,7 @@ "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, "disableTextWrap": false, - "editorMode": "builder", + "editorMode": "code", "expr": "model_accuracy", "fullMetaSearch": false, "includeNullMetadata": true, @@ -139,10 +137,7 @@ "type": "timeseries" }, { - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "description": "Averaged Federated Loss across clients", 
"fieldConfig": { "defaults": { @@ -213,6 +208,7 @@ "showLegend": false }, "tooltip": { + "maxHeight": 600, "mode": "single", "sort": "none" } @@ -224,7 +220,7 @@ "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, "disableTextWrap": false, - "editorMode": "builder", + "editorMode": "code", "expr": "model_loss", "fullMetaSearch": false, "includeNullMetadata": true, @@ -240,10 +236,7 @@ }, { "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "gridPos": { "h": 1, "w": 24, @@ -265,54 +258,132 @@ "type": "row" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { 
+ "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 7, "w": 24, "x": 0, "y": 10 }, - "hiddenSeries": false, "id": 15, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { @@ -328,44 +399,12 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "CPU Usage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:606", - "format": "percent", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:607", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "gridPos": { "h": 1, "w": 24, @@ -387,61 +426,138 @@ "type": "row" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - 
"datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 18 }, - "hiddenSeries": false, "id": 9, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + 
"placement": "right", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, - "editorMode": "code", + "editorMode": "builder", "expr": "sum(container_memory_rss{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}) by (name)", "hide": false, "interval": "", @@ -450,94 +566,142 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Memory Usage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:606", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:607", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 18 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null as zero", + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": 
"text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 14, "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, - "editorMode": "code", + "editorMode": "builder", "expr": "sum(container_memory_cache{instance=~\"$host\",name=~\"$container\",name=~\".+\", name 
!~ \"(prometheus|cadvisor|grafana)\"}) by (name)", "hide": false, "interval": "", @@ -546,44 +710,12 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Memory Cached", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:606", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:607", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "collapsed": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "gridPos": { "h": 1, "w": 24, @@ -605,63 +737,138 @@ "type": "row" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" 
+ }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 0, "y": 27 }, - "hiddenSeries": false, "id": 4, - "legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, - "editorMode": "code", + "editorMode": "builder", "expr": "sum(rate(container_network_receive_bytes_total{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}[10s])) by (name)", "hide": false, "interval": "", @@ -670,94 +877,142 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Received Network Traffic", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": 
"object:674", - "format": "Bps", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:675", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "client1": "red", - "client2": "blue", - "server": "yellow" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" + "datasource": {}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "client1" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "client2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "server" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, "y": 27 }, - "hiddenSeries": false, "id": 6, - 
"legend": { - "alignAsTable": true, - "avg": true, - "current": false, - "max": true, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } }, - "percentage": false, "pluginVersion": "10.2.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": { "type": "prometheus", "uid": "db69454e-e558-479e-b4fc-80db52bf91da" }, - "editorMode": "code", + "editorMode": "builder", "expr": "sum(rate(container_network_transmit_bytes_total{instance=~\"$host\",name=~\"$container\",name=~\".+\", name !~ \"(prometheus|cadvisor|grafana)\"}[10s])) by (name)", "interval": "", "legendFormat": "{{name}}", @@ -765,37 +1020,8 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Sent Network Traffic", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:832", - "format": "Bps", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:833", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "collapsed": false, @@ -824,10 +1050,7 @@ "type": "row" }, { - "datasource": { - "type": "prometheus", - "uid": "db69454e-e558-479e-b4fc-80db52bf91da" - }, + "datasource": {}, "fieldConfig": { "defaults": { "custom": { @@ -916,18 +1139,19 @@ "showHeader": true, "sortBy": [] }, - "pluginVersion": "10.2.2", + "pluginVersion": "11.0.0", "targets": [ { "datasource": { "type": "prometheus", "uid": 
"db69454e-e558-479e-b4fc-80db52bf91da" }, + "editorMode": "code", "expr": "(time() - container_start_time_seconds{instance=~\"$host\",name=~\"$container\",name=~\".+\"})/86400", "format": "table", "instant": true, "interval": "", - "legendFormat": "{{name}}", + "legendFormat": "__auto", "refId": "A" } ], @@ -969,8 +1193,8 @@ "type": "table" } ], - "refresh": "auto", - "schemaVersion": 38, + "refresh": "5s", + "schemaVersion": 39, "tags": [], "templating": { "list": [ @@ -1042,10 +1266,11 @@ "from": "now-15m", "to": "now" }, + "timeRangeUpdatedDuringEditOrView": false, "timepicker": {}, "timezone": "", - "title": "Cadvisor exporter Copy", + "title": "Flower Dashboard", "uid": "fcf2a8da-792c-4b9f-a22f-876820b53c2f", - "version": 2, + "version": 3, "weekStart": "" -} \ No newline at end of file +} diff --git a/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml b/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml index 7c8ce00fdcdc..2ae3f9c7757a 100644 --- a/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml +++ b/examples/flower-via-docker-compose/config/provisioning/datasources/prometheus-datasource.yml @@ -1,9 +1,9 @@ apiVersion: 1 datasources: -- name: Prometheus - type: prometheus - access: proxy - uid: db69454e-e558-479e-b4fc-80db52bf91da - url: http://host.docker.internal:9090 - isDefault: true + - name: Prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + isDefault: true + uid: db69454e-e558-479e-b4fc-80db52bf91da diff --git a/examples/flower-via-docker-compose/helpers/generate_docker_compose.py b/examples/flower-via-docker-compose/helpers/generate_docker_compose.py index 4067439a4544..8aecc583ed92 100644 --- a/examples/flower-via-docker-compose/helpers/generate_docker_compose.py +++ b/examples/flower-via-docker-compose/helpers/generate_docker_compose.py @@ -31,7 +31,6 @@ def create_docker_compose(args): ] 
docker_compose_content = f""" -version: '3' services: prometheus: image: prom/prometheus:latest @@ -63,7 +62,7 @@ def create_docker_compose(args): - /sys:/sys:ro - /var/lib/docker/:/var/lib/docker:ro - /dev/disk/:/dev/disk:ro - - /var/run/docker.sock:/var/run/docker.sock + - /var/run/docker.sock:/var/run/docker.sock grafana: image: grafana/grafana:latest @@ -84,7 +83,6 @@ def create_docker_compose(args): command: - --config=/etc/grafana/grafana.ini - server: container_name: server build: @@ -96,11 +94,12 @@ def create_docker_compose(args): DOCKER_HOST_IP: host.docker.internal volumes: - .:/app - - /var/run/docker.sock:/var/run/docker.sock + - /var/run/docker.sock:/var/run/docker.sock ports: - "6000:6000" - "8265:8265" - "8000:8000" + stop_signal: SIGINT depends_on: - prometheus - grafana @@ -134,6 +133,7 @@ def create_docker_compose(args): FLASK_RUN_PORT: {6000 + i} container_name: client{i} DOCKER_HOST_IP: host.docker.internal + stop_signal: SIGINT """ docker_compose_content += "volumes:\n grafana-storage:\n" diff --git a/examples/flowertune-llm/README.md b/examples/flowertune-llm/README.md new file mode 100644 index 000000000000..51cae73ae88a --- /dev/null +++ b/examples/flowertune-llm/README.md @@ -0,0 +1,118 @@ +--- +tags: [llm, nlp, LLama] +dataset: [Alpaca-GPT4] +framework: [PEFT, torch] +--- + +# FlowerTune LLM: Federated LLM Fine-tuning with Flower + +Large language models (LLMs), which have been trained on vast amounts of publicly accessible data, have shown remarkable effectiveness in a wide range of areas. +However, despite the fact that more data typically leads to improved performance, there is a concerning prospect that the supply of high-quality public data will deplete within a few years. +Federated LLM training could unlock access to an endless pool of distributed private data by allowing multiple data owners to collaboratively train a shared model without the need to exchange raw data. 
+ +This introductory example conducts federated instruction tuning with pretrained [OpenLLaMA](https://huggingface.co/openlm-research) models on [Alpaca-GPT4](https://huggingface.co/datasets/vicgalle/alpaca-gpt4) dataset. +We implement FlowerTune LLM by integrating a bundle of techniques: 1) We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. 2) The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. 3) We use Flower's Simulation Engine to simulate the LLM fine-tuning process in federated way, +which allows users to perform the training on a single GPU. + +## Set up the project + +Start by cloning the example project: + +```shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/flowertune-llm . \ + && rm -rf _tmp \ + && cd flowertune-llm +``` + +This will create a new directory called `flowertune-llm` with the following structure: + +```shell +flowertune-llm +├── flowertune_llm +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ ├── dataset.py # Defines your dataset and tokenizer +│ └── models.py # Defines your models +│ +├── pyproject.toml # Project metadata like dependencies and configs +├── test.py # Test pre-trained model +└── README.md +``` + +### Install dependencies and project + +Install the dependencies defined in `pyproject.toml` as well as the `flowertune_llm` package. + +```bash +pip install -e . +``` + +## Run the project + +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. + +### Run with the Simulation Engine + +```bash +flwr run . 
+``` + +This command will run FL simulations with a 4-bit [OpenLLaMA 3Bv2](https://huggingface.co/openlm-research/open_llama_3b_v2) model involving 2 clients per rounds for 100 FL rounds. You can override configuration parameters directly from the command line. Below are a few settings you might want to test: + +```bash +# Use OpenLLaMA-7B instead of 3B and 8-bits quantization +flwr run . --run-config "model.name='openlm-research/open_llama_7b_v2' model.quantization=8" + +# Run for 50 rounds but increasing the fraction of clients that participate per round to 25% +flwr run . --run-config "num-server-rounds=50 strategy.fraction-fit=0.25" +``` + +### Run with the Deployment Engine + +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. + +## Expected results + +![](_static/train_loss_smooth.png) + +As expected, OpenLLaMA-7B model works better than its 3B version with lower training loss. With the hyperparameters tested, the 8-bit model seems to deliver lower training loss for the smaller 3B model compared to its 4-bit version. + +## VRAM consumption + +| Models | 7-billion (8-bit) | 7-billion (4-bit) | 3-billion (8-bit) | 3-billion (4-bit) | +| :----: | :---------------: | :---------------: | :---------------: | :---------------: | +| VRAM | ~22.00 GB | ~16.50 GB | ~13.50 GB | ~10.60 GB | + +We make use of the [bitsandbytes](https://huggingface.co/docs/bitsandbytes/main/en/index) library in conjunction with [PEFT](https://huggingface.co/docs/peft/en/index) to derive LLMs that can be fine-tuned efficiently. +The above table shows the VRAM consumption per client for the different models considered in this example. +You can adjust the CPU/GPU resources you assign to each of the clients based on your device. +For example, it is easy to train 2 concurrent clients on each GPU (24 GB VRAM) if you choose 3-billion (4-bit) model. 
+Assigning 50% of the GPU's VRAM to each client by setting `options.backend.clientapp-gpus = 0.5` under `[tool.flwr.federations.local-simulation]` in `pyproject.toml`. + +## Test with your Questions + +We provide a script to test your trained model by passing your specified questions. For example: + +```bash +python test.py --peft-path=/path/to/trained-model-dir/ \ + --question="What is the ideal 1-day plan in London?" +``` + +An answer generated from federated trained 7-billion (8-bit) OpenLLaMA model: + +``` +Great choice. +London has so much to offer, and you can really soak up all the sights and sounds in just a single day. +Here's a suggested itinerary for you. +Start your day off with a hearty breakfast at an authentic British diner. +Then head to the iconic Big Ben and the Houses of Parliament to learn about the history of the city. +Next, make your way to Westminster Abbey to see the many historical monuments and memorials. +From there, cross the river Thames to the Tower of London, which is home to the Crown Jewels of England and Scotland. +Finally, end your day with a relaxing visit to the London Eye, the tallest Ferris wheel in Europe, for a beautiful view of the city. +``` + +The [`Vicuna`](https://huggingface.co/lmsys/vicuna-13b-v1.1) template we used in this example is for a chat assistant. +The generated answer is expected to be a multi-turn conversations. Feel free to try more interesting questions! 
diff --git a/examples/llm-flowertune/_static/train_loss_smooth.png b/examples/flowertune-llm/_static/train_loss_smooth.png similarity index 100% rename from examples/llm-flowertune/_static/train_loss_smooth.png rename to examples/flowertune-llm/_static/train_loss_smooth.png diff --git a/examples/flowertune-llm/flowertune_llm/__init__.py b/examples/flowertune-llm/flowertune_llm/__init__.py new file mode 100644 index 000000000000..e786a4d4b73d --- /dev/null +++ b/examples/flowertune-llm/flowertune_llm/__init__.py @@ -0,0 +1 @@ +"""flowertune_llm.""" diff --git a/examples/flowertune-llm/flowertune_llm/client_app.py b/examples/flowertune-llm/flowertune_llm/client_app.py new file mode 100644 index 000000000000..b61a733b29cf --- /dev/null +++ b/examples/flowertune-llm/flowertune_llm/client_app.py @@ -0,0 +1,126 @@ +"""flowertune-llm: A Flower / FlowerTune app.""" + +import os +import warnings +from typing import Dict, Tuple + +import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.common.config import unflatten_dict +from flwr.common.typing import NDArrays, Scalar +from omegaconf import DictConfig + +from transformers import TrainingArguments +from trl import SFTTrainer + +from flowertune_llm.dataset import ( + get_tokenizer_and_data_collator_and_propt_formatting, + load_data, + replace_keys, +) +from flowertune_llm.models import ( + cosine_annealing, + get_model, + set_parameters, + get_parameters, +) + +# Avoid warnings +os.environ["TOKENIZERS_PARALLELISM"] = "true" +os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1" +warnings.filterwarnings("ignore", category=UserWarning) + + +# pylint: disable=too-many-arguments +# pylint: disable=too-many-instance-attributes +class FlowerClient(NumPyClient): + """Standard Flower client for CNN training.""" + + def __init__( + self, + model_cfg: DictConfig, + train_cfg: DictConfig, + trainset, + tokenizer, + formatting_prompts_func, + data_collator, + num_rounds, + ): # pylint: 
disable=too-many-arguments + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.train_cfg = train_cfg + self.training_argumnets = TrainingArguments(**train_cfg.training_arguments) + self.tokenizer = tokenizer + self.formatting_prompts_func = formatting_prompts_func + self.data_collator = data_collator + self.num_rounds = num_rounds + self.trainset = trainset + + # instantiate model + self.model = get_model(model_cfg) + + def fit( + self, parameters: NDArrays, config: Dict[str, Scalar] + ) -> Tuple[NDArrays, int, Dict]: + """Implement distributed fit function for a given client.""" + set_parameters(self.model, parameters) + + new_lr = cosine_annealing( + int(config["current_round"]), + self.num_rounds, + self.train_cfg.learning_rate_max, + self.train_cfg.learning_rate_min, + ) + + self.training_argumnets.learning_rate = new_lr + self.training_argumnets.output_dir = config["save_path"] + + # Construct trainer + trainer = SFTTrainer( + model=self.model, + tokenizer=self.tokenizer, + args=self.training_argumnets, + max_seq_length=self.train_cfg.seq_length, + train_dataset=self.trainset, + formatting_func=self.formatting_prompts_func, + data_collator=self.data_collator, + ) + + # Do local training + results = trainer.train() + + return ( + get_parameters(self.model), + len(self.trainset), + {"train_loss": results.training_loss}, + ) + + +def client_fn(context: Context) -> FlowerClient: + """Create a Flower client representing a single organization.""" + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Let's get the client partition + client_trainset = load_data(partition_id, num_partitions, cfg.dataset.name) + ( + tokenizer, + data_collator, + formatting_prompts_func, + ) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) + + return 
FlowerClient( + cfg.model, + cfg.train, + client_trainset, + tokenizer, + formatting_prompts_func, + data_collator, + num_rounds, + ).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/examples/llm-flowertune/dataset.py b/examples/flowertune-llm/flowertune_llm/dataset.py similarity index 53% rename from examples/llm-flowertune/dataset.py rename to examples/flowertune-llm/flowertune_llm/dataset.py index 571be31f7fba..87595b3f9ccd 100644 --- a/examples/llm-flowertune/dataset.py +++ b/examples/flowertune-llm/flowertune_llm/dataset.py @@ -1,6 +1,11 @@ from transformers import AutoTokenizer from trl import DataCollatorForCompletionOnlyLM +from flwr_datasets.partitioner import IidPartitioner +from flwr_datasets import FederatedDataset + +FDS = None # Cache FederatedDataset + def formatting_prompts_func(example): output_texts = [] @@ -27,3 +32,31 @@ def get_tokenizer_and_data_collator_and_propt_formatting(model_name: str): ) return tokenizer, data_collator, formatting_prompts_func + + +def load_data(partition_id: int, num_partitions: int, dataset_name: str): + """Load partition data.""" + # Only initialize `FederatedDataset` once + global FDS + if FDS is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + FDS = FederatedDataset( + dataset=dataset_name, + partitioners={"train": partitioner}, + ) + client_trainset = FDS.load_partition(partition_id, "train") + client_trainset = client_trainset.rename_column("output", "response") + + return client_trainset + + +def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict diff --git a/examples/llm-flowertune/models.py b/examples/flowertune-llm/flowertune_llm/models.py similarity 
index 68% rename from examples/llm-flowertune/models.py rename to examples/flowertune-llm/flowertune_llm/models.py index f32c800cf2c1..e1609caeb2fc 100644 --- a/examples/llm-flowertune/models.py +++ b/examples/flowertune-llm/flowertune_llm/models.py @@ -2,10 +2,18 @@ import torch from omegaconf import DictConfig -from peft import LoraConfig, get_peft_model +from collections import OrderedDict +from peft import ( + LoraConfig, + get_peft_model, + get_peft_model_state_dict, + set_peft_model_state_dict, +) from peft.utils import prepare_model_for_kbit_training from transformers import AutoModelForCausalLM, BitsAndBytesConfig +from flwr.common.typing import NDArrays + def cosine_annealing( current_round: int, @@ -53,3 +61,17 @@ def get_model(model_cfg: DictConfig): ) return get_peft_model(model, peft_config) + + +def set_parameters(model, parameters: NDArrays) -> None: + """Change the parameters of the model using the given ones.""" + peft_state_dict_keys = get_peft_model_state_dict(model).keys() + params_dict = zip(peft_state_dict_keys, parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + set_peft_model_state_dict(model, state_dict) + + +def get_parameters(model) -> NDArrays: + """Return the parameters of the current net.""" + state_dict = get_peft_model_state_dict(model) + return [val.cpu().numpy() for _, val in state_dict.items()] diff --git a/examples/flowertune-llm/flowertune_llm/server_app.py b/examples/flowertune-llm/flowertune_llm/server_app.py new file mode 100644 index 000000000000..ff0da90c8b9b --- /dev/null +++ b/examples/flowertune-llm/flowertune_llm/server_app.py @@ -0,0 +1,94 @@ +"""flowertune-llm: A Flower / FlowerTune app.""" + +import os +from datetime import datetime + +from flwr.common import Context, ndarrays_to_parameters +from flwr.common.config import unflatten_dict +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from omegaconf import DictConfig + 
+from flowertune_llm.models import get_model, get_parameters, set_parameters +from flowertune_llm.dataset import replace_keys + + +# Get function that will be executed by the strategy's evaluate() method +# Here we use it to save global model checkpoints +def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): + """Return an evaluation function for saving global model.""" + + def evaluate(server_round: int, parameters, config): + # Save model + if server_round != 0 and ( + server_round == total_round or server_round % save_every_round == 0 + ): + # Init model + model = get_model(model_cfg) + set_parameters(model, parameters) + + model.save_pretrained(f"{save_path}/peft_{server_round}") + + return 0.0, {} + + return evaluate + + +def get_on_fit_config(save_path): + """Return a function that will be used to construct the config that the client's + fit() method will receive.""" + + def fit_config_fn(server_round: int): + fit_config = {} + fit_config["current_round"] = server_round + fit_config["save_path"] = save_path + return fit_config + + return fit_config_fn + + +def fit_weighted_average(metrics): + """Aggregate (federated) evaluation metrics.""" + # Multiply accuracy of each client by number of examples used + losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"train_loss": sum(losses) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + # Create output directory given current timestamp + current_time = datetime.now() + folder_name = current_time.strftime("%Y-%m-%d_%H-%M-%S") + save_path = os.path.join(os.getcwd(), f"results/{folder_name}") + os.makedirs(save_path, exist_ok=True) + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # 
Get initial model weights + init_model = get_model(cfg.model) + init_model_parameters = get_parameters(init_model) + init_model_parameters = ndarrays_to_parameters(init_model_parameters) + + # Define strategy + strategy = FedAvg( + fraction_fit=cfg.strategy.fraction_fit, + fraction_evaluate=cfg.strategy.fraction_evaluate, + on_fit_config_fn=get_on_fit_config(save_path), + fit_metrics_aggregation_fn=fit_weighted_average, + initial_parameters=init_model_parameters, + evaluate_fn=get_evaluate_fn( + cfg.model, cfg.train.save_every_round, num_rounds, save_path + ), + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Flower ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/flowertune-llm/pyproject.toml b/examples/flowertune-llm/pyproject.toml new file mode 100644 index 000000000000..4925f3cba15a --- /dev/null +++ b/examples/flowertune-llm/pyproject.toml @@ -0,0 +1,66 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "flowertune-llm" +version = "1.0.0" +description = "FlowerTune LLM: Federated LLM Fine-tuning with Flower" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]==1.12.0", + "flwr-datasets>=0.3.0", + "trl==0.8.1", + "bitsandbytes==0.43.0", + "scipy==1.13.0", + "peft==0.6.2", + "fschat[model_worker,webui]==0.2.35", + "transformers==4.39.3", + "sentencepiece==0.2.0", + "omegaconf==2.3.0", + "hf_transfer==0.1.8", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "flowertune_llm.server_app:app" +clientapp = "flowertune_llm.client_app:app" + +[tool.flwr.app.config] +dataset.name = "vicgalle/alpaca-gpt4" +model.name = "openlm-research/open_llama_3b_v2" +model.quantization = 4 +model.gradient-checkpointing = true +model.lora.peft-lora-r = 32 +model.lora.peft-lora-alpha = 64 +train.save-every-round = 5 +train.learning-rate-max = 
5e-5 +train.learning-rate-min = 1e-6 +train.seq-length = 512 +train.training-arguments.output-dir = "" +train.training-arguments.learning-rate = "" +train.training-arguments.per-device-train-batch-size = 16 +train.training-arguments.gradient-accumulation-steps = 1 +train.training-arguments.logging-steps = 10 +train.training-arguments.num-train-epochs = 3 +train.training-arguments.max-steps = 10 +train.training-arguments.save-steps = 1000 +train.training-arguments.save-total-limit = 10 +train.training-arguments.gradient-checkpointing = true +train.training-arguments.lr-scheduler-type = "constant" +strategy.fraction-fit = 0.1 +strategy.fraction-evaluate = 0.0 +num-server-rounds = 100 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 20 +options.backend.client-resources.num-cpus = 8 +options.backend.client-resources.num-gpus = 1.0 diff --git a/examples/llm-flowertune/test.py b/examples/flowertune-llm/test.py similarity index 100% rename from examples/llm-flowertune/test.py rename to examples/flowertune-llm/test.py diff --git a/examples/vit-finetune/README.md b/examples/flowertune-vit/README.md similarity index 56% rename from examples/vit-finetune/README.md rename to examples/flowertune-vit/README.md index 957c0eda0b68..48327880f412 100644 --- a/examples/vit-finetune/README.md +++ b/examples/flowertune-vit/README.md @@ -1,68 +1,78 @@ --- -title: Federated finetuning of a ViT -tags: [finetuneing, vision, fds] +tags: [finetuning, vision, fds] dataset: [Oxford Flower-102] framework: [torch, torchvision] --- -# Federated finetuning of a ViT +# Federated Finetuning of a Vision Transformer with Flower -This example shows how to use Flower's Simulation Engine to federate the finetuning of a Vision Transformer ([ViT-Base-16](https://pytorch.org/vision/main/models/generated/torchvision.models.vit_b_16.html#torchvision.models.vit_b_16)) that has been pretrained on ImageNet. 
To keep things simple we'll be finetuning it to [Oxford Flower-102](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) datasset, creating 20 partitions using [Flower Datasets](https://flower.ai/docs/datasets/). We'll be finetuning just the exit `head` of the ViT, this means that the training is not that costly and each client requires just ~1GB of VRAM (for a batch size of 32 images). +This example shows how to use Flower's Simulation Engine to federate the finetuning of a Vision Transformer ([ViT-Base-16](https://pytorch.org/vision/main/models/generated/torchvision.models.vit_b_16.html#torchvision.models.vit_b_16)) that has been pretrained on ImageNet. To keep things simple we'll be finetuning it to [Oxford Flower-102](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) datasset, creating 20 partitions using [Flower Datasets](https://flower.ai/docs/datasets/). We'll be finetuning just the exit `head` of the ViT, this means that the training is not that costly and each client requires just ~1GB of VRAM (for a batch size of 32 images) if you choose to use a GPU. -## Running the example +## Set up the project -If you haven't cloned the Flower repository already you might want to clone code example and discard the rest. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +### Clone the project + +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/vit-finetune . && rm -rf flower && cd vit-finetune +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/flowertune-vit . 
\ + && rm -rf _tmp \ + && cd flowertune-vit ``` -This will create a new directory called `vit-finetune` containing the following files: +This will create a new directory called `flowertune-vit` with the following structure: +```shell +flowertune-vit +├── vitexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` --- README.md <- Your're reading this right now --- main.py <- Main file that launches the simulation --- client.py <- Contains Flower client code and ClientApp --- server.py <- Contains Flower server code and ServerApp --- model.py <- Defines model and train/eval functions --- dataset.py <- Downloads, partitions and processes dataset --- pyproject.toml <- Example dependencies, installable using Poetry --- requirements.txt <- Example dependencies, installable using pip -``` - -### Installing Dependencies -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +### Install dependencies and project -#### Poetry +Install the dependencies defined in `pyproject.toml` as well as the `vitexample` package. -```shell -poetry install -poetry shell +```bash +pip install -e . ``` -#### pip +## Run the project -With an activated environemnt, install the dependencies for this example: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. 
If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```shell -pip install -r requirements.txt +### Run with the Simulation Engine + +> \[!TIP\] +> This example runs faster when the `ClientApp`s have access to a GPU. If your system has one, you can make use of it by configuring the `backend.client-resources` component in `pyproject.toml`. If you want to try running the example with GPU right away, use the `local-simulation-gpu` federation as shown below. + +```bash +# Run with the default federation (CPU only) +flwr run . ``` -### Run with `start_simulation()` +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: + +```bash +flwr run . --run-config "num-server-rounds=5 batch-size=64" +``` -Running the example is quite straightforward. You can control the number of rounds `--num-rounds` (which defaults to 20). +Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. By default, at most 5x`ClientApp` will run in parallel in the available GPU. You can tweak the degree of parallelism by adjusting the settings of this federation in the `pyproject.toml`. ```bash -python main.py +# Run with the `local-simulation-gpu` federation +flwr run . local-simulation-gpu ``` ![](_static/central_evaluation.png) Running the example as-is on an RTX 3090Ti should take ~15s/round running 5 clients in parallel (plus the _global model_ during centralized evaluation stages) in a single GPU. Note that more clients could fit in VRAM, but since the GPU utilization is high (99%-100%) we are probably better off not doing that (at least in this case). -You can adjust the `client_resources` passed to `start_simulation()` so more/less clients run at the same time in the GPU. 
Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. - ```bash +---------------------------------------------------------------------------------------+ | NVIDIA-SMI 535.161.07 Driver Version: 535.161.07 CUDA Version: 12.2 | @@ -90,12 +100,7 @@ You can adjust the `client_resources` passed to `start_simulation()` so more/les +---------------------------------------------------------------------------------------+ ``` -### Run with Flower Next (preview) +### Run with the Deployment Engine -```bash -flower-simulation \ - --client-app=client:app \ - --server-app=server:app \ - --num-supernodes=20 \ - --backend-config='{"client_resources": {"num_cpus":4, "num_gpus":0.25}}' -``` +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/vit-finetune/_static/central_evaluation.png b/examples/flowertune-vit/_static/central_evaluation.png similarity index 100% rename from examples/vit-finetune/_static/central_evaluation.png rename to examples/flowertune-vit/_static/central_evaluation.png diff --git a/examples/flowertune-vit/pyproject.toml b/examples/flowertune-vit/pyproject.toml new file mode 100644 index 000000000000..bf280de8af95 --- /dev/null +++ b/examples/flowertune-vit/pyproject.toml @@ -0,0 +1,43 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "vitexample" +version = "1.0.0" +description = "Federated Finetuning of a Vision Transformer with Flower" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]==1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "vitexample.server_app:app" +clientapp = "vitexample.client_app:app" + 
+[tool.flwr.app.config] +num-server-rounds = 3 +batch-size = 32 +learning-rate = 0.01 +dataset-name = "nelorth/oxford-flowers" +num-classes = 102 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 + +[tool.flwr.federations.local-simulation-gpu] +options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 2 # each ClientApp assumes to use 2CPUs +options.backend.client-resources.num-gpus = 0.2 # at most 5 ClientApp will run in a given GPU diff --git a/examples/flowertune-vit/vitexample/__init__.py b/examples/flowertune-vit/vitexample/__init__.py new file mode 100644 index 000000000000..f0ce539fac90 --- /dev/null +++ b/examples/flowertune-vit/vitexample/__init__.py @@ -0,0 +1 @@ +"""vitexample: A Flower / PyTorch app with Vision Transformers.""" diff --git a/examples/flowertune-vit/vitexample/client_app.py b/examples/flowertune-vit/vitexample/client_app.py new file mode 100644 index 000000000000..59143f1d25f8 --- /dev/null +++ b/examples/flowertune-vit/vitexample/client_app.py @@ -0,0 +1,62 @@ +"""vitexample: A Flower / PyTorch app with Vision Transformers.""" + +import torch +from torch.utils.data import DataLoader + +from flwr.common import Context +from flwr.client import NumPyClient, ClientApp + + +from vitexample.task import apply_train_transforms, get_dataset_partition +from vitexample.task import get_model, set_params, get_params, train + + +class FedViTClient(NumPyClient): + def __init__(self, trainloader, learning_rate, num_classes): + self.trainloader = trainloader + self.learning_rate = learning_rate + self.model = get_model(num_classes) + + # Determine device + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.model.to(self.device) # send model to device + + def fit(self, parameters, config): + set_params(self.model, parameters) + + # Set optimizer + optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate) + # 
Train locally + avg_train_loss = train( + self.model, self.trainloader, optimizer, epochs=1, device=self.device + ) + # Return locally-finetuned part of the model + return ( + get_params(self.model), + len(self.trainloader.dataset), + {"train_loss": avg_train_loss}, + ) + + +def client_fn(context: Context): + """Return a FedViTClient.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + dataset_name = context.run_config["dataset-name"] + trainpartition = get_dataset_partition(num_partitions, partition_id, dataset_name) + + batch_size = context.run_config["batch-size"] + lr = context.run_config["learning-rate"] + num_classes = context.run_config["num-classes"] + trainset = trainpartition.with_transform(apply_train_transforms) + + trainloader = DataLoader( + trainset, batch_size=batch_size, num_workers=2, shuffle=True + ) + + return FedViTClient(trainloader, lr, num_classes).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/flowertune-vit/vitexample/server_app.py b/examples/flowertune-vit/vitexample/server_app.py new file mode 100644 index 000000000000..f37215df5eb9 --- /dev/null +++ b/examples/flowertune-vit/vitexample/server_app.py @@ -0,0 +1,77 @@ +"""vitexample: A Flower / PyTorch app with Vision Transformers.""" + +from logging import INFO + +import torch +from datasets import Dataset, load_dataset +from torch.utils.data import DataLoader + +from vitexample.task import apply_eval_transforms +from vitexample.task import get_model, set_params, test, get_params + +from flwr.common import Context, ndarrays_to_parameters +from flwr.common.logger import log +from flwr.server import ServerApp, ServerConfig, ServerAppComponents +from flwr.server.strategy import FedAvg + + +def get_evaluate_fn( + centralized_testset: Dataset, + num_classes: int, +): + """Return an evaluation function for centralized 
evaluation.""" + + def evaluate(server_round, parameters, config): + """Use the entire Oxford Flowers-102 test set for evaluation.""" + + # Determine device + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + # Instantiate model and apply current global parameters + model = get_model(num_classes) + set_params(model, parameters) + model.to(device) + + # Apply transform to dataset + testset = centralized_testset.with_transform(apply_eval_transforms) + + testloader = DataLoader(testset, batch_size=128) + # Run evaluation + loss, accuracy = test(model, testloader, device=device) + log(INFO, f"round: {server_round} -> acc: {accuracy:.4f}, loss: {loss: .4f}") + + return loss, {"accuracy": accuracy} + + return evaluate + + +def server_fn(context: Context): + + # Define tested for central evaluation + dataset_name = context.run_config["dataset-name"] + dataset = load_dataset(dataset_name) + test_set = dataset["test"] + + # Set initial global model + num_classes = context.run_config["num-classes"] + ndarrays = get_params(get_model(num_classes)) + init_parameters = ndarrays_to_parameters(ndarrays) + + # Configure the strategy + strategy = FedAvg( + fraction_fit=0.5, # Sample 50% of available clients + fraction_evaluate=0.0, # No federated evaluation + evaluate_fn=get_evaluate_fn( + test_set, num_classes + ), # Global evaluation function + initial_parameters=init_parameters, + ) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/flowertune-vit/vitexample/task.py b/examples/flowertune-vit/vitexample/task.py new file mode 100644 index 000000000000..3512d1891db2 --- /dev/null +++ b/examples/flowertune-vit/vitexample/task.py @@ -0,0 +1,131 @@ +"""vitexample: A Flower / PyTorch app with Vision Transformers.""" + +from collections import OrderedDict + 
+import torch +from torchvision.models import vit_b_16, ViT_B_16_Weights +from torchvision.transforms import ( + Compose, + Normalize, + ToTensor, + RandomResizedCrop, + Resize, + CenterCrop, +) + +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + +def get_model(num_classes: int): + """Return a pretrained ViT with all layers frozen except output head.""" + + # Instantiate a pre-trained ViT-B on ImageNet + model = vit_b_16(weights=ViT_B_16_Weights.IMAGENET1K_V1) + + # We're going to federated the finetuning of this model + # using (by default) the Oxford Flowers-102 dataset. One easy way + # to achieve this is by re-initializing the output block of the + # ViT so it outputs 102 clases instead of the default 1k + in_features = model.heads[-1].in_features + model.heads[-1] = torch.nn.Linear(in_features, num_classes) + + # Disable gradients for everything + model.requires_grad_(False) + # Now enable just for output head + model.heads.requires_grad_(True) + + return model + + +def set_params(model, parameters): + """Apply the parameters to model head.""" + finetune_layers = model.heads + params_dict = zip(finetune_layers.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + finetune_layers.load_state_dict(state_dict, strict=True) + + +def get_params(model): + """Get parameters from model head as ndarrays.""" + finetune_layers = model.heads + return [val.cpu().numpy() for _, val in finetune_layers.state_dict().items()] + + +def train(net, trainloader, optimizer, epochs, device): + """Train the model on the training set.""" + criterion = torch.nn.CrossEntropyLoss() + net.train() + net.to(device) + avg_loss = 0 + # A very standard training loop for image classification + for _ in range(epochs): + for batch in trainloader: + images, labels = batch["image"].to(device), batch["label"].to(device) + optimizer.zero_grad() + loss = criterion(net(images), labels) + avg_loss += 
loss.item() / labels.shape[0] + loss.backward() + optimizer.step() + + return avg_loss / len(trainloader) + + +def test(net, testloader, device: str): + """Validate the network on the entire test set.""" + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + net.to(device) + net.eval() + with torch.no_grad(): + for data in testloader: + images, labels = data["image"].to(device), data["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + _, predicted = torch.max(outputs.data, 1) + correct += (predicted == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + + +fds = None + + +def get_dataset_partition(num_partitions: int, partition_id: int, dataset_name: str): + """Get Oxford Flowers datasets and partition it.""" + global fds + if fds is None: + # Get dataset (by default Oxford Flowers-102) and create IID partitions + partitioner = IidPartitioner(num_partitions) + fds = FederatedDataset( + dataset=dataset_name, partitioners={"train": partitioner} + ) + + return fds.load_partition(partition_id) + + +def apply_eval_transforms(batch): + """Apply a very standard set of image transforms.""" + transforms = Compose( + [ + Resize((256, 256)), + CenterCrop((224, 224)), + ToTensor(), + Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] + ) + batch["image"] = [transforms(img) for img in batch["image"]] + return batch + + +def apply_train_transforms(batch): + """Apply a very standard set of image transforms.""" + transforms = Compose( + [ + RandomResizedCrop((224, 224)), + ToTensor(), + Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] + ) + batch["image"] = [transforms(img) for img in batch["image"]] + return batch diff --git a/examples/ios/pyproject.toml b/examples/ios/pyproject.toml index 2e55b14cf761..03ea89ea3e54 100644 --- a/examples/ios/pyproject.toml +++ b/examples/ios/pyproject.toml @@ -9,5 +9,5 @@ description = "Example Server for Flower 
iOS/CoreML" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" diff --git a/examples/llm-flowertune/README.md b/examples/llm-flowertune/README.md deleted file mode 100644 index 46076e0b2078..000000000000 --- a/examples/llm-flowertune/README.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: Federated LLM Fine-tuning with Flower -tags: [llm, nlp, LLama2] -dataset: [Alpaca-GPT4] -framework: [PEFT, torch] ---- - -# LLM FlowerTune: Federated LLM Fine-tuning with Flower - -Large language models (LLMs), which have been trained on vast amounts of publicly accessible data, have shown remarkable effectiveness in a wide range of areas. -However, despite the fact that more data typically leads to improved performance, there is a concerning prospect that the supply of high-quality public data will deplete within a few years. -Federated LLM training could unlock access to an endless pool of distributed private data by allowing multiple data owners to collaboratively train a shared model without the need to exchange raw data. - -This introductory example conducts federated instruction tuning with pretrained [LLama2](https://huggingface.co/openlm-research) models on [Alpaca-GPT4](https://huggingface.co/datasets/vicgalle/alpaca-gpt4) dataset. -We implement LLM FlowerTune by integrating a bundle of techniques: 1) We use [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the dataset. 2) The fine-tuning is done using the [🤗PEFT](https://huggingface.co/docs/peft/en/index) library. 3) We use Flower's Simulation Engine to simulate the LLM fine-tuning process in federated way, -which allows users to perform the training on a single GPU. - -## Environment Setup - -Start by cloning the code example. 
We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/llm-flowertune . && rm -rf flower && cd llm-flowertune -``` - -This will create a new directory called `llm-flowertune` containing the following files: - -``` --- README.md <- Your're reading this right now --- main.py <- Start fed-LLM simulation --- client.py <- Flower client constructor --- model.py <- Model build --- dataset.py <- Dataset and tokenizer build --- utils.py <- Utility functions --- test.py <- Test pre-trained model --- app.py <- ServerApp/ClientApp for Flower-Next --- conf/config.yaml <- Configuration file --- requirements.txt <- Example dependencies -``` - -### Installing dependencies - -Project dependencies are defined in `requirements.txt`. Install them with: - -```shell -pip install -r requirements.txt -``` - -## Run LLM Fine-tuning - -With an activated Python environment, run the example with default config values. The config is in `conf/config.yaml` and is loaded automatically. - -```bash -# Run with default config -python main.py -``` - -This command will run FL simulations with a 4-bit [OpenLLaMA 7Bv2](https://huggingface.co/openlm-research/open_llama_7b_v2) model involving 2 clients per rounds for 100 FL rounds. You can override configuration parameters directly from the command line. Below are a few settings you might want to test: - -```bash -# Use OpenLLaMA-3B instead of 7B and 8-bits quantization -python main.py model.name="openlm-research/open_llama_3b_v2" model.quantization=8 - -# Run for 50 rounds but increasing the fraction of clients that participate per round to 25% -python main.py num_rounds=50 fraction_fit.fraction_fit=0.25 -``` - -## Expected Results - -![](_static/train_loss_smooth.png) - -As expected, LLama2-7B model works better than its 3B version with lower training loss. 
With the hyperparameters tested, the 8-bit model seems to deliver lower training loss for the smaller 3B model compared to its 4-bit version. - -You can run all 8 experiments with a single command as: - -```bash -python main.py --multirun model.name="openlm-research/open_llama_7b_v2","openlm-research/open_llama_3b_v2" model.quantization=8,4 strategy.fraction_fit=0.1,0.2 -``` - -## VRAM Consumption - -| Models | 7-billion (8-bit) | 7-billion (4-bit) | 3-billion (8-bit) | 3-billion (4-bit) | -| :----: | :---------------: | :---------------: | :---------------: | :---------------: | -| VRAM | ~22.00 GB | ~16.50 GB | ~13.50 GB | ~10.60 GB | - -We make use of the [bitsandbytes](https://huggingface.co/docs/bitsandbytes/main/en/index) library in conjunction with [PEFT](https://huggingface.co/docs/peft/en/index) to derive LLMs that can be fine-tuned efficiently. -The above table shows the VRAM consumption per client for the different models considered in this example. -You can adjust the CPU/GPU resources you assign to each of the clients based on your device. -For example, it is easy to train 2 concurrent clients on each GPU (24 GB VRAM) if you choose 3-billion (4-bit) model. - -```bash -# This will assign 50% of the GPU's VRAM to each client. -python main.py model.name="openlm-research/open_llama_3b_v2" model.quantization=4 client_resources.num_gpus=0.5 -``` - -## Test with your Questions - -We provide a script to test your trained model by passing your specified questions. For example: - -```bash -python test.py --peft-path=/path/to/trained-model-dir/ \ - --question="What is the ideal 1-day plan in London?" -``` - -An answer generated from federated trained 7-billion (8-bit) LLama2 model: - -``` -Great choice. -London has so much to offer, and you can really soak up all the sights and sounds in just a single day. -Here's a suggested itinerary for you. -Start your day off with a hearty breakfast at an authentic British diner. 
-Then head to the iconic Big Ben and the Houses of Parliament to learn about the history of the city. -Next, make your way to Westminster Abbey to see the many historical monuments and memorials. -From there, cross the river Thames to the Tower of London, which is home to the Crown Jewels of England and Scotland. -Finally, end your day with a relaxing visit to the London Eye, the tallest Ferris wheel in Europe, for a beautiful view of the city. -``` - -The [`Vicuna`](https://huggingface.co/lmsys/vicuna-13b-v1.1) template we used in this example is for a chat assistant. -The generated answer is expected to be a multi-turn conversations. Feel free to try more interesting questions! - -## Run with Flower Next (preview) - -We conduct a 2-client setting to demonstrate how to run federated LLM fine-tuning with Flower Next. -Please follow the steps below: - -1. Start the long-running Flower server (SuperLink) - ```bash - flower-superlink --insecure - ``` -2. Start the long-running Flower client (SuperNode) - ```bash - # In a new terminal window, start the first long-running Flower client: - flower-client-app app:client1 --insecure - ``` - ```bash - # In another new terminal window, start the second long-running Flower client: - flower-client-app app:client2 --insecure - ``` -3. 
Run the Flower App - ```bash - # With both the long-running server (SuperLink) and two clients (SuperNode) up and running, - # we can now run the actual Flower App: - flower-server-app app:server --insecure - ``` diff --git a/examples/llm-flowertune/app.py b/examples/llm-flowertune/app.py deleted file mode 100644 index db6595c94d31..000000000000 --- a/examples/llm-flowertune/app.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -import warnings - -import flwr as fl -from flwr_datasets import FederatedDataset -from hydra import compose, initialize - -from client import gen_client_fn -from dataset import get_tokenizer_and_data_collator_and_propt_formatting -from utils import fit_weighted_average, get_on_fit_config - -warnings.filterwarnings("ignore", category=UserWarning) - -NUM_ROUNDS = 100 -save_path = "./results/" - -with initialize(config_path="conf"): - cfg = compose(config_name="config") - -# Reset the number of number -cfg.num_rounds = NUM_ROUNDS -cfg.train.num_rounds = NUM_ROUNDS - -# Create output directory -if not os.path.exists(save_path): - os.mkdir(save_path) - -# Partition dataset and get dataloaders -# We set the number of partitions to 20 for fast processing. -fds = FederatedDataset( - dataset=cfg.dataset.name, partitioners={"train": cfg.num_clients} -) -( - tokenizer, - data_collator, - formatting_prompts_func, -) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) - - -# ClientApp for client #1 (Flower Next) -client1 = fl.client.ClientApp( - client_fn=gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - partition_id=0, - api=True, - ), -) - - -# ClientApp for client #2 (Flower Next) -client2 = fl.client.ClientApp( - client_fn=gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - partition_id=1, - api=True, - ), -) - - -# Instantiate strategy. 
-strategy = fl.server.strategy.FedAvg( - min_available_clients=2, # Simulate a 2-client setting - fraction_fit=1.0, - fraction_evaluate=0.0, # no client evaluation - on_fit_config_fn=get_on_fit_config(), - fit_metrics_aggregation_fn=fit_weighted_average, -) - -# ServerApp for Flower-Next -server = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, -) diff --git a/examples/llm-flowertune/client.py b/examples/llm-flowertune/client.py deleted file mode 100644 index c81333f664b3..000000000000 --- a/examples/llm-flowertune/client.py +++ /dev/null @@ -1,129 +0,0 @@ -from collections import OrderedDict -from typing import Callable, Dict, Tuple - -import flwr as fl -import torch -from flwr.common.typing import NDArrays, Scalar -from omegaconf import DictConfig -from peft import get_peft_model_state_dict, set_peft_model_state_dict -from transformers import TrainingArguments -from trl import SFTTrainer - -from models import cosine_annealing, get_model - - -# pylint: disable=too-many-arguments -class FlowerClient( - fl.client.NumPyClient -): # pylint: disable=too-many-instance-attributes - """Standard Flower client for CNN training.""" - - def __init__( - self, - model_cfg: DictConfig, - train_cfg: DictConfig, - trainset, - tokenizer, - formatting_prompts_func, - data_collator, - save_path, - ): # pylint: disable=too-many-arguments - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.train_cfg = train_cfg - self.training_argumnets = TrainingArguments(**train_cfg.training_arguments) - self.tokenizer = tokenizer - self.formatting_prompts_func = formatting_prompts_func - self.data_collator = data_collator - self.save_path = save_path - - # instantiate model - self.model = get_model(model_cfg) - - self.trainset = trainset - - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: - """Return the parameters of the current net.""" - - state_dict = get_peft_model_state_dict(self.model) - return 
[val.cpu().numpy() for _, val in state_dict.items()] - - def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict]: - """Implement distributed fit function for a given client.""" - set_parameters(self.model, parameters) - - new_lr = cosine_annealing( - int(config["current_round"]), - self.train_cfg.num_rounds, - self.train_cfg.learning_rate_max, - self.train_cfg.learning_rate_min, - ) - - self.training_argumnets.learning_rate = new_lr - self.training_argumnets.output_dir = self.save_path - - # Construct trainer - trainer = SFTTrainer( - model=self.model, - tokenizer=self.tokenizer, - args=self.training_argumnets, - max_seq_length=self.train_cfg.seq_length, - train_dataset=self.trainset, - formatting_func=self.formatting_prompts_func, - data_collator=self.data_collator, - ) - - # Do local training - results = trainer.train() - - return ( - self.get_parameters({}), - len(self.trainset), - {"train_loss": results.training_loss}, - ) - - -def set_parameters(model, parameters: NDArrays) -> None: - """Change the parameters of the model using the given ones.""" - peft_state_dict_keys = get_peft_model_state_dict(model).keys() - params_dict = zip(peft_state_dict_keys, parameters) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - set_peft_model_state_dict(model, state_dict) - - -def gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - model_cfg: DictConfig, - train_cfg: DictConfig, - save_path: str, - partition_id: int = 0, - api: bool = False, -) -> Callable[[str], FlowerClient]: # pylint: disable=too-many-arguments - """Generate the client function that creates the Flower Clients.""" - - def client_fn(cid: str) -> FlowerClient: - """Create a Flower client representing a single organization.""" - - # Let's get the partition corresponding to the i-th client - client_trainset = ( - fds.load_partition(partition_id, "train") - if api - else fds.load_partition(int(cid), "train") - ) - 
client_trainset = client_trainset.rename_column("output", "response") - - return FlowerClient( - model_cfg, - train_cfg, - client_trainset, - tokenizer, - formatting_prompts_func, - data_collator, - save_path, - ).to_client() - - return client_fn diff --git a/examples/llm-flowertune/conf/config.yaml b/examples/llm-flowertune/conf/config.yaml deleted file mode 100644 index 0b769d351479..000000000000 --- a/examples/llm-flowertune/conf/config.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# Federated Instruction Tuning on General Dataset ---- - -num_clients: 20 # total number of clients -num_rounds: 100 - -dataset: - name: "vicgalle/alpaca-gpt4" - -model: - name: "openlm-research/open_llama_7b_v2" - quantization: 4 # 8 or 4 if you want to do quantization with BitsAndBytes - gradient_checkpointing: True - lora: - peft_lora_r: 32 - peft_lora_alpha: 64 - -train: - num_rounds: ${num_rounds} - save_every_round: 5 - learning_rate_max: 5e-5 - learning_rate_min: 1e-6 - seq_length: 512 - training_arguments: - output_dir: null # to be set by hydra - learning_rate: null # to be set by the client - per_device_train_batch_size: 16 - gradient_accumulation_steps: 1 - logging_steps: 10 - num_train_epochs: 3 - max_steps: 10 - report_to: null - save_steps: 1000 - save_total_limit: 10 - gradient_checkpointing: ${model.gradient_checkpointing} - lr_scheduler_type: "constant" - -strategy: - _target_: flwr.server.strategy.FedAvg - fraction_fit: 0.1 # sample 10% of clients (i.e. 
2 per round) - fraction_evaluate: 0.0 # no client evaluation - -client_resources: - num_cpus: 8 - num_gpus: 1.0 diff --git a/examples/llm-flowertune/main.py b/examples/llm-flowertune/main.py deleted file mode 100644 index ec8308601efb..000000000000 --- a/examples/llm-flowertune/main.py +++ /dev/null @@ -1,92 +0,0 @@ -import pickle -import warnings - -import flwr as fl -import hydra -from flwr_datasets import FederatedDataset -from hydra.core.hydra_config import HydraConfig -from hydra.utils import instantiate -from omegaconf import DictConfig, OmegaConf - -from client import gen_client_fn -from dataset import get_tokenizer_and_data_collator_and_propt_formatting -from utils import fit_weighted_average, get_evaluate_fn, get_on_fit_config - -warnings.filterwarnings("ignore", category=UserWarning) - - -@hydra.main(config_path="conf", config_name="config", version_base=None) -def main(cfg: DictConfig) -> None: - """Run federated LLM fine-tuning. - - Parameters - ---------- - cfg : DictConfig - An omegaconf object that stores the hydra config. - """ - # Print config structured as YAML - print(OmegaConf.to_yaml(cfg)) - - # Partition dataset and get dataloaders - fds = FederatedDataset( - dataset=cfg.dataset.name, partitioners={"train": cfg.num_clients} - ) - ( - tokenizer, - data_collator, - formatting_prompts_func, - ) = get_tokenizer_and_data_collator_and_propt_formatting( - cfg.model.name, - ) - - # Hydra automatically creates an output directory - # Let's retrieve it and save some results there - save_path = HydraConfig.get().runtime.output_dir - - # Prepare function that will be used to spawn each client - client_fn = gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - ) - - # Instantiate strategy according to config. Here we pass other arguments - # that are only defined at run time. 
- strategy = instantiate( - cfg.strategy, - on_fit_config_fn=get_on_fit_config(), - fit_metrics_aggregation_fn=fit_weighted_average, - evaluate_fn=get_evaluate_fn( - cfg.model, cfg.train.save_every_round, cfg.num_rounds, save_path - ), - ) - - # Start simulation - history = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=cfg.num_clients, - config=fl.server.ServerConfig(num_rounds=cfg.num_rounds), - client_resources={ - "num_cpus": cfg.client_resources.num_cpus, - "num_gpus": cfg.client_resources.num_gpus, - }, - strategy=strategy, - ) - - # Experiment completed. Now we save the results and - # generate plots using the `history` - print("................") - print(history) - - # Save results as a Python pickle using a file_path - # the directory created by Hydra for each run - with open(f"{save_path}/results.pkl", "wb") as f: - pickle.dump(history, f) - - -if __name__ == "__main__": - main() diff --git a/examples/llm-flowertune/requirements.txt b/examples/llm-flowertune/requirements.txt deleted file mode 100644 index 2d0e65da3615..000000000000 --- a/examples/llm-flowertune/requirements.txt +++ /dev/null @@ -1,10 +0,0 @@ -flwr[rest,simulation]>=1.8.0, <2.0 -flwr-datasets>=0.0.2 -hydra-core==1.3.2 -trl==0.7.2 -bitsandbytes==0.41.3 -scipy==1.11.2 -peft==0.4.0 -fschat[model_worker,webui]==0.2.35 -transformers==4.38.1 -hf_transfer==0.1.8 diff --git a/examples/llm-flowertune/utils.py b/examples/llm-flowertune/utils.py deleted file mode 100644 index bbb607810537..000000000000 --- a/examples/llm-flowertune/utils.py +++ /dev/null @@ -1,43 +0,0 @@ -from client import set_parameters -from models import get_model - - -# Get function that will be executed by the strategy's evaluate() method -# Here we use it to save global model checkpoints -def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): - """Return an evaluation function for saving global model.""" - - def evaluate(server_round: int, parameters, config): - # Save model - if 
server_round != 0 and ( - server_round == total_round or server_round % save_every_round == 0 - ): - # Init model - model = get_model(model_cfg) - set_parameters(model, parameters) - - model.save_pretrained(f"{save_path}/peft_{server_round}") - - return 0.0, {} - - return evaluate - - -# Get a function that will be used to construct the config that the client's -# fit() method will receive -def get_on_fit_config(): - def fit_config_fn(server_round: int): - fit_config = {"current_round": server_round} - return fit_config - - return fit_config_fn - - -def fit_weighted_average(metrics): - """Aggregation function for (federated) evaluation metrics.""" - # Multiply accuracy of each client by number of examples used - losses = [num_examples * m["train_loss"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"train_loss": sum(losses) / sum(examples)} diff --git a/examples/opacus/README.md b/examples/opacus/README.md index aea5d0f689fe..d08f534f878e 100644 --- a/examples/opacus/README.md +++ b/examples/opacus/README.md @@ -1,5 +1,5 @@ --- -tags: [dp, security, fds] +tags: [DP, DP-SGD, basic, vision, fds, privacy] dataset: [CIFAR-10] framework: [opacus, torch] --- @@ -10,57 +10,54 @@ In this example, we demonstrate how to train a model with differential privacy ( For more information about DP in Flower please refer to the [tutorial](https://flower.ai/docs/framework/how-to-use-differential-privacy.html). For additional information about Opacus, visit the official [website](https://opacus.ai/). -## Environments Setup +## Set up the project -Start by cloning the example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +### Clone the project + +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/opacus . 
&& rm -rf flower && cd opacus +git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/opacus . \ + && rm -rf flower \ + && cd opacus ``` This will create a new directory called `opacus` containing the following files: ```shell --- pyproject.toml --- client.py --- server.py --- README.md +opacus +├── opacus_fl +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training, and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing dependencies +### Install dependencies and project -Project dependencies are defined in `pyproject.toml`. Install them with: +Install the dependencies defined in `pyproject.toml` as well as the `opacus_fl` package. From a new Python environment, run: ```shell -pip install . +pip install -e . ``` -## Run Flower with Opacus and Pytorch - -### 1. Start the long-running Flower server (SuperLink) - -```bash -flower-superlink --insecure -``` +## Run the project -### 2. Start the long-running Flower clients (SuperNodes) +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: +### Run with the Simulation Engine ```bash -flower-client-app client:appA --insecure +flwr run . ``` -```bash -flower-client-app client:appB --insecure -``` - -Opacus hyperparameters can be passed for each client in `ClientApp` instantiation (in `client.py`). In this example, `noise_multiplier=1.5` and `noise_multiplier=1` are used for the first and second client respectively. -### 3. 
Run the Flower App - -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flower-server-app server:app --insecure +flwr run . --run-config "max-grad-norm=1.0 num-server-rounds=5" ``` + +> \[!NOTE\] +> Please note that, at the current state, users cannot set `NodeConfig` for simulated `ClientApp`s. For this reason, the hyperparameter `noise_multiplier` is set in the `client_fn` method based on a condition check on `partition_id`. This will be modified in a future version of Flower to allow users to set `NodeConfig` for simulated `ClientApp`s. diff --git a/examples/opacus/client.py b/examples/opacus/client.py deleted file mode 100644 index 2771a5d78bcc..000000000000 --- a/examples/opacus/client.py +++ /dev/null @@ -1,171 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -import torch -import torch.nn as nn -import torch.nn.functional as F -from flwr.client import ClientApp, NumPyClient -from flwr_datasets import FederatedDataset -from opacus import PrivacyEngine -from torch.utils.data import DataLoader -from torchvision.transforms import Compose, Normalize, ToTensor -from tqdm import tqdm - -warnings.filterwarnings("ignore", category=UserWarning) - -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -class Net(nn.Module): - """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" - - def __init__(self) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(3, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 5 * 5, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, 10) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 5 * 
5) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - return self.fc3(x) - - -def train(net, train_loader, privacy_engine, optimizer, target_delta, epochs=1): - criterion = torch.nn.CrossEntropyLoss() - for _ in range(epochs): - for batch in tqdm(train_loader, "Training"): - images = batch["img"] - labels = batch["label"] - optimizer.zero_grad() - criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() - optimizer.step() - - epsilon = privacy_engine.get_epsilon(delta=target_delta) - return epsilon - - -def test(net, test_loader): - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - with torch.no_grad(): - for batch in tqdm(test_loader, "Testing"): - images = batch["img"].to(DEVICE) - labels = batch["label"].to(DEVICE) - outputs = net(images) - loss += criterion(outputs, labels).item() - correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() - accuracy = correct / len(test_loader.dataset) - return loss, accuracy - - -def load_data(partition_id): - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 2}) - partition = fds.load_partition(partition_id) - # Divide data on each node: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - pytorch_transforms = Compose( - [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] - ) - - def apply_transforms(batch): - batch["img"] = [pytorch_transforms(img) for img in batch["img"]] - return batch - - partition_train_test = partition_train_test.with_transform(apply_transforms) - train_loader = DataLoader( - partition_train_test["train"], batch_size=32, shuffle=True - ) - test_loader = DataLoader(partition_train_test["test"], batch_size=32) - return train_loader, test_loader - - -class FlowerClient(NumPyClient): - def __init__( - self, - model, - train_loader, - test_loader, - target_delta, - noise_multiplier, - max_grad_norm, - ) -> None: - super().__init__() - self.test_loader = test_loader - self.optimizer = 
torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) - self.privacy_engine = PrivacyEngine(secure_mode=False) - self.target_delta = target_delta - ( - self.model, - self.optimizer, - self.train_loader, - ) = self.privacy_engine.make_private( - module=model, - optimizer=self.optimizer, - data_loader=train_loader, - noise_multiplier=noise_multiplier, - max_grad_norm=max_grad_norm, - ) - - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(self.model.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.model.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - epsilon = train( - self.model, - self.train_loader, - self.privacy_engine, - self.optimizer, - self.target_delta, - ) - - if epsilon is not None: - print(f"Epsilon value for delta={self.target_delta} is {epsilon:.2f}") - else: - print("Epsilon value not available.") - return (self.get_parameters(config={}), len(self.train_loader), {}) - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(self.model, self.test_loader) - return loss, len(self.test_loader.dataset), {"accuracy": accuracy} - - -def client_fn_parameterized( - partition_id, target_delta=1e-5, noise_multiplier=1.3, max_grad_norm=1.0 -): - def client_fn(cid: str): - net = Net().to(DEVICE) - train_loader, test_loader = load_data(partition_id=partition_id) - return FlowerClient( - net, - train_loader, - test_loader, - target_delta, - noise_multiplier, - max_grad_norm, - ).to_client() - - return client_fn - - -appA = ClientApp( - client_fn=client_fn_parameterized(partition_id=0, noise_multiplier=1.5), -) - -appB = ClientApp( - client_fn=client_fn_parameterized(partition_id=1, noise_multiplier=1), -) diff --git a/examples/opacus/opacus_fl/__init__.py 
b/examples/opacus/opacus_fl/__init__.py new file mode 100644 index 000000000000..91006b32e386 --- /dev/null +++ b/examples/opacus/opacus_fl/__init__.py @@ -0,0 +1 @@ +"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine.""" diff --git a/examples/opacus/opacus_fl/client_app.py b/examples/opacus/opacus_fl/client_app.py new file mode 100644 index 000000000000..631e99092789 --- /dev/null +++ b/examples/opacus/opacus_fl/client_app.py @@ -0,0 +1,92 @@ +"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine.""" + +import warnings + +import torch +from opacus import PrivacyEngine +from opacus_fl.task import Net, get_weights, load_data, set_weights, test, train +import logging + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + +warnings.filterwarnings("ignore", category=UserWarning) + + +class FlowerClient(NumPyClient): + def __init__( + self, + train_loader, + test_loader, + target_delta, + noise_multiplier, + max_grad_norm, + ) -> None: + super().__init__() + self.model = Net() + self.train_loader = train_loader + self.test_loader = test_loader + self.target_delta = target_delta + self.noise_multiplier = noise_multiplier + self.max_grad_norm = max_grad_norm + + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + model = self.model + set_weights(model, parameters) + + optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9) + + privacy_engine = PrivacyEngine(secure_mode=False) + ( + model, + optimizer, + self.train_loader, + ) = privacy_engine.make_private( + module=model, + optimizer=optimizer, + data_loader=self.train_loader, + noise_multiplier=self.noise_multiplier, + max_grad_norm=self.max_grad_norm, + ) + + epsilon = train( + model, + self.train_loader, + privacy_engine, + optimizer, + self.target_delta, + device=self.device, + ) + + if epsilon is not None: + print(f"Epsilon value for 
delta={self.target_delta} is {epsilon:.2f}") + else: + print("Epsilon value not available.") + + return (get_weights(model), len(self.train_loader.dataset), {}) + + def evaluate(self, parameters, config): + set_weights(self.model, parameters) + loss, accuracy = test(self.model, self.test_loader, self.device) + return loss, len(self.test_loader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + noise_multiplier = 1.0 if partition_id % 2 == 0 else 1.5 + + train_loader, test_loader = load_data( + partition_id=partition_id, num_partitions=context.node_config["num-partitions"] + ) + return FlowerClient( + train_loader, + test_loader, + context.run_config["target-delta"], + noise_multiplier, + context.run_config["max-grad-norm"], + ).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/opacus/opacus_fl/server_app.py b/examples/opacus/opacus_fl/server_app.py new file mode 100644 index 000000000000..2c105d36df41 --- /dev/null +++ b/examples/opacus/opacus_fl/server_app.py @@ -0,0 +1,37 @@ +"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine.""" + +import logging +from typing import List, Tuple + +from opacus_fl.task import Net, get_weights + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +# Opacus logger seems to change the flwr logger to DEBUG level. 
Set back to INFO +logging.getLogger("flwr").setLevel(logging.INFO) + + +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context) -> ServerAppComponents: + num_rounds = context.run_config["num-server-rounds"] + + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + strategy = FedAvg( + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(config=config, strategy=strategy) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/opacus/opacus_fl/task.py b/examples/opacus/opacus_fl/task.py new file mode 100644 index 000000000000..0c7ef71dc50b --- /dev/null +++ b/examples/opacus/opacus_fl/task.py @@ -0,0 +1,102 @@ +"""opacus: Training with Sample-Level Differential Privacy using Opacus Privacy Engine.""" + +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor +from tqdm import tqdm + +fds = None # Cache FederatedDataset + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def get_weights(net): + return 
[val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + +def load_data(partition_id: int, num_partitions: int): + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + + partition = fds.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + train_loader = DataLoader( + partition_train_test["train"], batch_size=32, shuffle=True + ) + test_loader = DataLoader(partition_train_test["test"], batch_size=32) + return train_loader, test_loader + + +def train(net, train_loader, privacy_engine, optimizer, target_delta, device, epochs=1): + criterion = torch.nn.CrossEntropyLoss() + net.to(device) + net.train() + for _ in range(epochs): + for batch in tqdm(train_loader, "Training"): + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + criterion(net(images.to(device)), labels.to(device)).backward() + optimizer.step() + + epsilon = privacy_engine.get_epsilon(delta=target_delta) + return epsilon + + +def test(net, test_loader, device): + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in tqdm(test_loader, "Testing"): + images = batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, 
labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(test_loader.dataset) + return loss, accuracy diff --git a/examples/opacus/pyproject.toml b/examples/opacus/pyproject.toml index 0aaa167d0a28..4814709569ef 100644 --- a/examples/opacus/pyproject.toml +++ b/examples/opacus/pyproject.toml @@ -3,20 +3,35 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "opacus-fl" -version = "0.1.0" -description = "Sample Differential Privacy with Opacus in Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +name = "opacus_fl" +version = "1.0.0" +description = "Sample-level Differential Privacy with Opacus in Flower" + dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.0.2,<1.0.0", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", "torch==2.1.1", "torchvision==0.16.1", - "tqdm==4.65.0", - "opacus==v1.4.1" + "opacus==v1.4.1", ] [tool.hatch.build.targets.wheel] -packages = ["."] \ No newline at end of file +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "opacus_fl.server_app:app" +clientapp = "opacus_fl.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +target-delta = 1e-5 +max-grad-norm = 1.0 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 2 diff --git a/examples/opacus/server.py b/examples/opacus/server.py deleted file mode 100644 index 68c1c027d3d6..000000000000 --- a/examples/opacus/server.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics -from flwr.server import ServerApp, ServerConfig -from flwr.server.strategy import FedAvg - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for 
num_examples, _ in metrics] - return {"accuracy": sum(accuracies) / sum(examples)} - - -strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -config = ServerConfig(num_rounds=3) - -app = ServerApp( - config=config, - strategy=strategy, -) diff --git a/examples/pytorch-federated-variational-autoencoder/pyproject.toml b/examples/pytorch-federated-variational-autoencoder/pyproject.toml index 5109eaf4d2e2..ade08a639f2b 100644 --- a/examples/pytorch-federated-variational-autoencoder/pyproject.toml +++ b/examples/pytorch-federated-variational-autoencoder/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Variational Autoencoder Example with PyTorch and Flower" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/pytorch-from-centralized-to-federated/pyproject.toml b/examples/pytorch-from-centralized-to-federated/pyproject.toml index 3d1559e3a515..57a8082fd6bf 100644 --- a/examples/pytorch-from-centralized-to-federated/pyproject.toml +++ b/examples/pytorch-from-centralized-to-federated/pyproject.toml @@ -9,7 +9,7 @@ description = "PyTorch: From Centralized To Federated with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } torch = "1.13.1" diff --git a/examples/quickstart-fastai/pyproject.toml b/examples/quickstart-fastai/pyproject.toml index 4d160bae0eec..34b817f84e41 100644 --- a/examples/quickstart-fastai/pyproject.toml +++ b/examples/quickstart-fastai/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with Fastai and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "fastai==2.7.14", "torch==2.2.0", 
diff --git a/examples/quickstart-huggingface/README.md b/examples/quickstart-huggingface/README.md index fa4330040ea7..124689441656 100644 --- a/examples/quickstart-huggingface/README.md +++ b/examples/quickstart-huggingface/README.md @@ -4,77 +4,76 @@ dataset: [IMDB] framework: [transformers] --- -# Federated HuggingFace Transformers using Flower and PyTorch +# Federated Learning with HuggingFace Transformers and Flower (Quickstart Example) -This introductory example to using [HuggingFace](https://huggingface.co) Transformers with Flower with PyTorch. This example has been extended from the [quickstart-pytorch](https://flower.ai/docs/examples/quickstart-pytorch.html) example. The training script closely follows the [HuggingFace course](https://huggingface.co/course/chapter3?fw=pt), so you are encouraged to check that out for a detailed explanation of the transformer pipeline. +This is an introductory example of using [🤗Transformers](https://huggingface.co/docs/transformers/en/index) with Flower. The training script closely follows the [HuggingFace course](https://huggingface.co/course/chapter3?fw=pt), so you are encouraged to check that out for a detailed explanation of the transformer pipeline. -Like `quickstart-pytorch`, running this example in itself is also meant to be quite easy. +In this example, we will federate the training of a [BERT-tiny](https://huggingface.co/prajjwal1/bert-tiny) model on the [IMDB](https://huggingface.co/datasets/stanfordnlp/imdb) dataset. The data will be downloaded and partitioned using [Flower Datasets](https://flower.ai/docs/datasets/). This example runs best when a GPU is available. -## Project Setup +## Set up the project + +### Clone the project Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-huggingface . 
&& rm -rf flower && cd quickstart-huggingface +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-huggingface . \ + && rm -rf _tmp && cd quickstart-huggingface ``` This will create a new directory called `quickstart-huggingface` containing the following files: ```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- README.md +quickstart-huggingface +├── huggingface_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies - -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +### Install dependencies and project -#### Poetry +Install the dependencies defined in `pyproject.toml` as well as the `huggingface_example` package. -```shell -poetry install -poetry shell +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +## Run the Example -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. 
By default, `flwr run` will make use of the Simulation Engine. -#### pip +### Run with the Simulation Engine -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +> \[!TIP\] +> This example runs faster when the `ClientApp`s have access to a GPU. If your system has one, you can make use of it by configuring the `backend.client-resources` component in `pyproject.toml`. If you want to try running the example with GPU right away, use the `local-simulation-gpu` federation as shown below. -```shell -pip install -r requirements.txt +```bash +# Run with the default federation (CPU only) +flwr run . ``` -## Run Federated Learning with Flower +Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. By default, at most 4x`ClientApp` (using ~1 GB of VRAM each) will run in parallel in each available GPU. Note you can adjust the degree of parallelism by modifying the `client-resources` specification. -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -python3 server.py +```bash +# Run with the `local-simulation-gpu` federation +flwr run . local-simulation-gpu ``` -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. - -Start client 1 in the first terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. 
For example -```shell -python3 client.py --partition-id 0 +```bash +flwr run --run-config "num-server-rounds=5 fraction-fit=0.1" ``` -Start client 2 in the second terminal: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart 🤗Transformers tutorial](https://flower.ai/docs/framework/tutorial-quickstart-huggingface.html) -```shell -python3 client.py --partition-id 1 -``` +### Run with the Deployment Engine -You will see that PyTorch is starting a federated training. +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-huggingface/client.py b/examples/quickstart-huggingface/client.py deleted file mode 100644 index b880119d1c7c..000000000000 --- a/examples/quickstart-huggingface/client.py +++ /dev/null @@ -1,129 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -import flwr as fl -import torch -from evaluate import load as load_metric -from flwr_datasets import FederatedDataset -from torch.optim import AdamW -from torch.utils.data import DataLoader -from transformers import ( - AutoModelForSequenceClassification, - AutoTokenizer, - DataCollatorWithPadding, -) - -warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cpu") -CHECKPOINT = "distilbert-base-uncased" # transformer model checkpoint - - -def load_data(partition_id): - """Load IMDB data (training and eval)""" - fds = FederatedDataset(dataset="imdb", partitioners={"train": 1_000}) - partition = fds.load_partition(partition_id) - # Divide data: 80% train, 20% test - partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT, model_max_length=512) - - def tokenize_function(examples): - return tokenizer(examples["text"], truncation=True) - - partition_train_test = partition_train_test.map(tokenize_function, batched=True) - partition_train_test = 
partition_train_test.remove_columns("text") - partition_train_test = partition_train_test.rename_column("label", "labels") - - data_collator = DataCollatorWithPadding(tokenizer=tokenizer) - trainloader = DataLoader( - partition_train_test["train"], - shuffle=True, - batch_size=32, - collate_fn=data_collator, - ) - - testloader = DataLoader( - partition_train_test["test"], batch_size=32, collate_fn=data_collator - ) - - return trainloader, testloader - - -def train(net, trainloader, epochs): - optimizer = AdamW(net.parameters(), lr=5e-5) - net.train() - for _ in range(epochs): - for batch in trainloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} - outputs = net(**batch) - loss = outputs.loss - loss.backward() - optimizer.step() - optimizer.zero_grad() - - -def test(net, testloader): - metric = load_metric("accuracy") - loss = 0 - net.eval() - for batch in testloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} - with torch.no_grad(): - outputs = net(**batch) - logits = outputs.logits - loss += outputs.loss.item() - predictions = torch.argmax(logits, dim=-1) - metric.add_batch(predictions=predictions, references=batch["labels"]) - loss /= len(testloader.dataset) - accuracy = metric.compute()["accuracy"] - return loss, accuracy - - -def main(partition_id): - net = AutoModelForSequenceClassification.from_pretrained( - CHECKPOINT, num_labels=2 - ).to(DEVICE) - - trainloader, testloader = load_data(partition_id) - - # Flower client - class IMDBClient(fl.client.NumPyClient): - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in net.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - net.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - print("Training Started...") - train(net, trainloader, epochs=1) - print("Training 
Finished.") - return self.get_parameters(config={}), len(trainloader), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(net, testloader) - return float(loss), len(testloader), {"accuracy": float(accuracy)} - - # Start client - fl.client.start_client( - server_address="127.0.0.1:8080", client=IMDBClient().to_client() - ) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--partition-id", - choices=list(range(1_000)), - required=True, - type=int, - help="Partition of the dataset divided into 1,000 iid partitions created " - "artificially.", - ) - partition_id = parser.parse_args().partition_id - main(partition_id) diff --git a/examples/quickstart-huggingface/huggingface_example/__init__.py b/examples/quickstart-huggingface/huggingface_example/__init__.py new file mode 100644 index 000000000000..6d897650c6bf --- /dev/null +++ b/examples/quickstart-huggingface/huggingface_example/__init__.py @@ -0,0 +1 @@ +"""huggingface_example: A Flower / Hugging Face app.""" diff --git a/examples/quickstart-huggingface/huggingface_example/client_app.py b/examples/quickstart-huggingface/huggingface_example/client_app.py new file mode 100644 index 000000000000..8989e52281ad --- /dev/null +++ b/examples/quickstart-huggingface/huggingface_example/client_app.py @@ -0,0 +1,58 @@ +"""huggingface_example: A Flower / Hugging Face app.""" + +import warnings + +import torch +from flwr.client import Client, ClientApp, NumPyClient +from flwr.common import Context +from transformers import logging +from huggingface_example.task import ( + train, + test, + load_data, + set_params, + get_params, + get_model, +) + +warnings.filterwarnings("ignore", category=FutureWarning) + +# To mute warnings reminding that we need to train the model to a downstream task +# This is something this example does. 
+logging.set_verbosity_error() + + +# Flower client +class IMDBClient(NumPyClient): + def __init__(self, model_name, trainloader, testloader) -> None: + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.trainloader = trainloader + self.testloader = testloader + self.net = get_model(model_name) + self.net.to(self.device) + + def fit(self, parameters, config) -> tuple[list, int, dict]: + set_params(self.net, parameters) + train(self.net, self.trainloader, epochs=1, device=self.device) + return get_params(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config) -> tuple[float, int, dict[str, float]]: + set_params(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, device=self.device) + return float(loss), len(self.testloader), {"accuracy": float(accuracy)} + + +def client_fn(context: Context) -> Client: + """Construct a Client that will be run in a ClientApp.""" + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + # Read the run config to get settings to configure the Client + model_name = context.run_config["model-name"] + trainloader, testloader = load_data(partition_id, num_partitions, model_name) + + return IMDBClient(model_name, trainloader, testloader).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/quickstart-huggingface/huggingface_example/server_app.py b/examples/quickstart-huggingface/huggingface_example/server_app.py new file mode 100644 index 000000000000..d0db1b43fa36 --- /dev/null +++ b/examples/quickstart-huggingface/huggingface_example/server_app.py @@ -0,0 +1,33 @@ +"""huggingface_example: A Flower / Hugging Face app.""" + +from flwr.common import Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from 
huggingface_example.task import get_params, get_model + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components for ServerApp.""" + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + # Set global model initialization + model_name = context.run_config["model-name"] + ndarrays = get_params(get_model(model_name)) + global_model_init = ndarrays_to_parameters(ndarrays) + + # Define strategy + fraction_fit = context.run_config["fraction-fit"] + fraction_evaluate = context.run_config["fraction-evaluate"] + strategy = FedAvg( + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate, + initial_parameters=global_model_init, + ) + + return ServerAppComponents(config=config, strategy=strategy) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-huggingface/huggingface_example/task.py b/examples/quickstart-huggingface/huggingface_example/task.py new file mode 100644 index 000000000000..1c5b8d087dca --- /dev/null +++ b/examples/quickstart-huggingface/huggingface_example/task.py @@ -0,0 +1,105 @@ +"""huggingface_example: A Flower / Hugging Face app.""" + +from typing import Any +from collections import OrderedDict + +import torch +from evaluate import load as load_metric +from torch.optim import AdamW +from torch.utils.data import DataLoader +from transformers import ( + AutoTokenizer, + DataCollatorWithPadding, + AutoModelForSequenceClassification, +) +from datasets.utils.logging import disable_progress_bar +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + +disable_progress_bar() +fds = None # Cache FederatedDataset + + +def load_data( + partition_id: int, num_partitions: int, model_name: str +) -> tuple[DataLoader[Any], DataLoader[Any]]: + """Load IMDB data (training and eval)""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + # Partition the IMDB dataset 
into N partitions + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="stanfordnlp/imdb", partitioners={"train": partitioner} + ) + partition = fds.load_partition(partition_id) + # Divide data: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + + tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=512) + + def tokenize_function(examples): + return tokenizer(examples["text"], truncation=True, add_special_tokens=True) + + partition_train_test = partition_train_test.map(tokenize_function, batched=True) + partition_train_test = partition_train_test.remove_columns("text") + partition_train_test = partition_train_test.rename_column("label", "labels") + + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + trainloader = DataLoader( + partition_train_test["train"], + shuffle=True, + batch_size=32, + collate_fn=data_collator, + ) + + testloader = DataLoader( + partition_train_test["test"], batch_size=32, collate_fn=data_collator + ) + + return trainloader, testloader + + +def get_model(model_name): + return AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2) + + +def get_params(model): + return [val.cpu().numpy() for _, val in model.state_dict().items()] + + +def set_params(model, parameters) -> None: + params_dict = zip(model.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + model.load_state_dict(state_dict, strict=True) + + +def train(net, trainloader, epochs, device) -> None: + optimizer = AdamW(net.parameters(), lr=5e-5) + net.train() + for _ in range(epochs): + for batch in trainloader: + batch = {k: v.to(device) for k, v in batch.items()} + outputs = net(**batch) + loss = outputs.loss + loss.backward() + optimizer.step() + optimizer.zero_grad() + + +def test(net, testloader, device) -> tuple[Any | float, Any]: + metric = load_metric("accuracy") + loss = 0 + 
net.eval() + for batch in testloader: + batch = {k: v.to(device) for k, v in batch.items()} + with torch.no_grad(): + outputs = net(**batch) + logits = outputs.logits + loss += outputs.loss.item() + predictions = torch.argmax(logits, dim=-1) + metric.add_batch(predictions=predictions, references=batch["labels"]) + loss /= len(testloader.dataset) + accuracy = metric.compute()["accuracy"] + return loss, accuracy diff --git a/examples/quickstart-huggingface/pyproject.toml b/examples/quickstart-huggingface/pyproject.toml index 2b46804d7b45..f13c48d96cae 100644 --- a/examples/quickstart-huggingface/pyproject.toml +++ b/examples/quickstart-huggingface/pyproject.toml @@ -1,22 +1,49 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-huggingface" -version = "0.1.0" -description = "Hugging Face Transformers Federated Learning Quickstart with Flower" +[project] +name = "huggingface_example" +version = "1.0.0" +description = "Federated Learning with Hugging Face Transformers and Flower (Quickstart Example)" +license = "Apache-2.0" authors = [ - "The Flower Authors ", - "Kaushik Amar Das ", + { name = "The Flower Authors", email = "hello@flower.ai" }, + { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" }, ] +dependencies = [ + "flwr[simulation]==1.12.0", + "flwr-datasets>=0.3.0", + "torch==2.4.0", + "transformers>=4.30.0,<5.0", + "evaluate>=0.4.0,<1.0", + "datasets>=2.0.0, <3.0", + "scikit-learn>=1.3.1, <2.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "huggingface_example.server_app:app" +clientapp = "huggingface_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +model-name = "prajjwal1/bert-tiny" +fraction-fit = 0.05 +fraction-evaluate = 0.1 + +[tool.flwr.federations] +default = "local-simulation" + 
+[tool.flwr.federations.local-simulation] +options.num-supernodes = 100 -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = ">=0.0.2,<1.0.0" -torch = ">=1.13.1,<2.0" -transformers = ">=4.30.0,<5.0" -evaluate = ">=0.4.0,<1.0" -datasets = ">=2.0.0, <3.0" -scikit-learn = ">=1.3.1, <2.0" +[tool.flwr.federations.local-simulation-gpu] +options.num-supernodes = 100 +options.backend.client-resources.num-cpus = 4 # each ClientApp assumes to use 4CPUs +options.backend.client-resources.num-gpus = 0.25 # at most 4 ClientApp will run in a given GPU (lower it to increase parallelism) diff --git a/examples/quickstart-huggingface/requirements.txt b/examples/quickstart-huggingface/requirements.txt deleted file mode 100644 index 3cd5735625ba..000000000000 --- a/examples/quickstart-huggingface/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets>=0.0.2, <1.0.0 -torch>=1.13.1, <2.0 -transformers>=4.30.0, <5.0 -evaluate>=0.4.0, <1.0 -datasets>=2.0.0, <3.0 -scikit-learn>=1.3.1, <2.0 diff --git a/examples/quickstart-huggingface/run.sh b/examples/quickstart-huggingface/run.sh deleted file mode 100755 index fa989eab1471..000000000000 --- a/examples/quickstart-huggingface/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py --partition-id ${i}& -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-huggingface/server.py b/examples/quickstart-huggingface/server.py deleted file mode 100644 index 4eeb9da7da75..000000000000 --- a/examples/quickstart-huggingface/server.py +++ /dev/null @@ -1,15 +0,0 @@ -import flwr as fl - -if __name__ == "__main__": - # Define strategy - strategy = 
fl.server.strategy.FedAvg( - fraction_fit=1.0, - fraction_evaluate=1.0, - ) - - # Start server - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, - ) diff --git a/examples/quickstart-jax/README.md b/examples/quickstart-jax/README.md index b47f3a82e13b..98f9ec8e7901 100644 --- a/examples/quickstart-jax/README.md +++ b/examples/quickstart-jax/README.md @@ -1,85 +1,67 @@ --- tags: [quickstart, linear regression] dataset: [Synthetic] -framework: [JAX] +framework: [JAX, FLAX] --- -# JAX: From Centralized To Federated +# Federated Learning with JAX and Flower (Quickstart Example) -This example demonstrates how an already existing centralized JAX-based machine learning project can be federated with Flower. +This introductory example to Flower uses JAX, but deep knowledge of JAX is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [FLAX](https://flax.readthedocs.io/en/latest/index.html) to define and train a small CNN model. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. -This introductory example for Flower uses JAX, but you're not required to be a JAX expert to run the example. The example will help you to understand how Flower can be used to build federated learning use cases based on an existing JAX project. +## Set up the project -## Project Setup +### Clone the project -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-jax . 
&& rm -rf flower && cd quickstart-jax +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-jax . \ + && rm -rf _tmp \ + && cd quickstart-jax ``` -This will create a new directory called `quickstart-jax`, containing the following files: +This will create a new directory called `quickstart-jax` with the following structure: ```shell --- pyproject.toml --- requirements.txt --- jax_training.py --- client.py --- server.py --- README.md +quickstart-jax +├── jaxexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies +### Install dependencies and project -Project dependencies (such as `jax` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +Install the dependencies defined in `pyproject.toml` as well as the `jaxexample` package. -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt +```bash +pip install -e . 
``` -## Run JAX Federated -This JAX example is based on the [Linear Regression with JAX](https://coax.readthedocs.io/en/latest/examples/linear_regression/jax.html) tutorial and uses a sklearn dataset (generating a random dataset for a regression problem). Feel free to consult the tutorial if you want to get a better understanding of JAX. If you play around with the dataset, please keep in mind that the data samples are generated randomly depending on the settings being done while calling the dataset function. Please checkout out the [scikit-learn tutorial for further information](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_regression.html). The file `jax_training.py` contains all the steps that are described in the tutorial. It loads the train and test dataset and a linear regression model, trains the model with the training set, and evaluates the trained model on the test set. +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you use the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -The only things we need are a simple Flower server (in `server.py`) and a Flower client (in `client.py`). The Flower client basically takes model and training code tells Flower how to call it. +### Run with the Simulation Engine -Start the server in a terminal as follows: - -```shell -python3 server.py +```bash +flwr run . ``` -Now that the server is running and waiting for clients, we can start two clients that will participate in the federated learning process. To do so simply open two more terminal windows and run the following commands. +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. 
For example: -Start client 1 in the first terminal: - -```shell -python3 client.py +```bash +flwr run . --run-config "num-server-rounds=5 batch-size=32" ``` -Start client 2 in the second terminal: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart JAX tutorial](https://flower.ai/docs/framework/tutorial-quickstart-jax.html) -```shell -python3 client.py -``` +### Run with the Deployment Engine -You are now training a JAX-based linear regression model, federated across two clients. The setup is of course simplified since both clients hold a similar dataset, but you can now continue with your own explorations. How about changing from a linear regression to a more sophisticated model? How about adding more clients? +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-jax/client.py b/examples/quickstart-jax/client.py deleted file mode 100644 index 4a2aaf0e5a93..000000000000 --- a/examples/quickstart-jax/client.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Flower client example using JAX for linear regression.""" - -from typing import Callable, Dict, List, Tuple - -import flwr as fl -import jax -import jax.numpy as jnp -import jax_training -import numpy as np - -# Load data and determine model shape -train_x, train_y, test_x, test_y = jax_training.load_data() -grad_fn = jax.grad(jax_training.loss_fn) -model_shape = train_x.shape[1:] - - -class FlowerClient(fl.client.NumPyClient): - def __init__(self): - self.params = jax_training.load_model(model_shape) - - def get_parameters(self, config): - parameters = [] - for _, val in self.params.items(): - parameters.append(np.array(val)) - return parameters - - def set_parameters(self, parameters: List[np.ndarray]) -> None: - for key, value in list(zip(self.params.keys(), parameters)): - self.params[key] = value - - def fit( - self, parameters: List[np.ndarray], config: Dict - ) -> 
Tuple[List[np.ndarray], int, Dict]: - self.set_parameters(parameters) - self.params, loss, num_examples = jax_training.train( - self.params, grad_fn, train_x, train_y - ) - parameters = self.get_parameters(config={}) - return parameters, num_examples, {"loss": float(loss)} - - def evaluate( - self, parameters: List[np.ndarray], config: Dict - ) -> Tuple[float, int, Dict]: - self.set_parameters(parameters) - loss, num_examples = jax_training.evaluation( - self.params, grad_fn, test_x, test_y - ) - return float(loss), num_examples, {"loss": float(loss)} - - -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", client=FlowerClient().to_client() -) diff --git a/examples/quickstart-jax/jax_training.py b/examples/quickstart-jax/jax_training.py deleted file mode 100644 index f57db75d5963..000000000000 --- a/examples/quickstart-jax/jax_training.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Linear Regression with JAX. - -This code examples is based on the following code example: -https://coax.readthedocs.io/en/latest/examples/linear_regression/jax.html - -If you have any questions concerning the linear regression used with jax -please read the JAX documentation or the mentioned tutorial. 
-""" - -from typing import Callable, Dict, List, Tuple - -import jax -import jax.numpy as jnp -import numpy as np -from sklearn.datasets import make_regression -from sklearn.model_selection import train_test_split - -key = jax.random.PRNGKey(0) - - -def load_data() -> ( - Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List[np.ndarray]] -): - # Load dataset - X, y = make_regression(n_features=3, random_state=0) - X, X_test, y, y_test = train_test_split(X, y) - return X, y, X_test, y_test - - -def load_model(model_shape) -> Dict: - # Extract model parameters - params = {"b": jax.random.uniform(key), "w": jax.random.uniform(key, model_shape)} - return params - - -def loss_fn(params, X, y) -> Callable: - # Return MSE as loss - err = jnp.dot(X, params["w"]) + params["b"] - y - return jnp.mean(jnp.square(err)) - - -def train(params, grad_fn, X, y) -> Tuple[np.array, float, int]: - num_examples = X.shape[0] - for epochs in range(50): - grads = grad_fn(params, X, y) - params = jax.tree_map(lambda p, g: p - 0.05 * g, params, grads) - loss = loss_fn(params, X, y) - if epochs % 10 == 0: - print(f"For Epoch {epochs} loss {loss}") - return params, loss, num_examples - - -def evaluation(params, grad_fn, X_test, y_test) -> Tuple[float, int]: - num_examples = X_test.shape[0] - err_test = loss_fn(params, X_test, y_test) - loss_test = jnp.mean(jnp.square(err_test)) - return loss_test, num_examples - - -def main(): - X, y, X_test, y_test = load_data() - model_shape = X.shape[1:] - grad_fn = jax.grad(loss_fn) - print("Model Shape", model_shape) - params = load_model(model_shape) - print("Params", params) - params, loss, num_examples = train(params, grad_fn, X, y) - print("Training loss:", loss) - loss, num_examples = evaluation(params, grad_fn, X_test, y_test) - print("Evaluation loss:", loss) - - -if __name__ == "__main__": - main() diff --git a/examples/quickstart-jax/jaxexample/__init__.py b/examples/quickstart-jax/jaxexample/__init__.py new file mode 100644 index 
000000000000..f04ba7eccc81 --- /dev/null +++ b/examples/quickstart-jax/jaxexample/__init__.py @@ -0,0 +1 @@ +"""jaxexample: A Flower / JAX app.""" diff --git a/examples/quickstart-jax/jaxexample/client_app.py b/examples/quickstart-jax/jaxexample/client_app.py new file mode 100644 index 000000000000..915b0d4f16be --- /dev/null +++ b/examples/quickstart-jax/jaxexample/client_app.py @@ -0,0 +1,66 @@ +"""jaxexample: A Flower / JAX app.""" + +import numpy as np +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context + +from jaxexample.task import ( + apply_model, + create_train_state, + get_params, + load_data, + set_params, + train, +) + + +# Define Flower Client and client_fn +class FlowerClient(NumPyClient): + def __init__(self, train_state, trainset, testset): + self.train_state = train_state + self.trainset, self.testset = trainset, testset + + def fit(self, parameters, config): + self.train_state = set_params(self.train_state, parameters) + self.train_state, loss, acc = train(self.train_state, self.trainset) + params = get_params(self.train_state.params) + return ( + params, + len(self.trainset), + {"train_acc": float(acc), "train_loss": float(loss)}, + ) + + def evaluate(self, parameters, config): + self.train_state = set_params(self.train_state, parameters) + + losses = [] + accs = [] + for batch in self.testset: + _, loss, accuracy = apply_model( + self.train_state, batch["image"], batch["label"] + ) + losses.append(float(loss)) + accs.append(float(accuracy)) + + return np.mean(losses), len(self.testset), {"accuracy": np.mean(accs)} + + +def client_fn(context: Context): + + num_partitions = context.node_config["num-partitions"] + partition_id = context.node_config["partition-id"] + batch_size = context.run_config["batch-size"] + trainset, testset = load_data(partition_id, num_partitions, batch_size) + + # Create train state object (model + optimizer) + lr = context.run_config["learning-rate"] + train_state = create_train_state(lr) + + # 
Return Client instance + return FlowerClient(train_state, trainset, testset).to_client() + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/quickstart-jax/jaxexample/server_app.py b/examples/quickstart-jax/jaxexample/server_app.py new file mode 100644 index 000000000000..1accf9dabd21 --- /dev/null +++ b/examples/quickstart-jax/jaxexample/server_app.py @@ -0,0 +1,47 @@ +"""jaxexample: A Flower / JAX app.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from jax import random + +from jaxexample.task import create_model, get_params + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + + # Initialize global model + rng = random.PRNGKey(0) + rng, _ = random.split(rng) + _, model_params = create_model(rng) + params = get_params(model_params) + initial_parameters = ndarrays_to_parameters(params) + + # Define strategy + strategy = FedAvg( + fraction_fit=0.4, + fraction_evaluate=0.5, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=initial_parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-jax/jaxexample/task.py b/examples/quickstart-jax/jaxexample/task.py new file mode 100644 index 000000000000..3b923dbe6ae8 
--- /dev/null +++ b/examples/quickstart-jax/jaxexample/task.py @@ -0,0 +1,152 @@ +"""jaxexample: A Flower / JAX app.""" + +import warnings + +import jax +import jax.numpy as jnp +import numpy as np +import optax +from datasets.utils.logging import disable_progress_bar +from flax import linen as nn +from flax.training.train_state import TrainState +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + +disable_progress_bar() + +rng = jax.random.PRNGKey(0) +rng, init_rng = jax.random.split(rng) + +warnings.filterwarnings("ignore", category=UserWarning) +warnings.filterwarnings("ignore", category=RuntimeWarning) + + +class CNN(nn.Module): + """A simple CNN model.""" + + @nn.compact + def __call__(self, x): + x = nn.Conv(features=6, kernel_size=(5, 5))(x) + x = nn.relu(x) + x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2)) + x = nn.Conv(features=16, kernel_size=(5, 5))(x) + x = nn.relu(x) + x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2)) + x = x.reshape((x.shape[0], -1)) # flatten + x = nn.Dense(features=120)(x) + x = nn.relu(x) + x = nn.Dense(features=84)(x) + x = nn.relu(x) + x = nn.Dense(features=10)(x) + return x + + +def create_model(rng): + cnn = CNN() + return cnn, cnn.init(rng, jnp.ones([1, 28, 28, 1]))["params"] + + +def create_train_state(learning_rate: float): + """Creates initial `TrainState`.""" + + tx = optax.sgd(learning_rate, momentum=0.9) + model, model_params = create_model(rng) + return TrainState.create(apply_fn=model.apply, params=model_params, tx=tx) + + +def get_params(params): + """Get model parameters as list of numpy arrays.""" + return [np.array(param) for param in jax.tree_util.tree_leaves(params)] + + +def set_params(train_state: TrainState, global_params) -> TrainState: + """Create a new trainstate with the global_params.""" + new_params_dict = jax.tree_util.tree_unflatten( + jax.tree_util.tree_structure(train_state.params), global_params + ) + return 
train_state.replace(params=new_params_dict) + + +@jax.jit +def apply_model(state, images, labels): + """Computes gradients, loss and accuracy for a single batch.""" + + def loss_fn(params): + logits = state.apply_fn({"params": params}, images) + one_hot = jax.nn.one_hot(labels, 10) + loss = jnp.mean(optax.softmax_cross_entropy(logits=logits, labels=one_hot)) + return loss, logits + + grad_fn = jax.value_and_grad(loss_fn, has_aux=True) + (loss, logits), grads = grad_fn(state.params) + accuracy = jnp.mean(jnp.argmax(logits, -1) == labels) + return grads, loss, accuracy + + +@jax.jit +def update_model(state, grads): + return state.apply_gradients(grads=grads) + + +def train(state, train_ds): + """Train for a single epoch.""" + + epoch_loss = [] + epoch_accuracy = [] + + for batch in train_ds: + batch_images = batch["image"] + batch_labels = batch["label"] + grads, loss, accuracy = apply_model(state, batch_images, batch_labels) + state = update_model(state, grads) + epoch_loss.append(loss) + epoch_accuracy.append(accuracy) + train_loss = np.mean(epoch_loss) + train_accuracy = np.mean(epoch_accuracy) + return state, train_loss, train_accuracy + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int, batch_size: int): + """Load partition MNIST data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="mnist", + partitioners={"train": partitioner}, + ) + partition = fds.load_partition(partition_id) + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2) + + partition["train"].set_format("jax") + partition["test"].set_format("jax") + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["image"] = [ + jnp.expand_dims(jnp.float32(img), 3) / 255 for img in batch["image"] + ] + batch["label"] = [jnp.int16(label) 
for label in batch["label"]] + return batch + + train_partition = ( + partition["train"] + .batch(batch_size, num_proc=2, drop_last_batch=True) + .with_transform(apply_transforms) + ) + test_partition = ( + partition["test"] + .batch(batch_size, num_proc=2, drop_last_batch=True) + .with_transform(apply_transforms) + ) + + train_partition.shuffle(seed=1234) + test_partition.shuffle(seed=1234) + + return train_partition, test_partition diff --git a/examples/quickstart-jax/pyproject.toml b/examples/quickstart-jax/pyproject.toml index c956191369b5..09fd32f7a318 100644 --- a/examples/quickstart-jax/pyproject.toml +++ b/examples/quickstart-jax/pyproject.toml @@ -1,16 +1,38 @@ -[tool.poetry] -name = "jax_example" -version = "0.1.0" -description = "JAX example training a linear regression model with federated learning" -authors = ["The Flower Authors "] +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = "1.0.0" -jax = "0.4.17" -jaxlib = "0.4.17" -scikit-learn = "1.1.1" +[project] +name = "jaxexample" +version = "1.0.0" +description = "" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.4.0", + "datasets>=2.21.0", + "jax==0.4.31", + "jaxlib==0.4.31", + "flax==0.9.0", +] -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "jaxexample.server_app:app" +clientapp = "jaxexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 5 +learning-rate = 0.1 +batch-size = 64 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 50 diff --git a/examples/quickstart-jax/requirements.txt b/examples/quickstart-jax/requirements.txt deleted file mode 100644 index 964f07a51b7d..000000000000 --- 
a/examples/quickstart-jax/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0,<2.0 -jax==0.4.17 -jaxlib==0.4.17 -scikit-learn==1.1.1 diff --git a/examples/quickstart-jax/run.sh b/examples/quickstart-jax/run.sh deleted file mode 100755 index c64f362086aa..000000000000 --- a/examples/quickstart-jax/run.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-jax/server.py b/examples/quickstart-jax/server.py deleted file mode 100644 index 2bc3716d84ae..000000000000 --- a/examples/quickstart-jax/server.py +++ /dev/null @@ -1,7 +0,0 @@ -import flwr as fl - -if __name__ == "__main__": - fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - ) diff --git a/examples/quickstart-mlcube/pyproject.toml b/examples/quickstart-mlcube/pyproject.toml index a2862bd5ebb7..0418efc0b440 100644 --- a/examples/quickstart-mlcube/pyproject.toml +++ b/examples/quickstart-mlcube/pyproject.toml @@ -9,8 +9,8 @@ description = "Keras Federated Learning Quickstart with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" # For development: { path = "../../", develop = true } +python = ">=3.9,<3.11" +flwr = ">=1.0,<2.0" # For development: { path = "../../", develop = true } tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } mlcube = "0.0.9" diff --git a/examples/quickstart-mlx/README.md 
b/examples/quickstart-mlx/README.md index 95b9ccf605b5..5914ce5f31dd 100644 --- a/examples/quickstart-mlx/README.md +++ b/examples/quickstart-mlx/README.md @@ -58,7 +58,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.05 +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` > \[!TIP\] @@ -67,4 +67,4 @@ flwr run . --run-config num-server-rounds=5,learning-rate=0.05 ### Run with the Deployment Engine > \[!NOTE\] -> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates. diff --git a/examples/quickstart-mlx/pyproject.toml b/examples/quickstart-mlx/pyproject.toml index 36e39bcd6d78..3165a3d93881 100644 --- a/examples/quickstart-mlx/pyproject.toml +++ b/examples/quickstart-mlx/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with MLX and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "mlx==0.16.0", "numpy==1.26.4", diff --git a/examples/quickstart-monai/.gitignore b/examples/quickstart-monai/.gitignore index a218cab9669e..2626387e2a4f 100644 --- a/examples/quickstart-monai/.gitignore +++ b/examples/quickstart-monai/.gitignore @@ -1 +1,2 @@ MedNIST* +.data_download.lock diff --git a/examples/quickstart-monai/README.md b/examples/quickstart-monai/README.md index dc31f03e4b1b..8189a8e98406 100644 --- a/examples/quickstart-monai/README.md +++ b/examples/quickstart-monai/README.md @@ -4,88 +4,76 @@ dataset: [MedNIST] framework: [MONAI] --- -# Flower Example using MONAI +# Federated Learning with MONAI and Flower (Quickstart Example) This introductory example to Flower 
uses MONAI, but deep knowledge of MONAI is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. -Running this example in itself is quite easy. +Running this example in itself is quite easy. [MONAI](https://docs.monai.io/en/latest/index.html)(Medical Open Network for AI) is a PyTorch-based, open-source framework for deep learning in healthcare imaging, part of the PyTorch Ecosystem. This example uses a subset of the [MedMNIST](https://medmnist.com/) dataset including 6 classes, as done in [MONAI's classification demo](https://colab.research.google.com/drive/1wy8XUSnNWlhDNazFdvGBHLfdkGvOHBKe). Each client trains am [DenseNet121](https://docs.monai.io/en/stable/networks.html#densenet121) from MONAI. -[MONAI](https://docs.monai.io/en/latest/index.html)(Medical Open Network for AI) is a PyTorch-based, open-source framework for deep learning in healthcare imaging, part of the PyTorch Ecosystem. +> \[!NOTE\] +> This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to partition the MedMNIST dataset. Its a good example to show how to bring any dataset into Flower and partition it using any of the built-in [partitioners](https://flower.ai/docs/datasets/ref-api/flwr_datasets.partitioner.html) (e.g. `DirichletPartitioner`, `PathologicalPartitioner`). Learn [how to use partitioners](https://flower.ai/docs/datasets/tutorial-use-partitioners.html) in a step-by-step tutorial. -Its ambitions are: +## Set up the project -- developing a community of academic, industrial and clinical researchers collaborating on a common foundation; +### Clone the project -- creating state-of-the-art, end-to-end training workflows for healthcare imaging; - -- providing researchers with an optimized and standardized way to create and evaluate deep learning models. - -## Project Setup - -Start by cloning the example project. 
We prepared a single-line command that you can copy into your shell which will checkout the example for you: +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/quickstart-monai . && rm -rf _tmp && cd quickstart-monai +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-monai . \ + && rm -rf _tmp \ + && cd quickstart-monai ``` -This will create a new directory called `quickstart-monai` containing the following files: +This will create a new directory called `quickstart-monai` with the following structure: ```shell --- pyproject.toml --- requirements.txt --- client.py --- data.py --- model.py --- server.py --- README.md +quickstart-monai +├── monaiexample +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies - -Project dependencies (such as `monai` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. +### Install dependencies and project -#### Poetry +Install the dependencies defined in `pyproject.toml` as well as the `monaiexample` package. -```shell -poetry install -poetry shell +```bash +pip install -e . ``` -Poetry will install all your dependencies in a newly created virtual environment. 
To verify that everything works correctly you can run the following command: - -```shell -poetry run python3 -c "import flwr" -``` +## Run the project -If you don't see any errors you're good to go! +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -#### pip +### Run with the Simulation Engine -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +> \[!TIP\] +> This example runs faster when the `ClientApp`s have access to a GPU. If your system has one, you can make use of it by configuring the `backend.client-resources` component in `pyproject.toml`. If you want to try running the example with GPU right away, use the `local-simulation-gpu` federation as shown below. -```shell -pip install -r requirements.txt +```bash +# Run with the default federation (CPU only) +flwr run . ``` -## Run Federated Learning with MONAI and Flower - -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: +Run the project in the `local-simulation-gpu` federation that gives CPU and GPU resources to each `ClientApp`. By default, at most 4x`ClientApp` will run in parallel in the available GPU. -```shell -python3 server.py +```bash +# Run with the `local-simulation-gpu` federation +flwr run . local-simulation-gpu ``` -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. Clients will train a [DenseNet121](https://docs.monai.io/en/stable/networks.html#densenet121) from MONAI. If a GPU is present in your system, clients will use it. 
- -Start client 1 in the first terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -python3 client.py --partition-id 0 +```bash +flwr run . --run-config "num-server-rounds=5 batch-size=32" ``` -Start client 2 in the second terminal: - -```shell -python3 client.py --partition-id 1 -``` +### Run with the Deployment Engine -You will see that the federated training is starting. Look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-monai) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-monai/client.py b/examples/quickstart-monai/client.py deleted file mode 100644 index 1401928af1ff..000000000000 --- a/examples/quickstart-monai/client.py +++ /dev/null @@ -1,61 +0,0 @@ -import argparse -import warnings -from collections import OrderedDict - -import flwr as fl -import torch -from monai.networks.nets.densenet import DenseNet121 - -from data import load_data -from model import test, train - -warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def __init__(self, net, trainloader, testloader, device): - self.net = net - self.trainloader = trainloader - self.testloader = testloader - self.device = device - - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in self.net.state_dict().items()] - - def set_parameters(self, parameters): - params_dict = zip(self.net.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.net.load_state_dict(state_dict, strict=True) - - def fit(self, parameters, config): - self.set_parameters(parameters) - train(self.net, self.trainloader, epoch_num=1, 
device=self.device) - return self.get_parameters(config={}), len(self.trainloader), {} - - def evaluate(self, parameters, config): - self.set_parameters(parameters) - loss, accuracy = test(self.net, self.testloader, self.device) - return loss, len(self.testloader), {"accuracy": accuracy} - - -if __name__ == "__main__": - total_partitions = 10 - parser = argparse.ArgumentParser() - parser.add_argument( - "--partition-id", type=int, choices=range(total_partitions), required=True - ) - args = parser.parse_args() - - # Load model and data (simple CNN, CIFAR-10) - trainloader, _, testloader, num_class = load_data( - total_partitions, args.partition_id - ) - net = DenseNet121(spatial_dims=2, in_channels=1, out_channels=num_class).to(DEVICE) - - # Start Flower client - fl.client.start_numpy_client( - server_address="127.0.0.1:8080", - client=FlowerClient(net, trainloader, testloader, DEVICE), - ) diff --git a/examples/quickstart-monai/data.py b/examples/quickstart-monai/data.py deleted file mode 100644 index d184476522e8..000000000000 --- a/examples/quickstart-monai/data.py +++ /dev/null @@ -1,158 +0,0 @@ -import os -import tarfile -from urllib import request - -import numpy as np -from monai.data import DataLoader, Dataset -from monai.transforms import ( - Compose, - EnsureChannelFirst, - LoadImage, - RandFlip, - RandRotate, - RandZoom, - ScaleIntensity, - ToTensor, -) - - -def _partition(files_list, labels_list, num_shards, index): - total_size = len(files_list) - assert total_size == len( - labels_list - ), f"List of datapoints and labels must be of the same length" - shard_size = total_size // num_shards - - # Calculate start and end indices for the shard - start_idx = index * shard_size - if index == num_shards - 1: - # Last shard takes the remainder - end_idx = total_size - else: - end_idx = start_idx + shard_size - - # Create a subset for the shard - files = files_list[start_idx:end_idx] - labels = labels_list[start_idx:end_idx] - return files, labels - - -def 
load_data(num_shards, index): - image_file_list, image_label_list, _, num_class = _download_data() - - # Get partition given index - files_list, labels_list = _partition( - image_file_list, image_label_list, num_shards, index - ) - - trainX, trainY, valX, valY, testX, testY = _split_data( - files_list, labels_list, len(files_list) - ) - train_transforms, val_transforms = _get_transforms() - - train_ds = MedNISTDataset(trainX, trainY, train_transforms) - train_loader = DataLoader(train_ds, batch_size=300, shuffle=True) - - val_ds = MedNISTDataset(valX, valY, val_transforms) - val_loader = DataLoader(val_ds, batch_size=300) - - test_ds = MedNISTDataset(testX, testY, val_transforms) - test_loader = DataLoader(test_ds, batch_size=300) - - return train_loader, val_loader, test_loader, num_class - - -class MedNISTDataset(Dataset): - def __init__(self, image_files, labels, transforms): - self.image_files = image_files - self.labels = labels - self.transforms = transforms - - def __len__(self): - return len(self.image_files) - - def __getitem__(self, index): - return self.transforms(self.image_files[index]), self.labels[index] - - -def _download_data(): - data_dir = "./MedNIST/" - _download_and_extract( - "https://dl.dropboxusercontent.com/s/5wwskxctvcxiuea/MedNIST.tar.gz", - os.path.join(data_dir), - ) - - class_names = sorted( - [x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x))] - ) - num_class = len(class_names) - image_files = [ - [ - os.path.join(data_dir, class_name, x) - for x in os.listdir(os.path.join(data_dir, class_name)) - ] - for class_name in class_names - ] - image_file_list = [] - image_label_list = [] - for i, class_name in enumerate(class_names): - image_file_list.extend(image_files[i]) - image_label_list.extend([i] * len(image_files[i])) - num_total = len(image_label_list) - return image_file_list, image_label_list, num_total, num_class - - -def _split_data(image_file_list, image_label_list, num_total): - valid_frac, test_frac 
= 0.1, 0.1 - trainX, trainY = [], [] - valX, valY = [], [] - testX, testY = [], [] - - for i in range(num_total): - rann = np.random.random() - if rann < valid_frac: - valX.append(image_file_list[i]) - valY.append(image_label_list[i]) - elif rann < test_frac + valid_frac: - testX.append(image_file_list[i]) - testY.append(image_label_list[i]) - else: - trainX.append(image_file_list[i]) - trainY.append(image_label_list[i]) - - return trainX, trainY, valX, valY, testX, testY - - -def _get_transforms(): - train_transforms = Compose( - [ - LoadImage(image_only=True), - EnsureChannelFirst(), - ScaleIntensity(), - RandRotate(range_x=15, prob=0.5, keep_size=True), - RandFlip(spatial_axis=0, prob=0.5), - RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5, keep_size=True), - ToTensor(), - ] - ) - - val_transforms = Compose( - [LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity(), ToTensor()] - ) - - return train_transforms, val_transforms - - -def _download_and_extract(url, dest_folder): - if not os.path.isdir(dest_folder): - # Download the tar.gz file - tar_gz_filename = url.split("/")[-1] - if not os.path.isfile(tar_gz_filename): - with request.urlopen(url) as response, open( - tar_gz_filename, "wb" - ) as out_file: - out_file.write(response.read()) - - # Extract the tar.gz file - with tarfile.open(tar_gz_filename, "r:gz") as tar_ref: - tar_ref.extractall() diff --git a/examples/quickstart-monai/model.py b/examples/quickstart-monai/model.py deleted file mode 100644 index 4c74d50553e4..000000000000 --- a/examples/quickstart-monai/model.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch - - -def train(model, train_loader, epoch_num, device): - loss_function = torch.nn.CrossEntropyLoss() - optimizer = torch.optim.Adam(model.parameters(), 1e-5) - for _ in range(epoch_num): - model.train() - for inputs, labels in train_loader: - optimizer.zero_grad() - loss_function(model(inputs.to(device)), labels.to(device)).backward() - optimizer.step() - - -def test(model, 
test_loader, device): - model.eval() - loss = 0.0 - y_true = list() - y_pred = list() - loss_function = torch.nn.CrossEntropyLoss() - with torch.no_grad(): - for test_images, test_labels in test_loader: - out = model(test_images.to(device)) - test_labels = test_labels.to(device) - loss += loss_function(out, test_labels).item() - pred = out.argmax(dim=1) - for i in range(len(pred)): - y_true.append(test_labels[i].item()) - y_pred.append(pred[i].item()) - accuracy = sum([1 if t == p else 0 for t, p in zip(y_true, y_pred)]) / len( - test_loader.dataset - ) - return loss, accuracy diff --git a/examples/quickstart-monai/monaiexample/__init__.py b/examples/quickstart-monai/monaiexample/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/examples/quickstart-monai/monaiexample/client_app.py b/examples/quickstart-monai/monaiexample/client_app.py new file mode 100644 index 000000000000..c0dcac0cdae2 --- /dev/null +++ b/examples/quickstart-monai/monaiexample/client_app.py @@ -0,0 +1,41 @@ +"""monaiexample: A Flower / MONAI app.""" + +import torch +from flwr.common import Context +from flwr.client import NumPyClient, ClientApp + +from monaiexample.task import load_data, load_model, test, train, get_params, set_params + + +# Define Flower client +class FlowerClient(NumPyClient): + def __init__(self, net, trainloader, valloader): + self.net = net + self.trainloader = trainloader + self.valloader = valloader + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + def fit(self, parameters, config): + set_params(self.net, parameters) + train(self.net, self.trainloader, epoch_num=1, device=self.device) + return get_params(self.net), len(self.trainloader), {} + + def evaluate(self, parameters, config): + set_params(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader), {"accuracy": accuracy} + + +def client_fn(context: Context): + + partition_id = 
context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + batch_size = context.run_config["batch-size"] + trainloader, valloader = load_data(num_partitions, partition_id, batch_size) + net = load_model() + + return FlowerClient(net, trainloader, valloader).to_client() + + +app = ClientApp(client_fn=client_fn) diff --git a/examples/quickstart-monai/monaiexample/server_app.py b/examples/quickstart-monai/monaiexample/server_app.py new file mode 100644 index 000000000000..f68d3887a488 --- /dev/null +++ b/examples/quickstart-monai/monaiexample/server_app.py @@ -0,0 +1,46 @@ +"""monaiexample: A Flower / MONAI app.""" + +from typing import List, Tuple + +from flwr.common import Metrics, Context, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg + +from monaiexample.task import load_model, get_params + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + + # Init model + model = load_model() + + # Convert model parameters to flwr.common.Parameters + ndarrays = get_params(model) + global_model_init = ndarrays_to_parameters(ndarrays) + + # Define strategy + fraction_fit = context.run_config["fraction-fit"] + strategy = FedAvg( + fraction_fit=fraction_fit, + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=global_model_init, + ) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +app 
= ServerApp(server_fn=server_fn) diff --git a/examples/quickstart-monai/monaiexample/task.py b/examples/quickstart-monai/monaiexample/task.py new file mode 100644 index 000000000000..4f7972d455fd --- /dev/null +++ b/examples/quickstart-monai/monaiexample/task.py @@ -0,0 +1,200 @@ +"""monaiexample: A Flower / MONAI app.""" + +import os +import tarfile +from urllib import request +from collections import OrderedDict + +import torch +import monai +from monai.networks.nets import densenet +from monai.transforms import ( + Compose, + EnsureChannelFirst, + LoadImage, + RandFlip, + RandRotate, + RandZoom, + ScaleIntensity, + ToTensor, +) +from filelock import FileLock +from datasets import Dataset +from flwr_datasets.partitioner import IidPartitioner + + +def load_model(): + """Load a DenseNet12.""" + return densenet.DenseNet121(spatial_dims=2, in_channels=1, out_channels=6) + + +def get_params(model): + """Return tensors in the model's state_dict.""" + return [val.cpu().numpy() for _, val in model.state_dict().items()] + + +def set_params(model, ndarrays): + """Apply parameters to a model.""" + params_dict = zip(model.state_dict().keys(), ndarrays) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + model.load_state_dict(state_dict, strict=True) + + +def train(model, train_loader, epoch_num, device): + """Train a model using the supplied dataloader.""" + model.to(device) + loss_function = torch.nn.CrossEntropyLoss() + optimizer = torch.optim.Adam(model.parameters(), 1e-5) + for _ in range(epoch_num): + model.train() + for batch in train_loader: + images, labels = batch["img"], batch["label"] + optimizer.zero_grad() + loss_function(model(images.to(device)), labels.to(device)).backward() + optimizer.step() + + +def test(model, test_loader, device): + """Evaluate a model on a held-out dataset.""" + model.to(device) + model.eval() + loss = 0.0 + y_true = list() + y_pred = list() + loss_function = torch.nn.CrossEntropyLoss() + with torch.no_grad(): + 
for batch in test_loader: + images, labels = batch["img"], batch["label"] + out = model(images.to(device)) + labels = labels.to(device) + loss += loss_function(out, labels).item() + pred = out.argmax(dim=1) + for i in range(len(pred)): + y_true.append(labels[i].item()) + y_pred.append(pred[i].item()) + accuracy = sum([1 if t == p else 0 for t, p in zip(y_true, y_pred)]) / len( + test_loader.dataset + ) + return loss, accuracy + + +def _get_transforms(): + """Return transforms to be used for training and evaluation.""" + train_transforms = Compose( + [ + LoadImage(image_only=True), + EnsureChannelFirst(), + ScaleIntensity(), + RandRotate(range_x=15, prob=0.5, keep_size=True), + RandFlip(spatial_axis=0, prob=0.5), + RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5, keep_size=True), + ToTensor(), + ] + ) + + val_transforms = Compose( + [LoadImage(image_only=True), EnsureChannelFirst(), ScaleIntensity(), ToTensor()] + ) + + return train_transforms, val_transforms + + +def get_apply_transforms_fn(transforms_to_apply): + """Return a function that applies the transforms passed as input argument.""" + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [transforms_to_apply(img) for img in batch["img_file"]] + return batch + + return apply_transforms + + +ds = None +partitioner = None + + +def load_data(num_partitions, partition_id, batch_size): + """Download dataset, partition it and return data loader of specific partition.""" + # Set dataset and partitioner only once + global ds, partitioner + if ds is None: + image_file_list, image_label_list = _download_data() + + # Construct HuggingFace dataset + ds = Dataset.from_dict({"img_file": image_file_list, "label": image_label_list}) + # Set partitioner + partitioner = IidPartitioner(num_partitions) + partitioner.dataset = ds + + partition = partitioner.load_partition(partition_id) + + # Split train/validation + partition_train_test = 
partition.train_test_split(test_size=0.2, seed=42) + + # Get transforms + train_t, test_t = _get_transforms() + + # Apply transforms individually to each split + train_partition = partition_train_test["train"] + test_partition = partition_train_test["test"] + + partition_train = train_partition.with_transform(get_apply_transforms_fn(train_t)) + partition_val = test_partition.with_transform(get_apply_transforms_fn(test_t)) + + # Create dataloaders + train_loader = monai.data.DataLoader( + partition_train, batch_size=batch_size, shuffle=True + ) + val_loader = monai.data.DataLoader(partition_val, batch_size=batch_size) + + return train_loader, val_loader + + +def _download_data(): + """Download and extract dataset.""" + data_dir = "./MedNIST/" + _download_and_extract_if_needed( + "https://dl.dropboxusercontent.com/s/5wwskxctvcxiuea/MedNIST.tar.gz", + os.path.join(data_dir), + ) + + # Compute list of files and their associated labels + class_names = sorted( + [x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x))] + ) + image_files = [ + [ + os.path.join(data_dir, class_name, x) + for x in os.listdir(os.path.join(data_dir, class_name)) + ] + for class_name in class_names + ] + image_file_list = [] + image_label_list = [] + for i, _ in enumerate(class_names): + image_file_list.extend(image_files[i]) + image_label_list.extend([i] * len(image_files[i])) + + return image_file_list, image_label_list + + +def _download_and_extract_if_needed(url, dest_folder): + """Download dataset if not present.""" + + # Logic behind a filelock to prevent multiple processes (e.g. ClientApps) + # from downloading the dataset at the same time. 
+ with FileLock(".data_download.lock"): + if not os.path.isdir(dest_folder): + # Download the tar.gz file + tar_gz_filename = url.split("/")[-1] + if not os.path.isfile(tar_gz_filename): + with ( + request.urlopen(url) as response, + open(tar_gz_filename, "wb") as out_file, + ): + out_file.write(response.read()) + + # Extract the tar.gz file + with tarfile.open(tar_gz_filename, "r:gz") as tar_ref: + tar_ref.extractall() diff --git a/examples/quickstart-monai/pyproject.toml b/examples/quickstart-monai/pyproject.toml index 2b77a2fc061f..7a6e766bb853 100644 --- a/examples/quickstart-monai/pyproject.toml +++ b/examples/quickstart-monai/pyproject.toml @@ -1,19 +1,41 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "quickstart-monai" -version = "0.1.0" -description = "MONAI Federated Learning Quickstart with Flower" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -torch = "1.13.1" -tqdm = "4.66.3" -scikit-learn = "1.3.1" -monai = { version = "1.3.0", extras=["gdown", "nibabel", "tqdm", "itk"] } -numpy = "1.24.4" -pillow = "10.2.0" +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "monaiexample" +version = "1.0.0" +description = "Federated Learning with MONAI and Flower (Quickstart Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]==1.12.0", + "flwr-datasets[vision]>=0.3.0", + "monai==1.3.2", + "filelock==3.15.4", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "monaiexample.server_app:app" +clientapp = "monaiexample.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 5 +fraction-fit = 0.5 +batch-size = 128 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 + +[tool.flwr.federations.local-simulation-gpu] 
+options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 4 +options.backend.client-resources.num-gpus = 0.25 # at most 4 ClientApps will run in a given GPU diff --git a/examples/quickstart-monai/requirements.txt b/examples/quickstart-monai/requirements.txt deleted file mode 100644 index e3f1e463c629..000000000000 --- a/examples/quickstart-monai/requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -flwr>=1.0, <2.0 -torch==1.13.1 -tqdm==4.65.0 -scikit-learn==1.3.1 -monai[gdown,nibabel,tqdm,itk]==1.3.0 -numpy==1.24.4 -pillow==10.2.0 diff --git a/examples/quickstart-monai/run.sh b/examples/quickstart-monai/run.sh deleted file mode 100755 index 1da60bccb86d..000000000000 --- a/examples/quickstart-monai/run.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -python -c "from data import _download_data; _download_data()" - -echo "Starting server" -python server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py --partition-id $i & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-monai/server.py b/examples/quickstart-monai/server.py deleted file mode 100644 index fe691a88aba0..000000000000 --- a/examples/quickstart-monai/server.py +++ /dev/null @@ -1,25 +0,0 @@ -from typing import List, Tuple - -import flwr as fl -from flwr.common import Metrics - - -# Define metric aggregation function -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / 
sum(examples)} - - -# Define strategy -strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/quickstart-pandas/README.md b/examples/quickstart-pandas/README.md index 0b4b3a6ac78a..3f522b26834d 100644 --- a/examples/quickstart-pandas/README.md +++ b/examples/quickstart-pandas/README.md @@ -4,85 +4,69 @@ dataset: [Iris] framework: [pandas] --- -# Flower Example using Pandas +# Federated Learning with Pandas and Flower (Quickstart Example) -This introductory example to Flower uses Pandas, but deep knowledge of Pandas is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to -download, partition and preprocess the dataset. +> \[!CAUTION\] +> This example uses Flower's low-level API which remains a preview feature and subject to change. Both `ClientApp` and `ServerApp` operate directly on [Message](https://flower.ai/docs/framework/ref-api/flwr.common.Message.html) and [RecordSet](https://flower.ai/docs/framework/ref-api/flwr.common.RecordSet.html) objects. + +This introductory example to Flower uses [Pandas](https://pandas.pydata.org/), but deep knowledge of Pandas is not necessarily required to run the example. However, it will help you understand how to adapt Flower to your use case. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to +download, partition and preprocess the [Iris dataset](https://huggingface.co/datasets/scikit-learn/iris). Running this example in itself is quite easy. -## Project Setup +This example implements a form of Federated Analytics by which instead of training a model using locally available data, the nodes run a query on the data they own. 
In this example the query is to compute the histogram on specific columns of the dataset. These metrics are sent to the `ServerApp` for aggregation. -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +## Set up the project -```shell -$ git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/quickstart-pandas . && rm -rf _tmp && cd quickstart-pandas -``` +### Clone the project -This will create a new directory called `quickstart-pandas` containing the following files: +Start by cloning the example project. ```shell --- pyproject.toml --- requirements.txt --- client.py --- server.py --- start.sh --- README.md +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/quickstart-pandas . \ + && rm -rf _tmp && cd quickstart-pandas ``` -If you don't plan on using the `run.sh` script that automates the run, you should first download the data and put it in a `data` folder, this can be done by executing: +This will create a new directory called `quickstart-pandas` with the following structure: ```shell -$ mkdir -p ./data -$ python -c "from sklearn.datasets import load_iris; load_iris(as_frame=True)['data'].to_csv('./data/client.csv')" +quickstart-pandas +├── pandas_example +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ └── server_app.py # Defines your ServerApp +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing Dependencies - -Project dependencies (such as `pandas` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry +### Install dependencies and project -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: +Install the dependencies defined in `pyproject.toml` as well as the `pandas_example` package. -```shell -poetry run python3 -c "import flwr" +```bash +pip install -e . ``` -If you don't see any errors you're good to go! +## Run the project -#### pip +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +### Run with the Simulation Engine -```shell -pip install -r requirements.txt +```bash +flwr run . ``` -## Run Federated Analytics with Pandas and Flower +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example -Afterwards you are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows: - -```shell -$ python3 server.py +```bash +flwr run . --run-config num-server-rounds=5 ``` -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. 
- -Start client 1 in the first terminal: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart PyTorch tutorial](https://flower.ai/docs/framework/tutorial-quickstart-pandas.html) -```shell -$ python3 client.py --partition-id 0 -``` - -Start client 2 in the second terminal: - -```shell -$ python3 client.py --partition-id 1 -``` +### Run with the Deployment Engine -You will see that the server is printing aggregated statistics about the dataset distributed amongst clients. Have a look to the [Flower Quickstarter documentation](https://flower.ai/docs/quickstart-pandas.html) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. diff --git a/examples/quickstart-pandas/client.py b/examples/quickstart-pandas/client.py deleted file mode 100644 index 5a501e3517e6..000000000000 --- a/examples/quickstart-pandas/client.py +++ /dev/null @@ -1,62 +0,0 @@ -import argparse -from typing import Dict, List, Tuple - -import flwr as fl -import numpy as np -import pandas as pd -from flwr_datasets import FederatedDataset - -column_names = ["sepal_length", "sepal_width"] - - -def compute_hist(df: pd.DataFrame, col_name: str) -> np.ndarray: - freqs, _ = np.histogram(df[col_name]) - return freqs - - -# Define Flower client -class FlowerClient(fl.client.NumPyClient): - def __init__(self, X: pd.DataFrame): - self.X = X - - def fit( - self, parameters: List[np.ndarray], config: Dict[str, str] - ) -> Tuple[List[np.ndarray], int, Dict]: - hist_list = [] - # Execute query locally - for c in self.X.columns: - hist = compute_hist(self.X, c) - hist_list.append(hist) - return ( - hist_list, - len(self.X), - {}, - ) - - -if __name__ == "__main__": - N_CLIENTS = 2 - - parser = argparse.ArgumentParser(description="Flower") - parser.add_argument( - "--partition-id", - type=int, - choices=range(0, N_CLIENTS), - required=True, - help="Specifies the partition id of 
artificially partitioned datasets.", - ) - args = parser.parse_args() - partition_id = args.partition_id - - # Load the partition data - fds = FederatedDataset(dataset="hitorilabs/iris", partitioners={"train": N_CLIENTS}) - - dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:] - # Use just the specified columns - X = dataset[column_names] - - # Start Flower client - fl.client.start_client( - server_address="127.0.0.1:8080", - client=FlowerClient(X).to_client(), - ) diff --git a/examples/quickstart-pandas/pandas_example/__init__.py b/examples/quickstart-pandas/pandas_example/__init__.py new file mode 100644 index 000000000000..9e5b1a942dd8 --- /dev/null +++ b/examples/quickstart-pandas/pandas_example/__init__.py @@ -0,0 +1 @@ +"""pandas_example: A Flower / Pandas app.""" diff --git a/examples/quickstart-pandas/pandas_example/client_app.py b/examples/quickstart-pandas/pandas_example/client_app.py new file mode 100644 index 000000000000..0194b0dadf3a --- /dev/null +++ b/examples/quickstart-pandas/pandas_example/client_app.py @@ -0,0 +1,59 @@ +"""pandas_example: A Flower / Pandas app.""" + +import warnings + +import numpy as np +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + +from flwr.client import ClientApp +from flwr.common import Context, Message, MetricsRecord, RecordSet + +fds = None # Cache FederatedDataset + +warnings.filterwarnings("ignore", category=UserWarning) + + +def get_clientapp_dataset(partition_id: int, num_partitions: int): + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="scikit-learn/iris", + partitioners={"train": partitioner}, + ) + + dataset = fds.load_partition(partition_id, "train").with_format("pandas")[:] + # Use just the specified columns + return dataset[["SepalLengthCm", "SepalWidthCm"]] + + +# Flower ClientApp +app = ClientApp() + + 
+@app.query() +def query(msg: Message, context: Context): + """Construct histogram of local dataset and report to `ServerApp`.""" + + # Read the node_config to fetch data partition associated to this node + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + dataset = get_clientapp_dataset(partition_id, num_partitions) + + metrics = {} + # Compute some statistics for each column in the dataframe + for feature_name in dataset.columns: + # Compute histogram + freqs, _ = np.histogram(dataset[feature_name], bins=np.linspace(2.0, 10.0, 10)) + metrics[feature_name] = freqs.tolist() + + # Compute weighted average + metrics[f"{feature_name}_avg"] = dataset[feature_name].mean() * len(dataset) + metrics[f"{feature_name}_count"] = len(dataset) + + reply_content = RecordSet(metrics_records={"query_results": MetricsRecord(metrics)}) + + return msg.create_reply(reply_content) diff --git a/examples/quickstart-pandas/pandas_example/server_app.py b/examples/quickstart-pandas/pandas_example/server_app.py new file mode 100644 index 000000000000..95384c3fa978 --- /dev/null +++ b/examples/quickstart-pandas/pandas_example/server_app.py @@ -0,0 +1,87 @@ +"""pandas_example: A Flower / Pandas app.""" + +import random +import time +from logging import INFO + +import numpy as np + +from flwr.common import Context, MessageType, RecordSet, Message +from flwr.common.logger import log +from flwr.server import Driver, ServerApp + +app = ServerApp() + + +@app.main() +def main(driver: Driver, context: Context) -> None: + """This `ServerApp` constructs a histogram from partial-histograms reported by the + `ClientApp`s.""" + + num_rounds = context.run_config["num-server-rounds"] + min_nodes = 2 + fraction_sample = context.run_config["fraction-sample"] + + for server_round in range(num_rounds): + log(INFO, "") # Add newline for log readability + log(INFO, "Starting round %s/%s", server_round + 1, num_rounds) + + # Loop and wait until enough 
nodes are available. + all_node_ids = [] + while len(all_node_ids) < min_nodes: + all_node_ids = driver.get_node_ids() + if len(all_node_ids) >= min_nodes: + # Sample nodes + num_to_sample = int(len(all_node_ids) * fraction_sample) + node_ids = random.sample(all_node_ids, num_to_sample) + break + log(INFO, "Waiting for nodes to connect...") + time.sleep(2) + + log(INFO, "Sampled %s nodes (out of %s)", len(node_ids), len(all_node_ids)) + + # Create messages + recordset = RecordSet() + messages = [] + for node_id in node_ids: # one message for each node + message = driver.create_message( + content=recordset, + message_type=MessageType.QUERY, # target `query` method in ClientApp + dst_node_id=node_id, + group_id=str(server_round), + ) + messages.append(message) + + # Send messages and wait for all results + replies = driver.send_and_receive(messages) + log(INFO, "Received %s/%s results", len(replies), len(messages)) + + # Aggregate partial histograms + aggregated_hist = aggregate_partial_histograms(replies) + + # Display aggregated histogram + log(INFO, "Aggregated histogram: %s", aggregated_hist) + + +def aggregate_partial_histograms(messages: Message): + """Aggregate partial histograms.""" + + aggregated_hist = {} + total_count = 0 + for rep in messages: + if rep.has_error(): + continue + query_results = rep.content.metrics_records["query_results"] + # Sum metrics + for k, v in query_results.items(): + if k in ["SepalLengthCm", "SepalWidthCm"]: + if k in aggregated_hist: + aggregated_hist[k] += np.array(v) + else: + aggregated_hist[k] = np.array(v) + if "_count" in k: + total_count += v + + # Verify aggregated histogram adds up to total reported count + assert total_count == sum([sum(v) for v in aggregated_hist.values()]) + return aggregated_hist diff --git a/examples/quickstart-pandas/pyproject.toml b/examples/quickstart-pandas/pyproject.toml index 2e6b1424bb54..a80311292acb 100644 --- a/examples/quickstart-pandas/pyproject.toml +++ 
b/examples/quickstart-pandas/pyproject.toml @@ -1,17 +1,39 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] -name = "quickstart-pandas" -version = "0.1.0" -description = "Pandas Federated Analytics Quickstart with Flower" -authors = ["Ragy Haddad "] -maintainers = ["The Flower Authors "] +[project] +name = "pandas_example" +version = "1.0.0" +description = "Federated Learning with Pandas and Flower (Quickstart Example)" +license = "Apache-2.0" +authors = [ + { name = "The Flower Authors", email = "hello@flower.ai" }, + { name = "Ragy Haddad", email = "ragy202@gmail.com" }, +] +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "numpy==1.24.4", + "pandas==2.0.0", +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -numpy = "1.23.2" -pandas = "2.0.0" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "pandas_example.server_app:app" +clientapp = "pandas_example.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-sample = 1.0 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 4 diff --git a/examples/quickstart-pandas/requirements.txt b/examples/quickstart-pandas/requirements.txt deleted file mode 100644 index d44a3c6adab9..000000000000 --- a/examples/quickstart-pandas/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -numpy==1.23.2 -pandas==2.0.0 diff --git a/examples/quickstart-pandas/run.sh b/examples/quickstart-pandas/run.sh deleted file mode 100755 index 2ae1e582b8cf..000000000000 --- a/examples/quickstart-pandas/run.sh +++ /dev/null @@ -1,13 +0,0 @@ -echo "Starting server" -python 
server.py & -sleep 3 # Sleep for 3s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python client.py --partition-id ${i} & -done - -# This will allow you to use CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/quickstart-pandas/server.py b/examples/quickstart-pandas/server.py deleted file mode 100644 index 76cbd6194579..000000000000 --- a/examples/quickstart-pandas/server.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import Dict, List, Optional, Tuple, Union - -import flwr as fl -import numpy as np -from flwr.common import ( - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - Parameters, - Scalar, - ndarrays_to_parameters, - parameters_to_ndarrays, -) -from flwr.server.client_manager import ClientManager -from flwr.server.client_proxy import ClientProxy -from flwr.server.strategy import Strategy - - -class FedAnalytics(Strategy): - def initialize_parameters( - self, client_manager: Optional[ClientManager] = None - ) -> Optional[Parameters]: - return None - - def configure_fit( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: - config = {} - fit_ins = FitIns(parameters, config) - clients = client_manager.sample(num_clients=2, min_num_clients=2) - return [(client, fit_ins) for client in clients] - - def aggregate_fit( - self, - server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: - # Get results from fit - # Convert results - values_aggregated = [ - (parameters_to_ndarrays(fit_res.parameters)) for _, fit_res in results - ] - length_agg_hist = 0 - width_agg_hist = 0 - for val in values_aggregated: - length_agg_hist += val[0] - width_agg_hist += val[1] - - ndarr = np.concatenate( - (["Length:"], 
length_agg_hist, ["Width:"], width_agg_hist) - ) - return ndarrays_to_parameters(ndarr), {} - - def evaluate( - self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: - agg_hist = [arr.item() for arr in parameters_to_ndarrays(parameters)] - return 0, {"Aggregated histograms": agg_hist} - - def configure_evaluate( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: - pass - - def aggregate_evaluate( - self, - server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: - pass - - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=1), - strategy=FedAnalytics(), -) diff --git a/examples/quickstart-pytorch-lightning/README.md b/examples/quickstart-pytorch-lightning/README.md index e520be856962..0aa34db9af75 100644 --- a/examples/quickstart-pytorch-lightning/README.md +++ b/examples/quickstart-pytorch-lightning/README.md @@ -52,7 +52,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,max-epochs=2 +flwr run . 
--run-config "num-server-rounds=5 max-epochs=2" ``` ### Run with the Deployment Engine diff --git a/examples/quickstart-pytorch-lightning/pyproject.toml b/examples/quickstart-pytorch-lightning/pyproject.toml index 482fc1356527..e305d1ca75e8 100644 --- a/examples/quickstart-pytorch-lightning/pyproject.toml +++ b/examples/quickstart-pytorch-lightning/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with PyTorch Lightning and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "pytorch-lightning<2.0.0; sys_platform == 'darwin'", "pytorch-lightning==1.6.0; sys_platform != 'darwin'", diff --git a/examples/quickstart-pytorch/README.md b/examples/quickstart-pytorch/README.md index e37d49194b01..d07f83a7ea85 100644 --- a/examples/quickstart-pytorch/README.md +++ b/examples/quickstart-pytorch/README.md @@ -55,7 +55,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.05 +flwr run . 
--run-config "num-server-rounds=5 learning-rate=0.05" ``` > \[!TIP\] diff --git a/examples/quickstart-pytorch/pyproject.toml b/examples/quickstart-pytorch/pyproject.toml index 29414962ba6b..fa086d18880d 100644 --- a/examples/quickstart-pytorch/pyproject.toml +++ b/examples/quickstart-pytorch/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with PyTorch and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/examples/quickstart-sklearn-tabular/pyproject.toml b/examples/quickstart-sklearn-tabular/pyproject.toml index 2f2775e9fe90..4fc34ed58bb6 100644 --- a/examples/quickstart-sklearn-tabular/pyproject.toml +++ b/examples/quickstart-sklearn-tabular/pyproject.toml @@ -8,7 +8,7 @@ version = "1.0.0" description = "Federated Learning with scikit-learn and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "scikit-learn>=1.3.0", ] diff --git a/examples/quickstart-tabnet/pyproject.toml b/examples/quickstart-tabnet/pyproject.toml index 6b7311f068f0..8345d6bd3da2 100644 --- a/examples/quickstart-tabnet/pyproject.toml +++ b/examples/quickstart-tabnet/pyproject.toml @@ -9,7 +9,7 @@ description = "Tabnet Federated Learning Quickstart with Flower" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = ">=1.0,<2.0" tensorflow-cpu = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "platform_machine == \"x86_64\"" } tensorflow-macos = { version = ">=2.9.1,<2.11.1 || >2.11.1", markers = "sys_platform == \"darwin\" and platform_machine == \"arm64\"" } diff --git a/examples/quickstart-tensorflow/README.md b/examples/quickstart-tensorflow/README.md index f1fa12a3393c..a162e756d799 100644 --- 
a/examples/quickstart-tensorflow/README.md +++ b/examples/quickstart-tensorflow/README.md @@ -56,7 +56,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,learning-rate=0.05 +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` > \[!TIP\] diff --git a/examples/quickstart-tensorflow/pyproject.toml b/examples/quickstart-tensorflow/pyproject.toml index 5441dab31a8e..f5fc566d654c 100644 --- a/examples/quickstart-tensorflow/pyproject.toml +++ b/examples/quickstart-tensorflow/pyproject.toml @@ -8,10 +8,10 @@ version = "1.0.0" description = "Federated Learning with Tensorflow/Keras and Flower (Quickstart Example)" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == \"x86_64\"", - "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\"" + "tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == \"darwin\" and platform_machine == \"arm64\"", ] [tool.hatch.build.targets.wheel] packages = ["."] diff --git a/examples/quickstart-tensorflow/tfexample/client_app.py b/examples/quickstart-tensorflow/tfexample/client_app.py index 05bf15e074c2..fcea79ba7391 100644 --- a/examples/quickstart-tensorflow/tfexample/client_app.py +++ b/examples/quickstart-tensorflow/tfexample/client_app.py @@ -21,10 +21,6 @@ def __init__( self.batch_size = batch_size self.verbose = verbose - def get_parameters(self, config): - """Return the parameters of the model of this client.""" - return self.model.get_weights() - def fit(self, parameters, config): """Train the model with data of this client.""" self.model.set_weights(parameters) diff --git a/examples/quickstart-tensorflow/tfexample/server_app.py b/examples/quickstart-tensorflow/tfexample/server_app.py index 
053e92588e67..a09ceccfb3f2 100644 --- a/examples/quickstart-tensorflow/tfexample/server_app.py +++ b/examples/quickstart-tensorflow/tfexample/server_app.py @@ -22,7 +22,6 @@ def server_fn(context: Context): """Construct components that set the ServerApp behaviour.""" # Let's define the global model and pass it to the strategy - # Note this is optional. parameters = ndarrays_to_parameters(load_model().get_weights()) # Define the strategy diff --git a/examples/simulation-pytorch/README.md b/examples/simulation-pytorch/README.md deleted file mode 100644 index 2dbfbc849ab7..000000000000 --- a/examples/simulation-pytorch/README.md +++ /dev/null @@ -1,103 +0,0 @@ ---- -tags: [basic, vision, fds, simulation] -dataset: [MNIST] -framework: [torch, torchvision] ---- - -# Flower Simulation example using PyTorch - -This introductory example uses the simulation capabilities of Flower to simulate a large number of clients on a single machine. Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for a deep dive into how Flower simulation works. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. This examples uses 100 clients by default. - -## Running the example (via Jupyter Notebook) - -Run the example on Google Colab: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/examples/simulation-pytorch/sim.ipynb) - -Alternatively, you can run `sim.ipynb` locally or in any other Jupyter environment. - -## Running the example - -Start by cloning the code example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/simulation-pytorch . 
&& rm -rf flower && cd simulation-pytorch -``` - -This will create a new directory called `simulation-pytorch` containing the following files: - -``` --- README.md <- Your're reading this right now --- sim.ipynb <- Example notebook --- sim.py <- Example code --- utils.py <- auxiliary functions for this example --- pyproject.toml <- Example dependencies --- requirements.txt <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `torch` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -### Run with `start_simulation()` - -Ensure you have activated your environment then: - -```bash -# and then run the example -python sim.py -``` - -You can adjust the CPU/GPU resources you assign to each of your virtual clients. By default, your clients will only use 1xCPU core. 
For example: - -```bash -# Will assign 2xCPUs to each client -python sim.py --num_cpus=2 - -# Will assign 2xCPUs and 25% of the GPU's VRAM to each client -# This means that you can have 4 concurrent clients on each GPU -# (assuming you have enough CPUs) -python sim.py --num_cpus=2 --num_gpus=0.25 -``` - -### Run with Flower Next (preview) - -Ensure you have activated your environment, then execute the command below. All `ClientApp` instances will run on CPU but the `ServerApp` will run on the GPU if one is available. Note that this is the case because the `Simulation Engine` only exposes certain resources to the `ClientApp` (based on the `client_resources` in `--backend-config`). - -```bash -# Run with the default backend-config. -# `--server-app` points to the `server` object in the sim.py file in this example. -# `--client-app` points to the `client` object in the sim.py file in this example. -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 -``` - -You can change the default resources assigned to each `ClientApp` by means of the `--backend-config` argument: - -```bash -# Tells the VCE to reserve 2x CPUs and 25% of available VRAM for each ClientApp -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 \ - --backend-config='{"client_resources": {"num_cpus":2, "num_gpus":0.25}}' -``` - -Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. 
diff --git a/examples/simulation-pytorch/pyproject.toml b/examples/simulation-pytorch/pyproject.toml deleted file mode 100644 index 5978c17f2c60..000000000000 --- a/examples/simulation-pytorch/pyproject.toml +++ /dev/null @@ -1,19 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "simulation-pytorch" -version = "0.1.0" -description = "Federated Learning Simulation with Flower and PyTorch" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "2.1.1" -torchvision = "0.16.1" - -[tool.poetry.group.dev.dependencies] -ipykernel = "^6.27.0" diff --git a/examples/simulation-pytorch/requirements.txt b/examples/simulation-pytorch/requirements.txt deleted file mode 100644 index 4dbecab3e546..000000000000 --- a/examples/simulation-pytorch/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -torch==2.1.1 -torchvision==0.16.1 -flwr-datasets[vision]>=0.0.2, <1.0.0 \ No newline at end of file diff --git a/examples/simulation-pytorch/sim.ipynb b/examples/simulation-pytorch/sim.ipynb deleted file mode 100644 index d225069cb444..000000000000 --- a/examples/simulation-pytorch/sim.ipynb +++ /dev/null @@ -1,629 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Environment Setup\n", - "\n", - "To start working with Flower, very little is required once you have activated your Python environment (e.g. via `conda`, `virtualenv`, `pyenv`, etc). If you are running this code on Colab, there is really nothing to do except to install Flower and other dependencies. 
The steps below have been verified to run in Colab.\n", - "\n", - "## Installing Flower\n", - "\n", - "You can install flower very conveniently from `pip`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# depending on your shell, you might need to add `\\` before `[` and `]`.\n", - "!pip install -q flwr[simulation]\n", - "!pip install flwr_datasets[vision]" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We will be using the _simulation_ mode in Flower, which allows you to run a large number of clients without the overheads of manually managing devices. This is achieved via the [Virtual Client Engine](https://flower.ai/docs/framework/how-to-run-simulations.html) in Flower. With simulation, you can dynamically scale your experiments whether you run the code on your laptop, a machine with a single GPU, a server with multiple GPUs os even on a cluster with multiple servers. The `Virtual Client Engine` handles everything transparently and it allows you to specify how many resources (e.g. CPU cores, GPU VRAM) should be assigned to each virtual client." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "Flower is agnostic to your choice of ML Framework. Flower works with `PyTorch`, `Tensorflow`, `NumPy`, `🤗 Transformers`, `MXNet`, `JAX`, `scikit-learn`, `fastai`, `Pandas`. Flower also supports all major platforms: `iOS`, `Android` and plain `C++`. You can find a _quickstart-_ example for each of the above in the [Flower Repository](https://github.com/adap/flower/tree/main/examples) inside the `examples/` directory.\n", - "\n", - "In this tutorial we are going to use PyTorch, it comes pre-installed in your Collab runtime so there is no need to installed it again. 
If you wouuld like to install another version, you can still do that in the same way other packages are installed via `!pip`" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We are going to install some other dependencies you are likely familiar with. Let's install `maplotlib` to plot our results at the end." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "58b7af77-609f-4118-bd5b-5629a4b5a296" - }, - "outputs": [], - "source": [ - "!pip install matplotlib" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Preparing the experiment\n", - "\n", - "This tutorial is not so much about novel architectural designs so we keep things simple and make use of a typical CNN that is adequate for the MNIST image classification task.\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import torch.nn as nn\n", - "import torch.nn.functional as F\n", - "from torch.utils.data import DataLoader\n", - "\n", - "\n", - "class Net(nn.Module):\n", - " def __init__(self, num_classes: int) -> None:\n", - " super(Net, self).__init__()\n", - " self.conv1 = nn.Conv2d(1, 6, 5)\n", - " self.pool = nn.MaxPool2d(2, 2)\n", - " self.conv2 = nn.Conv2d(6, 16, 5)\n", - " self.fc1 = nn.Linear(16 * 4 * 4, 120)\n", - " self.fc2 = nn.Linear(120, 84)\n", - " self.fc3 = nn.Linear(84, num_classes)\n", - "\n", - " def forward(self, x: torch.Tensor) -> torch.Tensor:\n", - " x = self.pool(F.relu(self.conv1(x)))\n", - " x = self.pool(F.relu(self.conv2(x)))\n", - " x = x.view(-1, 16 * 4 * 4)\n", - " x = F.relu(self.fc1(x))\n", - " x = F.relu(self.fc2(x))\n", - " x = self.fc3(x)\n", - " return x" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We'll be training the model in a 
Federated setting. In order to do that, we need to define two functions:\n", - "\n", - "* `train()` that will train the model given a dataloader.\n", - "* `test()` that will be used to evaluate the performance of the model on held-out data, e.g., a training set." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def train(net, trainloader, optim, epochs, device: str):\n", - " \"\"\"Train the network on the training set.\"\"\"\n", - " criterion = torch.nn.CrossEntropyLoss()\n", - " net.train()\n", - " for _ in range(epochs):\n", - " for batch in trainloader:\n", - " images, labels = batch[\"image\"].to(device), batch[\"label\"].to(device)\n", - " optim.zero_grad()\n", - " loss = criterion(net(images), labels)\n", - " loss.backward()\n", - " optim.step()\n", - "\n", - "\n", - "def test(net, testloader, device: str):\n", - " \"\"\"Validate the network on the entire test set.\"\"\"\n", - " criterion = torch.nn.CrossEntropyLoss()\n", - " correct, loss = 0, 0.0\n", - " net.eval()\n", - " with torch.no_grad():\n", - " for data in testloader:\n", - " images, labels = data[\"image\"].to(device), data[\"label\"].to(device)\n", - " outputs = net(images)\n", - " loss += criterion(outputs, labels).item()\n", - " _, predicted = torch.max(outputs.data, 1)\n", - " correct += (predicted == labels).sum().item()\n", - " accuracy = correct / len(testloader.dataset)\n", - " return loss, accuracy" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The code we have written so far is not specific to Federated Learning. Then, what are the key differences between Federated Learning and Centralised Training? 
If you could only pick you, probably you'd say:\n", - "* Federated Learning is distributed -- the model is trained on-device by the participating clients.\n", - "* Data remains private and is owned by a specific _client_ -- the data is never sent to the central server.\n", - "\n", - "The are several more differences. But the above two are the main ones to always consider and that are common to all flavours of Federated Learning (e.g. _cross-device_ or _cross-silo_). The remaining of this tutorial is going to focus in transforming the code we have written so far for the centralised setting and construct a Federated Learning pipeline using Flower and PyTorch.\n", - "\n", - "Let's begin! 🚀" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## One Client, One Data Partition\n", - "\n", - "To start designing a Federated Learning pipeline we need to meet one of the key properties in FL: each client has its own data partition. To accomplish this with the MNIST dataset, we are going to generate N random partitions, where N is the total number of clients in our FL system.\n", - "\n", - "We can use [Flower Datasets](https://flower.ai/docs/datasets/) to effortlessly obtain an off-the-shelf partitioned dataset or partition one that isn't pre-partitioned. Let's choose MNIST." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from datasets import Dataset\n", - "from flwr_datasets import FederatedDataset\n", - "from datasets.utils.logging import disable_progress_bar\n", - "\n", - "# Let's set a simulation involving a total of 100 clients\n", - "NUM_CLIENTS = 100\n", - "\n", - "# Download MNIST dataset and partition the \"train\" partition (so one can be assigned to each client)\n", - "mnist_fds = FederatedDataset(dataset=\"mnist\", partitioners={\"train\": NUM_CLIENTS})\n", - "# Let's keep the test set as is, and use it to evaluate the global model on the server\n", - "centralized_testset = mnist_fds.load_split(\"test\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's create a function that returns a set of transforms to apply to our images" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torchvision.transforms import ToTensor, Normalize, Compose\n", - "\n", - "\n", - "def apply_transforms(batch):\n", - " \"\"\"Get transformation for MNIST dataset\"\"\"\n", - "\n", - " # transformation to convert images to tensors and apply normalization\n", - " transforms = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n", - " batch[\"image\"] = [transforms(img) for img in batch[\"image\"]]\n", - " return batch" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's next define how our FL clients will behave.\n", - "\n", - "## Defining a Flower Client\n", - "\n", - "You can think of a client in FL as an entity that owns some data and trains a model using this data. 
The caveat is that the model is being trained _collaboratively_ in Federation by multiple clients (sometimes up to hundreds of thousands) and, in most instances of FL, is sent by a central server.\n", - "\n", - "A Flower Client is a simple Python class with four distinct methods:\n", - "\n", - "* `fit()`: With this method, the client does on-device training for a number of epochs using its own data. At the end, the resulting model is sent back to the server for aggregation.\n", - "\n", - "* `evaluate()`: With this method, the server can evaluate the performance of the global model on the local validation set of a client. This can be used for instance when there is no centralised dataset on the server for validation/test. Also, this method can be use to asses the degree of personalisation of the model being federated.\n", - "\n", - "* `set_parameters()`: This method takes the parameters sent by the server and uses them to initialise the parameters of the local model that is ML framework specific (e.g. TF, Pytorch, etc).\n", - "\n", - "* `get_parameters()`: It extract the parameters from the local model and transforms them into a list of NumPy arrays. This ML framework-agnostic representation of the model will be sent to the server.\n", - "\n", - "Let's start by importing Flower!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import flwr as fl" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's defice our Flower Client class:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from collections import OrderedDict\n", - "from typing import Dict, List, Tuple\n", - "\n", - "from flwr.common import NDArrays, Scalar\n", - "\n", - "\n", - "class FlowerClient(fl.client.NumPyClient):\n", - " def __init__(self, trainloader, valloader) -> None:\n", - " super().__init__()\n", - "\n", - " self.trainloader = trainloader\n", - " self.valloader = valloader\n", - " self.model = Net(num_classes=10)\n", - " # Determine device\n", - " self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - " self.model.to(self.device) # send model to device\n", - "\n", - " def set_parameters(self, parameters):\n", - " \"\"\"With the model parameters received from the server,\n", - " overwrite the uninitialise model in this class with them.\"\"\"\n", - "\n", - " params_dict = zip(self.model.state_dict().keys(), parameters)\n", - " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", - " # now replace the parameters\n", - " self.model.load_state_dict(state_dict, strict=True)\n", - "\n", - " def get_parameters(self, config: Dict[str, Scalar]):\n", - " \"\"\"Extract all model parameters and conver them to a list of\n", - " NumPy arryas. The server doesn't work with PyTorch/TF/etc.\"\"\"\n", - " return [val.cpu().numpy() for _, val in self.model.state_dict().items()]\n", - "\n", - " def fit(self, parameters, config):\n", - " \"\"\"This method train the model using the parameters sent by the\n", - " server on the dataset of this client. 
At then end, the parameters\n", - " of the locally trained model are communicated back to the server\"\"\"\n", - "\n", - " # copy parameters sent by the server into client's local model\n", - " self.set_parameters(parameters)\n", - "\n", - " # read from config\n", - " lr, epochs = config[\"lr\"], config[\"epochs\"]\n", - "\n", - " # Define the optimizer\n", - " optim = torch.optim.SGD(self.model.parameters(), lr=lr, momentum=0.9)\n", - "\n", - " # do local training\n", - " train(self.model, self.trainloader, optim, epochs=epochs, device=self.device)\n", - "\n", - " # return the model parameters to the server as well as extra info (number of training examples in this case)\n", - " return self.get_parameters({}), len(self.trainloader), {}\n", - "\n", - " def evaluate(self, parameters: NDArrays, config: Dict[str, Scalar]):\n", - " \"\"\"Evaluate the model sent by the server on this client's\n", - " local validation set. Then return performance metrics.\"\"\"\n", - "\n", - " self.set_parameters(parameters)\n", - " loss, accuracy = test(self.model, self.valloader, device=self.device)\n", - " # send statistics back to the server\n", - " return float(loss), len(self.valloader), {\"accuracy\": accuracy}" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Spend a few minutes to inspect the `FlowerClient` class above. Please ask questions if there is something unclear !\n", - "\n", - "Then keen-eyed among you might have realised that if we were to fuse the client's `fit()` and `evaluate()` methods, we'll end up with essentially the same as in the `run_centralised()` function we used in the Centralised Training part of this tutorial. And it is true!! In Federated Learning, the way clients perform local training makes use of the same principles as more traditional centralised setup. The key difference is that the dataset now is much smaller and it's never _\"seen\"_ by the entity running the FL workload (i.e. 
the central server).\n", - "\n", - "\n", - "Talking about the central server... we should define what strategy we want to make use of so the updated models sent from the clients back to the server at the end of the `fit()` method are aggregate.\n", - "\n", - "\n", - "## Choosing a Flower Strategy\n", - "\n", - "\n", - "A strategy sits at the core of the Federated Learning experiment. It is involved in all stages of a FL pipeline: sampling clients; sending the _global model_ to the clients so they can do `fit()`; receive the updated models from the clients and **aggregate** these to construct a new _global model_; define and execute global or federated evaluation; and more.\n", - "\n", - "Flower comes with [many strategies built-in](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy) and more to be available in the next release (`1.5` already!). For this tutorial, let's use what is arguable the most popular strategy out there: `FedAvg`.\n", - "\n", - "The way `FedAvg` works is simple but performs surprisingly well in practice. It is therefore one good strategy to start your experimentation. `FedAvg`, as its name implies, derives a new version of the _global model_ by taking the average of all the models sent by clients participating in the round. You can read all the details [in the paper](https://arxiv.org/abs/1602.05629).\n", - "\n", - "Let's see how we can define `FedAvg` using Flower. We use one of the callbacks called `evaluate_fn` so we can easily evaluate the state of the global model using a small centralised testset. Note this functionality is user-defined since it requires a choice in terms of ML-framework. (if you recall, Flower is framework agnostic).\n", - "\n", - "> This being said, centralised evaluation of the global model is only possible if there exists a centralised dataset that somewhat follows a similar distribution as the data that's spread across clients. 
In some cases having such centralised dataset for validation is not possible, so the only solution is to federate the evaluation of the _global model_. This is the default behaviour in Flower. If you don't specify teh `evaluate_fn` argument in your strategy, then, centralised global evaluation won't be performed." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_evaluate_fn(centralized_testset: Dataset):\n", - " \"\"\"This is a function that returns a function. The returned\n", - " function (i.e. `evaluate_fn`) will be executed by the strategy\n", - " at the end of each round to evaluate the stat of the global\n", - " model.\"\"\"\n", - "\n", - " def evaluate_fn(server_round: int, parameters, config):\n", - " \"\"\"This function is executed by the strategy it will instantiate\n", - " a model and replace its parameters with those from the global model.\n", - " The, the model will be evaluate on the test set (recall this is the\n", - " whole MNIST test set).\"\"\"\n", - "\n", - " model = Net(num_classes=10)\n", - "\n", - " # Determine device\n", - " device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", - " model.to(device) # send model to device\n", - "\n", - " # set parameters to the model\n", - " params_dict = zip(model.state_dict().keys(), parameters)\n", - " state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})\n", - " model.load_state_dict(state_dict, strict=True)\n", - "\n", - " # Apply transform to dataset\n", - " testset = centralized_testset.with_transform(apply_transforms)\n", - "\n", - " testloader = DataLoader(testset, batch_size=50)\n", - " # call test\n", - " loss, accuracy = test(model, testloader, device)\n", - " return loss, {\"accuracy\": accuracy}\n", - "\n", - " return evaluate_fn" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We could now define a strategy just as shown (commented) above. 
Instead, let's see how additional (but entirely optional) functionality can be easily added to our strategy. We are going to define two additional auxiliary functions to: (1) be able to configure how clients do local training; and (2) define a function to aggregate the metrics that clients return after running their `evaluate` methods:\n", - "\n", - "1. `fit_config()`. This is a function that will be executed inside the strategy when configuring a new `fit` round. This function is relatively simple and only requires as input argument the round at which the FL experiment is at. In this example we simply return a Python dictionary to specify the number of epochs and learning rate each client should made use of inside their `fit()` methods. A more versatile implementation would add more hyperparameters (e.g. the learning rate) and adjust them as the FL process advances (e.g. reducing the learning rate in later FL rounds).\n", - "2. `weighted_average()`: This is an optional function to pass to the strategy. It will be executed after an evaluation round (i.e. when client run `evaluate()`) and will aggregate the metrics clients return. In this example, we use this function to compute the weighted average accuracy of clients doing `evaluate()`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flwr.common import Metrics\n", - "\n", - "\n", - "def fit_config(server_round: int) -> Dict[str, Scalar]:\n", - " \"\"\"Return a configuration with static batch size and (local) epochs.\"\"\"\n", - " config = {\n", - " \"epochs\": 1, # Number of local epochs done by clients\n", - " \"lr\": 0.01, # Learning rate to use by clients during fit()\n", - " }\n", - " return config\n", - "\n", - "\n", - "def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:\n", - " \"\"\"Aggregation function for (federated) evaluation metrics, i.e. 
those returned by\n", - " the client's evaluate() method.\"\"\"\n", - " # Multiply accuracy of each client by number of examples used\n", - " accuracies = [num_examples * m[\"accuracy\"] for num_examples, m in metrics]\n", - " examples = [num_examples for num_examples, _ in metrics]\n", - "\n", - " # Aggregate and return custom metric (weighted average)\n", - " return {\"accuracy\": sum(accuracies) / sum(examples)}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can define our strategy:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.1, # Sample 10% of available clients for training\n", - " fraction_evaluate=0.05, # Sample 5% of available clients for evaluation\n", - " on_fit_config_fn=fit_config,\n", - " evaluate_metrics_aggregation_fn=weighted_average, # aggregates federated metrics\n", - " evaluate_fn=get_evaluate_fn(centralized_testset), # global evaluation function\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "So far we have:\n", - "* created the dataset partitions (one for each client)\n", - "* defined the client class\n", - "* decided on a strategy to use\n", - "\n", - "Now we just need to launch the Flower FL experiment... not so fast! just one final function: let's create another callback that the Simulation Engine will use in order to span VirtualClients. As you can see this is really simple: construct a FlowerClient object, assigning each their own data partition." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from torch.utils.data import DataLoader\n", - "\n", - "\n", - "def get_client_fn(dataset: FederatedDataset):\n", - " \"\"\"Return a function to construct a client.\n", - "\n", - " The VirtualClientEngine will execute this function whenever a client is sampled by\n", - " the strategy to participate.\n", - " \"\"\"\n", - "\n", - " def client_fn(cid: str) -> fl.client.Client:\n", - " \"\"\"Construct a FlowerClient with its own dataset partition.\"\"\"\n", - "\n", - " # Let's get the partition corresponding to the i-th client\n", - " client_dataset = dataset.load_partition(int(cid), \"train\")\n", - "\n", - " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", - "\n", - " trainset = client_dataset_splits[\"train\"]\n", - " valset = client_dataset_splits[\"test\"]\n", - "\n", - " # Now we apply the transform to each batch.\n", - " trainloader = DataLoader(\n", - " trainset.with_transform(apply_transforms), batch_size=32, shuffle=True\n", - " )\n", - " valloader = DataLoader(valset.with_transform(apply_transforms), batch_size=32)\n", - "\n", - " # Create and return client\n", - " return FlowerClient(trainloader, valloader).to_client()\n", - "\n", - " return client_fn\n", - "\n", - "\n", - "client_fn_callback = get_client_fn(mnist_fds)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we are ready to launch the FL experiment using Flower simulation:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "outputId": "9ad8dcea-8004-4c6e-a025-e168da636c88" - }, - "outputs": [], - "source": [ - "# With a dictionary, you tell Flower's VirtualClientEngine that each\n", - "# client needs exclusive access to these many resources in 
order to run\n", - "client_resources = {\"num_cpus\": 1, \"num_gpus\": 0.0}\n", - "\n", - "# Let's disable tqdm progress bar in the main thread (used by the server)\n", - "disable_progress_bar()\n", - "\n", - "history = fl.simulation.start_simulation(\n", - " client_fn=client_fn_callback, # a callback to construct a client\n", - " num_clients=NUM_CLIENTS, # total number of clients in the experiment\n", - " config=fl.server.ServerConfig(num_rounds=10), # let's run for 10 rounds\n", - " strategy=strategy, # the strategy that will orchestrate the whole FL pipeline\n", - " client_resources=client_resources,\n", - " actor_kwargs={\n", - " \"on_actor_init_fn\": disable_progress_bar # disable tqdm on each actor/process spawning virtual clients\n", - " },\n", - ")" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Doing 10 rounds should take less than 2 minutes on a CPU-only Colab instance <-- Flower Simulation is fast! 🚀\n", - "\n", - "You can then use the resturned `History` object to either save the results to disk or do some visualisation (or both of course, or neither if you like chaos). Below you can see how you can plot the centralised accuracy obtainined at the end of each round (including at the very beginning of the experiment) for the _global model_. This is want the function `evaluate_fn()` that we passed to the strategy reports." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 508 - }, - "outputId": "d8eab106-cee9-4266-9082-0944882cdba8" - }, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "print(f\"{history.metrics_centralized = }\")\n", - "\n", - "global_accuracy_centralised = history.metrics_centralized[\"accuracy\"]\n", - "round = [data[0] for data in global_accuracy_centralised]\n", - "acc = [100.0 * data[1] for data in global_accuracy_centralised]\n", - "plt.plot(round, acc)\n", - "plt.grid()\n", - "plt.ylabel(\"Accuracy (%)\")\n", - "plt.xlabel(\"Round\")\n", - "plt.title(\"MNIST - IID - 100 clients with 10 clients per round\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Congratulations! With that, you built a Flower client, customized it's instantiation through the `client_fn`, customized the server-side execution through a `FedAvg` strategy configured for this workload, and started a simulation with 100 clients (each holding their own individual partition of the MNIST dataset).\n", - "\n", - "Next, you can continue to explore more advanced Flower topics:\n", - "\n", - "- Deploy server and clients on different machines using `start_server` and `start_client`\n", - "- Customize the server-side execution through custom strategies\n", - "- Customize the client-side execution through `config` dictionaries\n", - "\n", - "Get all resources you need!\n", - "\n", - "* **[DOCS]** Our complete documenation: https://flower.ai/docs/\n", - "* **[Examples]** All Flower examples: https://flower.ai/docs/examples/\n", - "* **[VIDEO]** Our Youtube channel: https://www.youtube.com/@flowerlabs\n", - "\n", - "Don't forget to join our Slack channel: https://flower.ai/join-slack/\n" - ] - } - ], - "metadata": { - "colab": { - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": 
"python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/simulation-pytorch/sim.py b/examples/simulation-pytorch/sim.py deleted file mode 100644 index a435db6d7724..000000000000 --- a/examples/simulation-pytorch/sim.py +++ /dev/null @@ -1,225 +0,0 @@ -import argparse -from collections import OrderedDict -from typing import Dict, List, Tuple - -import flwr as fl -import torch -from datasets import Dataset -from datasets.utils.logging import disable_progress_bar -from flwr.common import Metrics -from flwr.common.typing import Scalar -from flwr_datasets import FederatedDataset -from torch.utils.data import DataLoader - -from utils import Net, apply_transforms, test, train - -parser = argparse.ArgumentParser(description="Flower Simulation with PyTorch") - -parser.add_argument( - "--num_cpus", - type=int, - default=1, - help="Number of CPUs to assign to a virtual client", -) -parser.add_argument( - "--num_gpus", - type=float, - default=0.0, - help="Ratio of GPU memory to assign to a virtual client", -) - -NUM_CLIENTS = 100 -NUM_ROUNDS = 10 - - -# Flower client, adapted from Pytorch quickstart example -class FlowerClient(fl.client.NumPyClient): - def __init__(self, trainset, valset): - self.trainset = trainset - self.valset = valset - - # Instantiate model - self.model = Net() - - # Determine device - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model.to(self.device) # send model to device - - def get_parameters(self, config): - return [val.cpu().numpy() for _, val in self.model.state_dict().items()] - - def fit(self, parameters, config): - set_params(self.model, parameters) - - # Read from config - batch, epochs = config["batch_size"], config["epochs"] - - # Construct dataloader - trainloader = DataLoader(self.trainset, batch_size=batch, shuffle=True) - - # Define optimizer - optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01, momentum=0.9) - # Train - train(self.model, 
trainloader, optimizer, epochs=epochs, device=self.device) - - # Return local model and statistics - return self.get_parameters({}), len(trainloader.dataset), {} - - def evaluate(self, parameters, config): - set_params(self.model, parameters) - - # Construct dataloader - valloader = DataLoader(self.valset, batch_size=64) - - # Evaluate - loss, accuracy = test(self.model, valloader, device=self.device) - - # Return statistics - return float(loss), len(valloader.dataset), {"accuracy": float(accuracy)} - - -def get_client_fn(dataset: FederatedDataset): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. - """ - - def client_fn(context) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - - # Let's get the partition corresponding to the i-th client - client_dataset = dataset.load_partition( - int(context.node_config["partition-id"]), "train" - ) - - # Now let's split it into train (90%) and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) - - trainset = client_dataset_splits["train"] - valset = client_dataset_splits["test"] - - # Now we apply the transform to each batch. 
- trainset = trainset.with_transform(apply_transforms) - valset = valset.with_transform(apply_transforms) - - # Create and return client - return FlowerClient(trainset, valset).to_client() - - return client_fn - - -def fit_config(server_round: int) -> Dict[str, Scalar]: - """Return a configuration with static batch size and (local) epochs.""" - config = { - "epochs": 1, # Number of local epochs done by clients - "batch_size": 32, # Batch size to use by clients during fit() - } - return config - - -def set_params(model: torch.nn.ModuleList, params: List[fl.common.NDArrays]): - """Set model weights from a list of NumPy ndarrays.""" - params_dict = zip(model.state_dict().keys(), params) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - model.load_state_dict(state_dict, strict=True) - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """Aggregation function for (federated) evaluation metrics, i.e. those returned by - the client's evaluate() method.""" - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -def get_evaluate_fn( - centralized_testset: Dataset, -): - """Return an evaluation function for centralized evaluation.""" - - def evaluate( - server_round: int, parameters: fl.common.NDArrays, config: Dict[str, Scalar] - ): - """Use the entire CIFAR-10 test set for evaluation.""" - - # Determine device - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - model = Net() - set_params(model, parameters) - model.to(device) - - # Apply transform to dataset - testset = centralized_testset.with_transform(apply_transforms) - - # Disable tqdm for dataset preprocessing - disable_progress_bar() - - testloader = DataLoader(testset, batch_size=50) 
- loss, accuracy = test(model, testloader, device=device) - - return loss, {"accuracy": accuracy} - - return evaluate - - -# Download MNIST dataset and partition it -mnist_fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) -centralized_testset = mnist_fds.load_split("test") - -from flwr.server import ServerAppComponents - - -def server_fn(context): - # Configure the strategy - strategy = fl.server.strategy.FedAvg( - fraction_fit=0.1, # Sample 10% of available clients for training - fraction_evaluate=0.05, # Sample 5% of available clients for evaluation - min_available_clients=10, - on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=weighted_average, # Aggregate federated metrics - evaluate_fn=get_evaluate_fn(centralized_testset), # Global evaluation function - ) - return ServerAppComponents( - strategy=strategy, config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS) - ) - - -# ClientApp for Flower-Next -client = fl.client.ClientApp( - client_fn=get_client_fn(mnist_fds), -) - -# ServerApp for Flower-Next -server = fl.server.ServerApp(server_fn=server_fn) - - -def main(): - # Parse input arguments - args = parser.parse_args() - - # Resources to be assigned to each virtual client - client_resources = { - "num_cpus": args.num_cpus, - "num_gpus": args.num_gpus, - } - - # Start simulation - fl.simulation.start_simulation( - client_fn=get_client_fn(mnist_fds), - num_clients=NUM_CLIENTS, - client_resources=client_resources, - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, - actor_kwargs={ - "on_actor_init_fn": disable_progress_bar # disable tqdm on each actor/process spawning virtual clients - }, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/simulation-pytorch/utils.py b/examples/simulation-pytorch/utils.py deleted file mode 100644 index 702e9886615e..000000000000 --- a/examples/simulation-pytorch/utils.py +++ /dev/null @@ -1,63 +0,0 @@ -import torch -import torch.nn as nn -import 
torch.nn.functional as F -from torchvision.transforms import Compose, Normalize, ToTensor - - -# transformation to convert images to tensors and apply normalization -def apply_transforms(batch): - transforms = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) - batch["image"] = [transforms(img) for img in batch["image"]] - return batch - - -# Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz') -class Net(nn.Module): - def __init__(self, num_classes: int = 10) -> None: - super(Net, self).__init__() - self.conv1 = nn.Conv2d(1, 6, 5) - self.pool = nn.MaxPool2d(2, 2) - self.conv2 = nn.Conv2d(6, 16, 5) - self.fc1 = nn.Linear(16 * 4 * 4, 120) - self.fc2 = nn.Linear(120, 84) - self.fc3 = nn.Linear(84, num_classes) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.pool(F.relu(self.conv1(x))) - x = self.pool(F.relu(self.conv2(x))) - x = x.view(-1, 16 * 4 * 4) - x = F.relu(self.fc1(x)) - x = F.relu(self.fc2(x)) - x = self.fc3(x) - return x - - -# borrowed from Pytorch quickstart example -def train(net, trainloader, optim, epochs, device: str): - """Train the network on the training set.""" - criterion = torch.nn.CrossEntropyLoss() - net.train() - for _ in range(epochs): - for batch in trainloader: - images, labels = batch["image"].to(device), batch["label"].to(device) - optim.zero_grad() - loss = criterion(net(images), labels) - loss.backward() - optim.step() - - -# borrowed from Pytorch quickstart example -def test(net, testloader, device: str): - """Validate the network on the entire test set.""" - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - net.eval() - with torch.no_grad(): - for data in testloader: - images, labels = data["image"].to(device), data["label"].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - correct += (predicted == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy diff --git 
a/examples/simulation-tensorflow/README.md b/examples/simulation-tensorflow/README.md deleted file mode 100644 index 047cb4379659..000000000000 --- a/examples/simulation-tensorflow/README.md +++ /dev/null @@ -1,104 +0,0 @@ ---- -tags: [basic, vision, fds, simulation] -dataset: [MNIST] -framework: [tensorflow, Keras] ---- - -# Flower Simulation example using TensorFlow/Keras - -This introductory example uses the simulation capabilities of Flower to simulate a large number of clients on a single machine. Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for a deep dive into how Flower simulation works. This example uses [Flower Datasets](https://flower.ai/docs/datasets/) to download, partition and preprocess the MNIST dataset. This examples uses 100 clients by default. - -## Running the example (via Jupyter Notebook) - -Run the example on Google Colab: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/adap/flower/blob/main/examples/simulation-tensorflow/sim.ipynb) - -Alternatively, you can run `sim.ipynb` locally or in any other Jupyter environment. - -## Running the example - -Start by cloning the code example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/simulation-tensorflow . && rm -rf flower && cd simulation-tensorflow -``` - -This will create a new directory called `simulation-tensorflow` containing the following files: - -``` --- README.md <- Your're reading this right now --- sim.ipynb <- Example notebook --- sim.py <- Example code --- pyproject.toml <- Example dependencies --- requirements.txt <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `tensorflow` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. 
We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. - -```shell -pip install -r requirements.txt -``` - -### Run with `start_simulation()` - -Ensure you have activated your environment then: - -```bash -# and then run the example -python sim.py -``` - -You can adjust the CPU/GPU resources you assign to each of your virtual clients. By default, your clients will only use 2xCPU core. For example: - -```bash -# Will assign 2xCPUs to each client -python sim.py --num_cpus=2 - -# Will assign 2xCPUs and 25% of the GPU's VRAM to each client -# This means that you can have 4 concurrent clients on each GPU -# (assuming you have enough CPUs) -python sim.py --num_cpus=2 --num_gpus=0.25 -``` - -Because TensorFlow by default maps all the available VRAM, we need to [enable GPU memory growth](https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth), see how it is done in the example (`sim.py`) for both the "main" process (where the server/strategy runs) and for the clients (using the `actor_kwargs`) - -### Run with Flower Next (preview) - -Ensure you have activated your environment, then execute the command below. 
All `ClientApp` instances will run on CPU but the `ServerApp` will run on the GPU if one is available. Note that this is the case because the `Simulation Engine` only exposes certain resources to the `ClientApp` (based on the `client_resources` in `--backend-config`). For TensorFlow simulations, it is desirable to make use of TF's [memory growth](https://www.tensorflow.org/api_docs/python/tf/config/experimental/set_memory_growth) feature. You can enable that easily with the `--enable-tf-gpu-growth` flag. - -```bash -# Run with the default backend-config. -# `--server-app` points to the `server` object in the sim.py file in this example. -# `--client-app` points to the `client` object in the sim.py file in this example. -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 --enable-tf-gpu-growth -``` - -You can change the default resources assigned to each `ClientApp` using the `--backend-config` argument. - -```bash -# Tells the VCE to reserve 2x CPUs and 25% of available VRAM for each ClientApp -flower-simulation --client-app=sim:client --server-app=sim:server --num-supernodes=100 \ - --backend-config='{"client_resources": {"num_cpus":2, "num_gpus":0.25}}' --enable-tf-gpu-growth -``` - -Take a look at the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html) for more details on how you can customise your simulation. 
diff --git a/examples/simulation-tensorflow/pyproject.toml b/examples/simulation-tensorflow/pyproject.toml deleted file mode 100644 index ad8cc2032b2d..000000000000 --- a/examples/simulation-tensorflow/pyproject.toml +++ /dev/null @@ -1,16 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "simulation-tensorflow" -version = "0.1.0" -description = "Federated Learning Simulation with Flower and Tensorflow" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -tensorflow = { version = "^2.9.1, !=2.11.1", markers = "platform_machine == 'x86_64'" } -tensorflow-macos = { version = "^2.9.1, !=2.11.1", markers = "sys_platform == 'darwin' and platform_machine == 'arm64'" } diff --git a/examples/simulation-tensorflow/requirements.txt b/examples/simulation-tensorflow/requirements.txt deleted file mode 100644 index bb69a87be1b4..000000000000 --- a/examples/simulation-tensorflow/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -tensorflow-macos>=2.9.1, != 2.11.1 ; sys_platform == "darwin" and platform_machine == "arm64" -tensorflow-cpu>=2.9.1, != 2.11.1 ; platform_machine == "x86_64" diff --git a/examples/simulation-tensorflow/sim.ipynb b/examples/simulation-tensorflow/sim.ipynb deleted file mode 100644 index 26b7260b5f1c..000000000000 --- a/examples/simulation-tensorflow/sim.ipynb +++ /dev/null @@ -1,347 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Flower Quickstart (Simulation with TensorFlow/Keras)\n", - "\n", - "Welcome to Flower, a friendly federated learning framework!\n", - "\n", - "In this notebook, we'll simulate a federated learning system with 100 clients. 
The clients will use TensorFlow/Keras to define model training and evaluation. Let's start by installing Flower (published as `flwr` on PyPI) with the `simulation` extra:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install -q flwr[\"simulation\"] tensorflow\n", - "!pip install -q flwr_datasets[\"vision\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's also install Matplotlib so we can make some plots once the simulation is completed" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip install matplotlib" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we import the required dependencies. The most important imports are Flower (`flwr`) and TensorFlow:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from typing import Dict, List, Tuple\n", - "\n", - "import tensorflow as tf\n", - "\n", - "import flwr as fl\n", - "from flwr.common import Metrics\n", - "from flwr.simulation.ray_transport.utils import enable_tf_gpu_growth\n", - "\n", - "from datasets import Dataset\n", - "from flwr_datasets import FederatedDataset\n", - "\n", - "VERBOSE = 0\n", - "NUM_CLIENTS = 100" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's start by defining the model we want to federated. Since we will be working with MNIST, using a fully connected model is sufficient. You can of course customize this model." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_model():\n", - " \"\"\"Constructs a simple model architecture suitable for MNIST.\"\"\"\n", - " model = tf.keras.models.Sequential(\n", - " [\n", - " tf.keras.layers.Flatten(input_shape=(28, 28)),\n", - " tf.keras.layers.Dense(128, activation=\"relu\"),\n", - " tf.keras.layers.Dropout(0.2),\n", - " tf.keras.layers.Dense(10, activation=\"softmax\"),\n", - " ]\n", - " )\n", - " model.compile(\"adam\", \"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n", - " return model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With that out of the way, let's move on to the interesting bits. Federated learning systems consist of a server and multiple clients. In Flower, we create clients by implementing subclasses of `flwr.client.Client` or `flwr.client.NumPyClient`. We use `NumPyClient` in this tutorial because it is easier to implement and requires us to write less boilerplate.\n", - "\n", - "To implement the Flower client, we create a subclass of `flwr.client.NumPyClient` and implement the three methods `get_parameters`, `fit`, and `evaluate`:\n", - "\n", - "- `get_parameters`: Return the current local model parameters\n", - "- `fit`: Receive model parameters from the server, train the model parameters on the local data, and return the (updated) model parameters to the server \n", - "- `evaluate`: Received model parameters from the server, evaluate the model parameters on the local data, and return the evaluation result to the server\n", - "\n", - "We mentioned that our clients will use TensorFlow/Keras for the model training and evaluation. 
Keras models provide methods that make the implementation straightforward: we can update the local model with server-provides parameters through `model.set_weights`, we can train/evaluate the model through `fit/evaluate`, and we can get the updated model parameters through `model.get_weights`.\n", - "\n", - "Let's see a simple implementation:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class FlowerClient(fl.client.NumPyClient):\n", - " def __init__(self, trainset, valset) -> None:\n", - " # Create model\n", - " self.model = get_model()\n", - " self.trainset = trainset\n", - " self.valset = valset\n", - "\n", - " def get_parameters(self, config):\n", - " return self.model.get_weights()\n", - "\n", - " def fit(self, parameters, config):\n", - " self.model.set_weights(parameters)\n", - " self.model.fit(self.trainset, epochs=1, verbose=VERBOSE)\n", - " return self.model.get_weights(), len(self.trainset), {}\n", - "\n", - " def evaluate(self, parameters, config):\n", - " self.model.set_weights(parameters)\n", - " loss, acc = self.model.evaluate(self.valset, verbose=VERBOSE)\n", - " return loss, len(self.valset), {\"accuracy\": acc}" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our class `FlowerClient` defines how local training/evaluation will be performed and allows Flower to call the local training/evaluation through `fit` and `evaluate`. Each instance of `FlowerClient` represents a *single client* in our federated learning system. Federated learning systems have multiple clients (otherwise, there's not much to federate, is there?), so each client will be represented by its own instance of `FlowerClient`. If we have, for example, three clients in our workload, we'd have three instances of `FlowerClient`. 
Flower calls `FlowerClient.fit` on the respective instance when the server selects a particular client for training (and `FlowerClient.evaluate` for evaluation).\n", - "\n", - "In this notebook, we want to simulate a federated learning system with 100 clients on a single machine. This means that the server and all 100 clients will live on a single machine and share resources such as CPU, GPU, and memory. Having 100 clients would mean having 100 instances of `FlowerClient` in memory. Doing this on a single machine can quickly exhaust the available memory resources, even if only a subset of these clients participates in a single round of federated learning.\n", - "\n", - "In addition to the regular capabilities where server and clients run on multiple machines, Flower, therefore, provides special simulation capabilities that create `FlowerClient` instances only when they are actually necessary for training or evaluation. To enable the Flower framework to create clients when necessary, we need to implement a function called `client_fn` that creates a `FlowerClient` instance on demand. Flower calls `client_fn` whenever it needs an instance of one particular client to call `fit` or `evaluate` (those instances are usually discarded after use). Clients are identified by a client ID, or short `cid`. The `cid` can be used, for example, to load different local data partitions for each client" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now define four auxiliary functions for this example (note the last two are entirely optional):\n", - "* `get_client_fn()`: Is a function that returns another function. The returned `client_fn` will be executed by Flower's VirtualClientEngine each time a new _virtual_ client (i.e. a client that is simulated in a Python process) needs to be spawn. When are virtual clients spawned? Each time the strategy samples them to do either `fit()` (i.e. 
train the global model on the local data of a particular client) or `evaluate()` (i.e. evaluate the global model on the validation set of a given client).\n", - "\n", - "* `weighted_average()`: This is an optional function to pass to the strategy. It will be executed after an evaluation round (i.e. when client run `evaluate()`) and will aggregate the metrics clients return. In this example, we use this function to compute the weighted average accuracy of clients doing `evaluate()`.\n", - "\n", - "* `get_evaluate_fn()`: This is again a function that returns another function. The returned function will be executed by the strategy at the end of a `fit()` round and after a new global model has been obtained after aggregation. This is an optional argument for Flower strategies. In this example, we use the whole MNIST test set to perform this server-side evaluation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_client_fn(dataset: FederatedDataset):\n", - " \"\"\"Return a function to construct a client.\n", - "\n", - " The VirtualClientEngine will execute this function whenever a client is sampled by\n", - " the strategy to participate.\n", - " \"\"\"\n", - "\n", - " def client_fn(cid: str) -> fl.client.Client:\n", - " \"\"\"Construct a FlowerClient with its own dataset partition.\"\"\"\n", - "\n", - " # Extract partition for client with id = cid\n", - " client_dataset = dataset.load_partition(int(cid), \"train\")\n", - "\n", - " # Now let's split it into train (90%) and validation (10%)\n", - " client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n", - "\n", - " trainset = client_dataset_splits[\"train\"].to_tf_dataset(\n", - " columns=\"image\", label_cols=\"label\", batch_size=32\n", - " )\n", - " valset = client_dataset_splits[\"test\"].to_tf_dataset(\n", - " columns=\"image\", label_cols=\"label\", batch_size=64\n", - " )\n", - "\n", - " # Create and return 
client\n", - " return FlowerClient(trainset, valset).to_client()\n", - "\n", - " return client_fn\n", - "\n", - "\n", - "def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:\n", - " \"\"\"Aggregation function for (federated) evaluation metrics, i.e. those returned by\n", - " the client's evaluate() method.\"\"\"\n", - " # Multiply accuracy of each client by number of examples used\n", - " accuracies = [num_examples * m[\"accuracy\"] for num_examples, m in metrics]\n", - " examples = [num_examples for num_examples, _ in metrics]\n", - "\n", - " # Aggregate and return custom metric (weighted average)\n", - " return {\"accuracy\": sum(accuracies) / sum(examples)}\n", - "\n", - "\n", - "def get_evaluate_fn(testset: Dataset):\n", - " \"\"\"Return an evaluation function for server-side (i.e. centralised) evaluation.\"\"\"\n", - "\n", - " # The `evaluate` function will be called after every round by the strategy\n", - " def evaluate(\n", - " server_round: int,\n", - " parameters: fl.common.NDArrays,\n", - " config: Dict[str, fl.common.Scalar],\n", - " ):\n", - " model = get_model() # Construct the model\n", - " model.set_weights(parameters) # Update model with the latest parameters\n", - " loss, accuracy = model.evaluate(testset, verbose=VERBOSE)\n", - " return loss, {\"accuracy\": accuracy}\n", - "\n", - " return evaluate" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now have `FlowerClient` which defines client-side training and evaluation, and `client_fn`, which allows Flower to create `FlowerClient` instances whenever it needs to call `fit` or `evaluate` on one particular client. The last step is to start the actual simulation using `flwr.simulation.start_simulation`. \n", - "\n", - "The function `start_simulation` accepts a number of arguments, amongst them the `client_fn` used to create `FlowerClient` instances, the number of clients to simulate `num_clients`, the number of rounds `num_rounds`, and the strategy. 
The strategy encapsulates the federated learning approach/algorithm, for example, *Federated Averaging* (FedAvg).\n", - "\n", - "Flower comes with a number of built-in strategies, but we can also use our own strategy implementations to customize nearly all aspects of the federated learning approach. For this example, we use the built-in `FedAvg` implementation and customize it using a few basic parameters. The last step is the actual call to `start_simulation` which - you guessed it - actually starts the simulation.\n", - "\n", - "We can use [Flower Datasets](https://flower.ai/docs/datasets/) to effortlessly obtain an off-the-shelf partitioned dataset or partition one that isn't pre-partitioned. Let's choose MNIST." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Enable GPU growth in your main process\n", - "enable_tf_gpu_growth()\n", - "\n", - "# Download MNIST dataset and partition it\n", - "mnist_fds = FederatedDataset(dataset=\"mnist\", partitioners={\"train\": NUM_CLIENTS})\n", - "# Get the whole test set for centralised evaluation\n", - "centralized_testset = mnist_fds.load_split(\"test\").to_tf_dataset(\n", - " columns=\"image\", label_cols=\"label\", batch_size=64\n", - ")\n", - "\n", - "\n", - "# Create FedAvg strategy\n", - "strategy = fl.server.strategy.FedAvg(\n", - " fraction_fit=0.1, # Sample 10% of available clients for training\n", - " fraction_evaluate=0.05, # Sample 5% of available clients for evaluation\n", - " min_fit_clients=10, # Never sample less than 10 clients for training\n", - " min_evaluate_clients=5, # Never sample less than 5 clients for evaluation\n", - " min_available_clients=int(\n", - " NUM_CLIENTS * 0.75\n", - " ), # Wait until at least 75 clients are available\n", - " evaluate_metrics_aggregation_fn=weighted_average, # aggregates federated metrics\n", - " evaluate_fn=get_evaluate_fn(centralized_testset), # global evaluation function\n", - ")\n", - "\n", - "# 
With a dictionary, you tell Flower's VirtualClientEngine that each\n", - "# client needs exclusive access to these many resources in order to run\n", - "client_resources = {\"num_cpus\": 1, \"num_gpus\": 0.0}\n", - "\n", - "# Start simulation\n", - "history = fl.simulation.start_simulation(\n", - " client_fn=get_client_fn(mnist_fds),\n", - " num_clients=NUM_CLIENTS,\n", - " config=fl.server.ServerConfig(num_rounds=10),\n", - " strategy=strategy,\n", - " client_resources=client_resources,\n", - " actor_kwargs={\n", - " \"on_actor_init_fn\": enable_tf_gpu_growth # Enable GPU growth upon actor init.\n", - " },\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can then use the resturned History object to either save the results to disk or do some visualisation (or both of course, or neither if you like chaos). Below you can see how you can plot the centralised accuracy obtainined at the end of each round (including at the very beginning of the experiment) for the global model. This is want the function `evaluate_fn()` that we passed to the strategy reports." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "\n", - "print(f\"{history.metrics_centralized = }\")\n", - "\n", - "global_accuracy_centralised = history.metrics_centralized[\"accuracy\"]\n", - "round = [data[0] for data in global_accuracy_centralised]\n", - "acc = [100.0 * data[1] for data in global_accuracy_centralised]\n", - "plt.plot(round, acc)\n", - "plt.grid()\n", - "plt.ylabel(\"Accuracy (%)\")\n", - "plt.xlabel(\"Round\")\n", - "plt.title(\"MNIST - IID - 100 clients with 10 clients per round\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Congratulations! 
With that, you built a Flower client, customized it's instantiation through the `client_fn`, customized the server-side execution through a `FedAvg` strategy configured for this workload, and started a simulation with 100 clients (each holding their own individual partition of the MNIST dataset).\n", - "\n", - "Next, you can continue to explore more advanced Flower topics:\n", - "\n", - "- Deploy server and clients on different machines using `start_server` and `start_client`\n", - "- Customize the server-side execution through custom strategies\n", - "- Customize the client-side execution through `config` dictionaries\n", - "\n", - "Get all resources you need!\n", - "\n", - "* **[DOCS]** Our complete documenation: https://flower.ai/docs/\n", - "* **[Examples]** All Flower examples: https://flower.ai/docs/examples/\n", - "* **[VIDEO]** Our Youtube channel: https://www.youtube.com/@flowerlabs\n", - "\n", - "Don't forget to join our Slack channel: https://flower.ai/join-slack/" - ] - } - ], - "metadata": { - "colab": { - "name": "flower.ipynb", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/simulation-tensorflow/sim.py b/examples/simulation-tensorflow/sim.py deleted file mode 100644 index 1ae2db41ab4b..000000000000 --- a/examples/simulation-tensorflow/sim.py +++ /dev/null @@ -1,186 +0,0 @@ -import argparse -import os -from typing import Dict, List, Tuple - -import flwr as fl -import tensorflow as tf -from datasets import Dataset -from flwr.common import Metrics -from flwr.simulation.ray_transport.utils import enable_tf_gpu_growth -from flwr_datasets import FederatedDataset - -# Make TensorFlow logs less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - -parser = argparse.ArgumentParser(description="Flower Simulation with Tensorflow/Keras") - -parser.add_argument( - "--num_cpus", - type=int, - default=1, - 
help="Number of CPUs to assign to a virtual client", -) -parser.add_argument( - "--num_gpus", - type=float, - default=0.0, - help="Ratio of GPU memory to assign to a virtual client", -) - -NUM_CLIENTS = 100 -NUM_ROUNDS = 10 -VERBOSE = 0 - - -class FlowerClient(fl.client.NumPyClient): - def __init__(self, trainset, valset) -> None: - # Create model - self.model = get_model() - self.trainset = trainset - self.valset = valset - - def get_parameters(self, config): - return self.model.get_weights() - - def fit(self, parameters, config): - self.model.set_weights(parameters) - self.model.fit(self.trainset, epochs=1, verbose=VERBOSE) - return self.model.get_weights(), len(self.trainset), {} - - def evaluate(self, parameters, config): - self.model.set_weights(parameters) - loss, acc = self.model.evaluate(self.valset, verbose=VERBOSE) - return loss, len(self.valset), {"accuracy": acc} - - -def get_model(): - """Constructs a simple model architecture suitable for MNIST.""" - model = tf.keras.models.Sequential( - [ - tf.keras.layers.Flatten(input_shape=(28, 28)), - tf.keras.layers.Dense(128, activation="relu"), - tf.keras.layers.Dropout(0.2), - tf.keras.layers.Dense(10, activation="softmax"), - ] - ) - model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"]) - return model - - -def get_client_fn(dataset: FederatedDataset): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. 
- """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - - # Extract partition for client with id = cid - client_dataset = dataset.load_partition(int(cid), "train") - - # Now let's split it into train (90%) and validation (10%) - client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42) - - trainset = client_dataset_splits["train"].to_tf_dataset( - columns="image", label_cols="label", batch_size=32 - ) - valset = client_dataset_splits["test"].to_tf_dataset( - columns="image", label_cols="label", batch_size=64 - ) - - # Create and return client - return FlowerClient(trainset, valset).to_client() - - return client_fn - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """Aggregation function for (federated) evaluation metrics. - - It ill aggregate those metrics returned by the client's evaluate() method. - """ - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"accuracy": sum(accuracies) / sum(examples)} - - -def get_evaluate_fn(testset: Dataset): - """Return an evaluation function for server-side (i.e. 
centralised) evaluation.""" - - # The `evaluate` function will be called after every round by the strategy - def evaluate( - server_round: int, - parameters: fl.common.NDArrays, - config: Dict[str, fl.common.Scalar], - ): - model = get_model() # Construct the model - model.set_weights(parameters) # Update model with the latest parameters - loss, accuracy = model.evaluate(testset, verbose=VERBOSE) - return loss, {"accuracy": accuracy} - - return evaluate - - -# Download MNIST dataset and partition it -mnist_fds = FederatedDataset(dataset="mnist", partitioners={"train": NUM_CLIENTS}) -# Get the whole test set for centralised evaluation -centralized_testset = mnist_fds.load_split("test").to_tf_dataset( - columns="image", label_cols="label", batch_size=64 -) - -# Create FedAvg strategy -strategy = fl.server.strategy.FedAvg( - fraction_fit=0.1, # Sample 10% of available clients for training - fraction_evaluate=0.05, # Sample 5% of available clients for evaluation - min_fit_clients=10, # Never sample less than 10 clients for training - evaluate_metrics_aggregation_fn=weighted_average, # aggregates federated metrics - evaluate_fn=get_evaluate_fn(centralized_testset), # global evaluation function -) - - -# ClientApp for Flower-Next -client = fl.client.ClientApp( - client_fn=get_client_fn(mnist_fds), -) - -# ServerApp for Flower-Next -server = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=NUM_ROUNDS), - strategy=strategy, -) - - -def main() -> None: - # Parse input arguments - args = parser.parse_args() - - # With a dictionary, you tell Flower's VirtualClientEngine that each - # client needs exclusive access to these many resources in order to run - client_resources = { - "num_cpus": args.num_cpus, - "num_gpus": args.num_gpus, - } - - # Start simulation - fl.simulation.start_simulation( - client_fn=get_client_fn(mnist_fds), - num_clients=NUM_CLIENTS, - config=fl.server.ServerConfig(NUM_ROUNDS), - strategy=strategy, - client_resources=client_resources, - 
actor_kwargs={ - "on_actor_init_fn": enable_tf_gpu_growth # Enable GPU growth upon actor init - # does nothing if `num_gpus` in client_resources is 0.0 - }, - ) - - -if __name__ == "__main__": - # Enable GPU growth in your main process - enable_tf_gpu_growth() - main() diff --git a/examples/sklearn-logreg-mnist/README.md b/examples/sklearn-logreg-mnist/README.md index b56dbfc5dd3a..7c75e2ecfb85 100644 --- a/examples/sklearn-logreg-mnist/README.md +++ b/examples/sklearn-logreg-mnist/README.md @@ -55,7 +55,7 @@ flwr run . You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flwr run . --run-config num-server-rounds=5,fraction-fit=0.25 +flwr run . --run-config "num-server-rounds=5 fraction-fit=0.25" ``` > \[!TIP\] diff --git a/examples/sklearn-logreg-mnist/pyproject.toml b/examples/sklearn-logreg-mnist/pyproject.toml index be1e4810b312..75dae57a0a40 100644 --- a/examples/sklearn-logreg-mnist/pyproject.toml +++ b/examples/sklearn-logreg-mnist/pyproject.toml @@ -12,7 +12,7 @@ authors = [ { name = "Kaushik Amar Das", email = "kaushik.das@iiitg.ac.in" }, ] dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "numpy<2.0.0", "scikit-learn~=1.2.2", diff --git a/examples/tensorflow-privacy/README.md b/examples/tensorflow-privacy/README.md index 8156f92f60c9..af85865346bb 100644 --- a/examples/tensorflow-privacy/README.md +++ b/examples/tensorflow-privacy/README.md @@ -1,66 +1,64 @@ --- -tags: [basic, vision, fds, privacy, dp] +tags: [DP, DP-SGD, basic, vision, fds, privacy] dataset: [MNIST] framework: [tensorflow] --- # Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine -In this example, we demonstrate how to train a model with sample-level differential privacy (DP) using Flower. We employ TensorFlow and integrate the tensorflow-privacy Engine to achieve sample-level differential privacy. 
This setup ensures robust privacy guarantees during the client training phase. +In this example, we demonstrate how to train a model with sample-level differential privacy (DP) using Flower. We employ TensorFlow and integrate the tensorflow-privacy engine to achieve sample-level differential privacy. This setup ensures robust privacy guarantees during the client training phase. For more information about DP in Flower please refer to the [tutorial](https://flower.ai/docs/framework/how-to-use-differential-privacy.html). For additional information about tensorflow-privacy, visit the official [website](https://www.tensorflow.org/responsible_ai/privacy/guide). -## Environments Setup +## Set up the project -Start by cloning the example. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +### Clone the project + +Start by cloning the example project: ```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/tensorflow-privacy . && rm -rf flower && cd tensorflow-privacy +git clone --depth=1 https://github.com/adap/flower.git \ + && mv flower/examples/tensorflow-privacy . \ + && rm -rf flower \ + && cd tensorflow-privacy ``` This will create a new directory called `tensorflow-privacy` containing the following files: ```shell --- pyproject.toml --- client.py --- server.py --- README.md +tensorflow-privacy +├── tf_privacy +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training, and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -### Installing dependencies - -Project dependencies are defined in `pyproject.toml`. Install them with: - -```shell -pip install . -``` +> \[!NOTE\] +> Please note that, at the current state, users cannot set `NodeConfig` for simulated `ClientApp`s. 
For this reason, the hyperparameter `noise_multiplier` is set in the `client_fn` method based on a condition check on `partition_id`. This will be modified in a future version of Flower to allow users to set `NodeConfig` for simulated `ClientApp`s. -## Run Flower with tensorflow-privacy and TensorFlow +### Install dependencies and project -### 1. Start the long-running Flower server (SuperLink) +Install the dependencies defined in `pyproject.toml` as well as the `tf_privacy` package. -```bash -flower-superlink --insecure +```shell +# From a new python environment, run: +pip install -e . ``` -### 2. Start the long-running Flower clients (SuperNodes) +## Run the project -Start 2 Flower `SuperNodes` in 2 separate terminal windows, using: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```bash -flower-client-app client:appA --insecure -``` +### Run with the Simulation Engine ```bash -flower-client-app client:appB --insecure +flwr run . ``` -tensorflow-privacy hyperparameters can be passed for each client in `ClientApp` instantiation (in `client.py`). In this example, `noise_multiplier=1.5` and `noise_multiplier=1` are used for the first and second client respectively. - -### 3. Run the Flower App - -With both the long-running server (SuperLink) and two clients (SuperNode) up and running, we can now run the actual Flower App: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -flower-server-app server:app --insecure +flwr run . 
--run-config "l2-norm-clip=1.5 num-server-rounds=5" ``` diff --git a/examples/tensorflow-privacy/client.py b/examples/tensorflow-privacy/client.py deleted file mode 100644 index 85ed8a3d4245..000000000000 --- a/examples/tensorflow-privacy/client.py +++ /dev/null @@ -1,150 +0,0 @@ -import argparse -import os - -import tensorflow as tf -import tensorflow_privacy -from flwr.client import ClientApp, NumPyClient -from flwr_datasets import FederatedDataset -from tensorflow_privacy.privacy.analysis.compute_dp_sgd_privacy_lib import ( - compute_dp_sgd_privacy_statement, -) - -# Make TensorFlow log less verbose -os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" - - -def load_data(partition_id, batch_size): - fds = FederatedDataset(dataset="mnist", partitioners={"train": 2}) - partition = fds.load_partition(partition_id, "train") - partition.set_format("numpy") - - # Divide data on each node: 80% train, 20% test - partition = partition.train_test_split(test_size=0.2, seed=42) - x_train, y_train = partition["train"]["image"] / 255.0, partition["train"]["label"] - x_test, y_test = partition["test"]["image"] / 255.0, partition["test"]["label"] - - # Adjust the size of the training dataset to make it evenly divisible by the batch size - remainder = len(x_train) % batch_size - if remainder != 0: - x_train = x_train[:-remainder] - y_train = y_train[:-remainder] - - return (x_train, y_train), (x_test, y_test) - - -class FlowerClient(NumPyClient): - def __init__( - self, - model, - train_data, - test_data, - l2_norm_clip, - noise_multiplier, - num_microbatches, - learning_rate, - batch_size, - ) -> None: - super().__init__() - self.model = model - self.x_train, self.y_train = train_data - self.x_test, self.y_test = test_data - self.noise_multiplier = noise_multiplier - self.l2_norm_clip = l2_norm_clip - self.num_microbatches = num_microbatches - self.learning_rate = learning_rate - self.batch_size = batch_size - if self.batch_size % self.num_microbatches != 0: - raise ValueError( - f"Batch 
size {self.batch_size} is not divisible by the number of microbatches {self.num_microbatches}" - ) - - self.optimizer = tensorflow_privacy.DPKerasSGDOptimizer( - l2_norm_clip=l2_norm_clip, - noise_multiplier=noise_multiplier, - num_microbatches=num_microbatches, - learning_rate=learning_rate, - ) - loss = tf.keras.losses.SparseCategoricalCrossentropy( - reduction=tf.losses.Reduction.NONE - ) - self.model.compile(optimizer=self.optimizer, loss=loss, metrics=["accuracy"]) - - def get_parameters(self, config): - return self.model.get_weights() - - def fit(self, parameters, config): - self.model.set_weights(parameters) - - self.model.fit( - self.x_train, - self.y_train, - epochs=1, - batch_size=self.batch_size, - ) - - compute_dp_sgd_privacy_statement( - number_of_examples=self.x_train.shape[0], - batch_size=self.batch_size, - num_epochs=1, - noise_multiplier=self.noise_multiplier, - delta=1e-5, - ) - - return self.model.get_weights(), len(self.x_train), {} - - def evaluate(self, parameters, config): - self.model.set_weights(parameters) - self.model.compile( - optimizer=self.optimizer, - loss="sparse_categorical_crossentropy", - metrics=["accuracy"], - ) - loss, accuracy = self.model.evaluate(self.x_test, self.y_test) - return loss, len(self.x_test), {"accuracy": accuracy} - - -def client_fn_parameterized( - partition_id, - noise_multiplier, - l2_norm_clip=1.0, - num_microbatches=64, - learning_rate=0.01, - batch_size=64, -): - def client_fn(cid: str): - model = tf.keras.Sequential( - [ - tf.keras.layers.InputLayer(input_shape=(28, 28, 1)), - tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), - tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), - tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), - tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), - tf.keras.layers.Flatten(), - tf.keras.layers.Dense(128, activation="relu"), - tf.keras.layers.Dense(10, activation="softmax"), - ] - ) - train_data, test_data = load_data( - 
partition_id=partition_id, batch_size=batch_size - ) - return FlowerClient( - model, - train_data, - test_data, - noise_multiplier, - l2_norm_clip, - num_microbatches, - learning_rate, - batch_size, - ).to_client() - - return client_fn - - -appA = ClientApp( - client_fn=client_fn_parameterized(partition_id=0, noise_multiplier=1.0), -) - -appB = ClientApp( - client_fn=client_fn_parameterized(partition_id=1, noise_multiplier=1.5), -) diff --git a/examples/tensorflow-privacy/pyproject.toml b/examples/tensorflow-privacy/pyproject.toml index 884ba3b5f07b..b404f7f183a0 100644 --- a/examples/tensorflow-privacy/pyproject.toml +++ b/examples/tensorflow-privacy/pyproject.toml @@ -4,19 +4,36 @@ build-backend = "hatchling.build" [project] name = "tensorflow-privacy-fl" -version = "0.1.0" +version = "1.0.0" description = "Sample-level Differential Privacy with Tensorflow-Privacy in Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets[vision]>=0.1.0,<1.0.0", + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", "tensorflow-estimator~=2.4", "tensorflow-probability~=0.22.0", "tensorflow>=2.4.0,<=2.15.0", - "tensorflow-privacy == 0.9.0" + "tensorflow-privacy == 0.9.0", ] [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "tf_privacy.server_app:app" +clientapp = "tf_privacy.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +l2-norm-clip = 1.0 +num-microbatches = 64 +learning-rate = 0.01 +batch-size = 64 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 2 diff --git a/examples/tensorflow-privacy/server.py b/examples/tensorflow-privacy/server.py deleted file mode 100644 index 5b2ac6a3c4df..000000000000 --- a/examples/tensorflow-privacy/server.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import List, Tuple - 
-from flwr.common import Metrics -from flwr.server import ServerApp, ServerConfig -from flwr.server.strategy import FedAvg - - -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - return {"accuracy": sum(accuracies) / sum(examples)} - - -strategy = FedAvg(evaluate_metrics_aggregation_fn=weighted_average) - -config = ServerConfig(num_rounds=3) - -app = ServerApp( - config=config, - strategy=strategy, -) diff --git a/examples/tensorflow-privacy/tf_privacy/__init__.py b/examples/tensorflow-privacy/tf_privacy/__init__.py new file mode 100644 index 000000000000..252b33cdd1c5 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/__init__.py @@ -0,0 +1 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" diff --git a/examples/tensorflow-privacy/tf_privacy/client_app.py b/examples/tensorflow-privacy/tf_privacy/client_app.py new file mode 100644 index 000000000000..977d98bbbe43 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/client_app.py @@ -0,0 +1,93 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" + +import os + +import tensorflow as tf +import tensorflow_privacy +from flwr.client import ClientApp, NumPyClient +from tensorflow_privacy.privacy.analysis.compute_dp_sgd_privacy_lib import ( + compute_dp_sgd_privacy_statement, +) +from flwr.common import Context + +from tf_privacy.task import load_data, load_model +import numpy as np + + +# Make TensorFlow log less verbose +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + + +class FlowerClient(NumPyClient): + def __init__( + self, + train_data, + test_data, + noise_multiplier, + run_config, + ) -> None: + super().__init__() + self.model = load_model() + self.x_train, self.y_train = train_data + self.x_train = np.expand_dims(self.x_train, axis=-1) + 
self.x_test, self.y_test = test_data + self.x_test = np.expand_dims(self.x_test, axis=-1) + self.noise_multiplier = noise_multiplier + self.run_config = run_config + if self.run_config["batch-size"] % self.run_config["num-microbatches"] != 0: + raise ValueError( + f"Batch size {self.run_config['batch-size']} is not divisible by the number of microbatches {self.run_config['num-microbatches']}" + ) + + self.optimizer = tensorflow_privacy.DPKerasSGDOptimizer( + l2_norm_clip=self.run_config["l2-norm-clip"], + noise_multiplier=self.noise_multiplier, + num_microbatches=self.run_config["num-microbatches"], + learning_rate=self.run_config["learning-rate"], + ) + loss = tf.keras.losses.SparseCategoricalCrossentropy( + reduction=tf.losses.Reduction.NONE + ) + self.model.compile(optimizer=self.optimizer, loss=loss, metrics=["accuracy"]) + + def fit(self, parameters, config): + self.model.set_weights(parameters) + self.model.fit( + self.x_train, + self.y_train, + epochs=1, + batch_size=self.run_config["batch-size"], + ) + + dp_statement = compute_dp_sgd_privacy_statement( + number_of_examples=self.x_train.shape[0], + batch_size=self.run_config["batch-size"], + num_epochs=1, + noise_multiplier=self.noise_multiplier, + delta=1e-5, + ) + print(dp_statement) + + return self.model.get_weights(), len(self.x_train), {} + + def evaluate(self, parameters, config): + self.model.set_weights(parameters) + loss, accuracy = self.model.evaluate(self.x_test, self.y_test) + return loss, len(self.x_test), {"accuracy": accuracy} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + run_config = context.run_config + noise_multiplier = 1.0 if partition_id % 2 == 0 else 1.5 + + train_data, test_data = load_data( + partition_id=partition_id, + num_partitions=context.node_config["num-partitions"], + batch_size=context.run_config["batch-size"], + ) + + return FlowerClient(train_data, test_data, noise_multiplier, run_config).to_client() + + +app = 
ClientApp(client_fn=client_fn) diff --git a/examples/tensorflow-privacy/tf_privacy/server_app.py b/examples/tensorflow-privacy/tf_privacy/server_app.py new file mode 100644 index 000000000000..5348492a3ac4 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/server_app.py @@ -0,0 +1,31 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" + +from typing import List, Tuple + +from flwr.common import Metrics +from flwr.server import ServerApp, ServerConfig, ServerAppComponents +from flwr.server.strategy import FedAvg +from flwr.common import Context, ndarrays_to_parameters +from .task import load_model + + +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context) -> ServerAppComponents: + parameters = ndarrays_to_parameters(load_model().get_weights()) + strategy = FedAvg( + evaluate_metrics_aggregation_fn=weighted_average, + initial_parameters=parameters, + ) + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(config=config, strategy=strategy) + + +app = ServerApp(server_fn=server_fn) diff --git a/examples/tensorflow-privacy/tf_privacy/task.py b/examples/tensorflow-privacy/tf_privacy/task.py new file mode 100644 index 000000000000..7bbf2a3e9c09 --- /dev/null +++ b/examples/tensorflow-privacy/tf_privacy/task.py @@ -0,0 +1,52 @@ +"""tf_privacy: Training with Sample-Level Differential Privacy using TensorFlow-Privacy Engine.""" + +import tensorflow as tf + +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + +fds = None # Cache FederatedDataset + + +def load_model(): + model = tf.keras.Sequential( + [ + tf.keras.layers.InputLayer(input_shape=(28, 28, 
1)), + tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), + tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), + tf.keras.layers.Flatten(), + tf.keras.layers.Dense(128, activation="relu"), + tf.keras.layers.Dense(10, activation="softmax"), + ] + ) + + return model + + +def load_data(partition_id: int, num_partitions: int, batch_size): + + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="ylecun/mnist", + partitioners={"train": partitioner}, + ) + + partition = fds.load_partition(partition_id) + partition.set_format("numpy") + + # Divide data on each node: 80% train, 20% test + partition = partition.train_test_split(test_size=0.2, seed=42) + x_train, y_train = partition["train"]["image"] / 255.0, partition["train"]["label"] + x_test, y_test = partition["test"]["image"] / 255.0, partition["test"]["label"] + + # Adjust the size of the training dataset to make it evenly divisible by the batch size + remainder = len(x_train) % batch_size + if remainder != 0: + x_train = x_train[:-remainder] + y_train = y_train[:-remainder] + + return (x_train, y_train), (x_test, y_test) diff --git a/examples/vertical-fl/.gitignore b/examples/vertical-fl/.gitignore index 64af4779185a..5d2a2d133ae3 100644 --- a/examples/vertical-fl/.gitignore +++ b/examples/vertical-fl/.gitignore @@ -1,2 +1 @@ -_static/results -!_static/data/train.csv +!data/train.csv diff --git a/examples/vertical-fl/README.md b/examples/vertical-fl/README.md index ab5d2210d8d5..a9f6fc383060 100644 --- a/examples/vertical-fl/README.md +++ b/examples/vertical-fl/README.md @@ -1,11 +1,10 @@ --- -title: Vertical FL Flower Example tags: [vertical, tabular, advanced] dataset: [Titanic] framework: [torch, pandas, scikit-learn] --- -# Vertical Federated Learning example +# Vertical Federated Learning with Flower This 
example will showcase how you can perform Vertical Federated Learning using Flower. We'll be using the [Titanic dataset](https://www.kaggle.com/competitions/titanic/data) @@ -14,89 +13,6 @@ more details below, but the main idea of Vertical Federated Learning is that each client is holding different feature sets of the same dataset and that the server is holding the labels of this dataset. -## Project Setup - -Start by cloning the example project. We prepared a single-line command that you -can copy into your shell which will checkout the example for you: - -```shell -git clone --depth=1 https://github.com/adap/flower.git _tmp && mv _tmp/examples/vertical-fl . && rm -rf _tmp && cd vertical-fl -``` - -This will create a new directory called `vertical-fl` containing the -following files: - -```shell --- pyproject.toml --- requirements.txt --- _static/data/train.csv --- client.py --- plot.py --- simulation.py --- strategy.py --- task.py --- README.md -``` - -### Installing Dependencies - -Project dependencies (such as `torch` and `flwr`) are defined in -`pyproject.toml` and `requirements.txt`. We recommend -[Poetry](https://python-poetry.org/docs/) to install those dependencies and -manage your virtual environment ([Poetry -installation](https://python-poetry.org/docs/#installation)) or -[pip](https://pip.pypa.io/en/latest/development/), but feel free to use a -different way of installing dependencies and managing virtual environments if -you have other preferences. - -#### Poetry - -```shell -poetry install -poetry shell -``` - -Poetry will install all your dependencies in a newly created virtual -environment. To verify that everything works correctly you can run the following -command: - -```shell -poetry run python3 -c "import flwr" -``` - -If you don't see any errors you're good to go! - -#### pip - -Write the command below in your terminal to install the dependencies according -to the configuration file requirements.txt. 
- -```shell -pip install -r requirements.txt -``` - -## Usage - -Once everything is installed, you can just run: - -```shell -poetry run python3 simulation.py -``` - -for `poetry`, otherwise just run: - -```shell -python3 simulation.py -``` - -This will start the Vertical FL training for 1000 rounds with 3 clients. -Eventhough the number of rounds is quite high, this should only take a few -seconds to run as the model is very small. - -## Explanations - -### Vertical FL vs Horizontal FL - | | Horizontal Federated Learning (HFL or just FL) | Vertical Federated Learning (VFL) | | --------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | Data Distribution | Clients have different data instances but share the same feature space. Think of different hospitals having different patients' data (samples) but recording the same types of information (features). | Each client holds different features for the same instances. Imagine different institutions holding various tests or measurements for the same group of patients. | @@ -106,412 +22,64 @@ seconds to run as the model is very small. | HFL | VFL | | :-----------------------------: | :-----------------------------: | -| ![HFL diagram](_static/hfl.jpg) | ![VFL diagram](_static/vfl.jpg) | +| ![HFL diagram](_static/hfl.png) | ![VFL diagram](_static/vfl.png) | Those diagrams illustrate HFL vs VFL using a simplified version of what we will be building in this example. 
Note that on the VFL side, the server holds the labels (the `Survived` column) and will be the only one capable of performing evaluation. -### Data - -#### About - -The Titanic Survival dataset is a popular dataset used to predict passenger survival on -the Titanic based on various features. - -You can see an exhaustive list of the features over on [Kaggle](https://www.kaggle.com/competitions/titanic/data). - -The data is stored as a CSV file in `_static/data/train.csv`, it contains 892 -samples with labels. - -#### Preprocessing - -In `task.py`, you'll find the preprocessing functions we'll apply to our data: - -- Passengers are grouped by age: 'Child' for 10 years and under, - 'Adult' for ages between 11 and 40, and 'Elderly' for those over 40. If the age - isn't listed, we'll label it as 'Unknown'. - - ```python - def _bin_age(age_series): - bins = [-np.inf, 10, 40, np.inf] - labels = ["Child", "Adult", "Elderly"] - return ( - pd.cut(age_series, bins=bins, labels=labels, right=True) - .astype(str) - .replace("nan", "Unknown") - ) - ``` - -- We pull out titles from passengers' names to help our model - understand social status and family roles, simplifying rare titles into a single - 'Rare' category and converting any French titles to their English equivalents. - - ```python - def _extract_title(name_series): - titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) - rare_titles = { - "Lady", - "Countess", - "Capt", - "Col", - "Don", - "Dr", - "Major", - "Rev", - "Sir", - "Jonkheer", - "Dona", - } - titles = titles.replace(list(rare_titles), "Rare") - titles = titles.replace({"Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs"}) - return titles - ``` - -- The first letter of each cabin number is used to identify the - cabin area, with any missing entries marked as 'Unknown'. This could provide - insight into the passenger's location on the ship. 
- -- We remove features like 'PassengerId', 'Name', and - 'Ticket' that won't be necessary for our model's predictions. - -- Lastly, we convert categorical data points such as 'Sex', - 'Pclass', 'Embarked', 'Title', 'Cabin', and the binned 'Age' into One-Hot - encodings. - - ```python - def _create_features(df): - # Convert 'Age' to numeric, coercing errors to NaN - df["Age"] = pd.to_numeric(df["Age"], errors="coerce") - df["Age"] = _bin_age(df["Age"]) - df["Cabin"] = df["Cabin"].str[0].fillna("Unknown") - df["Title"] = _extract_title(df["Name"]) - df.drop(columns=["PassengerId", "Name", "Ticket"], inplace=True) - all_keywords = set(df.columns) - df = pd.get_dummies( - df, columns=["Sex", "Pclass", "Embarked", "Title", "Cabin", "Age"] - ) - return df, all_keywords - ``` - -#### Partitioning - -In `task.py`, we also partition our data for our 3 clients to mirror real-life -collaborations where different organizations hold different feature sets: - -```python -def _partition_data(df, all_keywords): - partitions = [] - keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] - keywords_sets.append(all_keywords - keywords_sets[0] - keywords_sets[1]) - - for keywords in keywords_sets: - partitions.append( - df[ - list( - { - col - for col in df.columns - for kw in keywords - if kw in col or "Survived" in col - } - ) - ] - ) - - return partitions -``` - -Client 1: This client looks at family connections and accommodations, working -with features like the number of parents and children each passenger had on -board ('Parch'), the cabin number ('Cabin'), and the ticket class ('Pclass'). - -Client 2: Here, the focus is on personal attributes. This client examines the -passengers' gender ('Sex') and societal roles as indicated by their titles -('Title'). - -Client 3: The final client handles the rest of the data that the first two don't -see. This includes the remaining features that give a broader view of the -passengers' information. 
- -Each client is going to train their models on their own unique data without any -idea of the passengers' survival outcomes, which we're trying to predict. - -Once all clients have done their part, we combine their insights to form a -comprehensive understanding, just as if different organizations were pooling -their knowledge while keeping their data private. This is the essence of -Vertical Federated Learning: separate but together, each contributing to a -collective intelligence without sharing sensitive information. - -Note that our final data processing function looks like that: - -```python -def get_partitions_and_label(): - df = pd.read_csv("_static/data/train.csv") - processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() - processed_df, all_keywords = _create_features(processed_df) - raw_partitions = _partition_data(processed_df, all_keywords) - - partitions = [] - for partition in raw_partitions: - partitions.append(partition.drop("Survived", axis=1)) - return partitions, processed_df["Survived"].values -``` - -This returns the 3 partitions for our clients and the labels for our server. - -### Models - -#### Clients - -Each client's model is a neural network designed to operate on a distinct subset -of features held by a client. In this example we will use simple linear -regression models. - -```python -class ClientModel(nn.Module): - def __init__(self, input_size): - super(ClientModel, self).__init__() - self.fc = nn.Linear(input_size, 4) - - def forward(self, x): - return self.fc(x) -``` - -The `input_size` corresponds to the number of features each client has, and this -model maps those features to a 4-dimensional latent space. The outputs are -essentially feature embeddings that capture the patterns within each client's -data slice. These embeddings are then ready to be sent to the server for further -processing. - -#### Server - -The server's model acts as the central aggregator in the VFL system. 
It's also a -neural network but with a slightly different architecture tailored to its role -in aggregating the client models' outputs. - -```python -class ServerModel(nn.Module): - def __init__(self): - super(ServerModel, self).__init__() - self.fc = nn.Linear(12, 1) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - x = self.fc(x) - return self.sigmoid(x) -``` - -It comprises a single linear layer that accepts the concatenated outputs from -all client models as its input. The number of inputs to this layer equals the -total number of outputs from the client models (3 x 4 = 12). After processing -these inputs, the linear layer's output is passed through a sigmoid activation -function (`nn.Sigmoid()`), which maps the result to a `(0, 1)` range, providing -a probability score indicative of the likelihood of survival. - -### Strategy - -The strategy we will write to perform the aggregation will inherit from `FedAvg` -and set the following additional attributes: - -```python -self.model = ServerModel(12) -self.initial_parameters = ndarrays_to_parameters( - [val.cpu().numpy() for _, val in self.model.state_dict().items()] -) -self.optimizer = optim.SGD(self.model.parameters(), lr=0.01) -self.criterion = nn.BCELoss() -self.label = torch.tensor(labels).float().unsqueeze(1) -``` - -With `labels` given as an argument to the strategy. 
+## Set up the project -We then redefine the `aggregate_fit` method: +### Clone the project -```python -def aggregate_fit( - self, - rnd, - results, - failures, -): - # Do not aggregate if there are failures and failures are not accepted - if not self.accept_failures and failures: - return None, {} +Start by cloning the example project: - # Convert results - embedding_results = [ - torch.from_numpy(parameters_to_ndarrays(fit_res.parameters)[0]) - for _, fit_res in results - ] - embeddings_aggregated = torch.cat(embedding_results, dim=1) - embedding_server = embeddings_aggregated.detach().requires_grad_() - output = self.model(embedding_server) - loss = self.criterion(output, self.label) - loss.backward() - - self.optimizer.step() - self.optimizer.zero_grad() - - grads = embedding_server.grad.split([4, 4, 4], dim=1) - np_grads = [grad.numpy() for grad in grads] - parameters_aggregated = ndarrays_to_parameters(np_grads) - - with torch.no_grad(): - correct = 0 - output = self.model(embedding_server) - predicted = (output > 0.5).float() - - correct += (predicted == self.label).sum().item() - - accuracy = correct / len(self.label) * 100 - - metrics_aggregated = {"accuracy": accuracy} - - return parameters_aggregated, metrics_aggregated +```shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/vertical-fl . \ + && rm -rf _tmp \ + && cd vertical-fl ``` -This is where all the magic happens. We first convert the `np.array`s that we -received from our clients to `tensor`s, before concatenating the 3 embeddings -together. This means that we go from 3 tensors of size `(892, 4)` to 1 tensor of -size `(892, 12)`. The combined embeddings are fed through the server model to -get the prediction output. The loss between the predicted output and the actual -labels is calculated. Backward propagation is then performed to calculate the -gradients, which are used to update the server model's parameters. 
- -The optimizer updates the server model's parameters based on the calculated -gradients, and the gradients are reset to zero to prepare for the next round of -aggregation. - -The gradients from the server model's embedding layer are then split according -to the size of the output from each client model (assuming equal size for -simplicity here), ready to be sent back to the respective client models. - -Finally, with no gradient calculation needed, the model's predictions are -compared to the true labels to calculate the accuracy of the model after the -update. - -Note that this `aggregate_fit` function returns gradients instead of trained -weights. This is because, in this setting, sharing gradients allows each -participant to benefit from the collective feedback gathered from the entire -pool of data without the need to align their different feature spaces (trained -weights are directly tied to specific features of the dataset but not gradients, -which are just a measure of the sensitivity of the loss function to changes in -the model's parameters). This shared feedback, encapsulated in the gradients, -guides each participant's model to adjust and improve, achieving optimization -not just based on its own data but also leveraging insights from the entire -network's data. - -We do not need to return parameters here because updates are completed locally -in VFL. But the server should still send the gradients back to all clients to -let them continue the back prop and update their local model. In Flower, the -parameters returned by `aggregate_fit` will be stored and sent to -`Client.evaluate` via `configure_fit`. So we take advantage of this and return -our gradients in `aggregate_fit` so that they'll be sent to `Client.evaluate` as -`parameters`. That's also why we can obtain gradients from the `parameters` -argument in `Client.evaluate` (see next section). 
- -The last thing we have to do is to redefine the `aggregate_evaluate` function to -disable distributed evaluation (as the clients do not hold any labels to test -their local models). +This will create a new directory called `vertical-fl` with the following structure: -```python -def aggregate_evaluate( - self, - rnd, - results, - failures, -): - return None, {} +```shell +vertical-fl +├── vertical_fl +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ ├── strategy.py # Defines your Strategy +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +├── data/train.csv +└── README.md ``` -### Client class and function - -Our `FlowerClient` class is going to be quite straight forward. - -```python -class FlowerClient(fl.client.NumPyClient): - def __init__(self, cid, data): - self.cid = cid - self.train = torch.tensor(StandardScaler().fit_transform(data)).float() - self.model = ClientModel(input_size=self.train.shape[1]) - self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01) - self.embedding = self.model(self.train) +### Install dependencies and project - def get_parameters(self, config): - pass +Install the dependencies defined in `pyproject.toml` as well as the `vertical-fl` package. - def fit(self, parameters, config): - self.embedding = self.model(self.train) - return [self.embedding.detach().numpy()], 1, {} - - def evaluate(self, parameters, config): - self.model.zero_grad() - self.embedding.backward(torch.from_numpy(parameters[int(self.cid)])) - self.optimizer.step() - return None +```bash +pip install -e . ``` -After defining our model and data attributes (respectively `self.model` and -`self.train`), we define our `fit` function as such: the `self.model(self.train)` -performs a forward pass using the client's local training data (`self.train`).
-This generates the embeddings (feature representations) for the data. To conform -with the return type of the `fit` function, we need to return a list of -`np.array`s (hence the conversion), the number of samples, which won't be used -on the server side, so we just return 1, and then an empty dict. - -For the `evaluate` function, we perform our model's backward pass using the -gradients sent by the server and then update our local model's parameters based -on those new gradients. Note that the `loss` and `num_examples` we return in our -evaluate function are bogus, as they won't be used on the server side. +## Run the project -The `client_fn` we will use in our `start_simulation` function to generate our 3 -clients will be very basic: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you use the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```python3 -partitions, label = get_partitions_and_label() +### Run with the Simulation Engine -def client_fn(cid): - return FlowerClient(cid, partitions[int(cid)]).to_client() +```bash +flwr run . ``` -We pass a `client_id` and its corresponding partition to each client. - -### Evaluation +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -Please note that we do not perform distributed evaluation. This is because only -the server holds some labels to compare the results to. This is why the only -evaluation we perform is on the server side. - -In this example, we use the `FlowerClient` `evaluate` function for -backpropagation instead of using it for evaluation. We do this because we know -that the `evaluate` function of the clients will be called after the fit -function.
This allows us to aggregate our models in `aggregate_fit` and then -send them back to the clients using this `evaluate` function and perform the -backpropagation. This is not done for evaluation, hence why we return `None` in -the `aggregate_evaluate` function of the strategy. - -### Starting the simulation - -Putting everything together, to start our simulation we use the following -function: - -```python -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=3, - config=fl.server.ServerConfig(num_rounds=1000), - strategy=Strategy(label), -) +```bash +flwr run . --run-config "num-server-rounds=5 learning-rate=0.05" ``` -As mentioned before, we train for 1000 rounds but it should still last only -a few seconds. - -Note that we store the results of the simulation into `hist`, this will allow us -to use the `plot.py` file to plot the accuracy as a function of the number of -rounds. - -## Results - -Here we can observe the results after 1000 rounds: +### Run with the Deployment Engine -![Accuracy plot](_static/vfl-accuracy.png) +> \[!NOTE\] +> An update to this example will show how to run this Flower project with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/vertical-fl/_static/hfl.jpg b/examples/vertical-fl/_static/hfl.jpg deleted file mode 100644 index 7fd4c47de2b3..000000000000 Binary files a/examples/vertical-fl/_static/hfl.jpg and /dev/null differ diff --git a/examples/vertical-fl/_static/hfl.png b/examples/vertical-fl/_static/hfl.png new file mode 100644 index 000000000000..3078b927788a Binary files /dev/null and b/examples/vertical-fl/_static/hfl.png differ diff --git a/examples/vertical-fl/_static/vfl-accuracy.png b/examples/vertical-fl/_static/vfl-accuracy.png deleted file mode 100644 index c436b6db0825..000000000000 Binary files a/examples/vertical-fl/_static/vfl-accuracy.png and /dev/null differ diff --git a/examples/vertical-fl/_static/vfl.jpg b/examples/vertical-fl/_static/vfl.jpg deleted file mode 100644 index a7ce7dbfad31..000000000000 Binary files a/examples/vertical-fl/_static/vfl.jpg and /dev/null differ diff --git a/examples/vertical-fl/_static/vfl.png b/examples/vertical-fl/_static/vfl.png new file mode 100644 index 000000000000..89e8db72a952 Binary files /dev/null and b/examples/vertical-fl/_static/vfl.png differ diff --git a/examples/vertical-fl/client.py b/examples/vertical-fl/client.py deleted file mode 100644 index 9f489e70f086..000000000000 --- a/examples/vertical-fl/client.py +++ /dev/null @@ -1,27 +0,0 @@ -import flwr as fl -import torch -from sklearn.preprocessing import StandardScaler - -from task import ClientModel - - -class FlowerClient(fl.client.NumPyClient): - def __init__(self, cid, data): - self.cid = cid - self.train = torch.tensor(StandardScaler().fit_transform(data)).float() - self.model = ClientModel(input_size=self.train.shape[1]) - self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01) - self.embedding = self.model(self.train) - - def get_parameters(self, config): - pass - - def fit(self, parameters, config): - self.embedding = self.model(self.train) - return [self.embedding.detach().numpy()], 1, {} - - def evaluate(self, parameters, config): 
- self.model.zero_grad() - self.embedding.backward(torch.from_numpy(parameters[int(self.cid)])) - self.optimizer.step() - return 0.0, 1, {} diff --git a/examples/vertical-fl/_static/data/train.csv b/examples/vertical-fl/data/train.csv similarity index 100% rename from examples/vertical-fl/_static/data/train.csv rename to examples/vertical-fl/data/train.csv diff --git a/examples/vertical-fl/plot.py b/examples/vertical-fl/plot.py deleted file mode 100644 index 3dac7c04a3de..000000000000 --- a/examples/vertical-fl/plot.py +++ /dev/null @@ -1,8 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np - -if __name__ == "__main__": - hist = np.load("_static/results/hist.npy", allow_pickle=True).item() - rounds, values = zip(*hist.metrics_distributed_fit["accuracy"]) - plt.plot(np.asarray(rounds), np.asarray(values)) - plt.savefig("_static/results/accuracy.png") diff --git a/examples/vertical-fl/pyproject.toml b/examples/vertical-fl/pyproject.toml index 19dcd0e7a842..458878748cde 100644 --- a/examples/vertical-fl/pyproject.toml +++ b/examples/vertical-fl/pyproject.toml @@ -1,18 +1,37 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" -[tool.poetry] +[project] name = "vertical-fl" -version = "0.1.0" +version = "1.0.0" description = "PyTorch Vertical FL with Flower" -authors = ["The Flower Authors "] +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets>=0.3.0", + "numpy==1.24.4", + "pandas==2.0.3", + "scikit-learn==1.3.2", + "torch==2.1.0", +] -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -torch = "2.1.0" -matplotlib = "3.7.3" -scikit-learn = "1.3.2" -numpy = "1.24.4" -pandas = "2.0.3" +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "vertical_fl.server_app:app" +clientapp = 
"vertical_fl.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +learning-rate = 0.1 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 3 # Note that this example will require changes to how VFL is implemented diff --git a/examples/vertical-fl/requirements.txt b/examples/vertical-fl/requirements.txt deleted file mode 100644 index aee341e4c554..000000000000 --- a/examples/vertical-fl/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -torch==2.1.0 -matplotlib==3.7.3 -scikit-learn==1.3.2 -numpy==1.24.4 -pandas==2.0.3 diff --git a/examples/vertical-fl/simulation.py b/examples/vertical-fl/simulation.py deleted file mode 100644 index 1aa1c341d5eb..000000000000 --- a/examples/vertical-fl/simulation.py +++ /dev/null @@ -1,27 +0,0 @@ -from pathlib import Path - -import flwr as fl -import numpy as np - -from client import FlowerClient -from strategy import Strategy -from task import get_partitions_and_label - -partitions, label = get_partitions_and_label() - - -def client_fn(cid): - return FlowerClient(cid, partitions[int(cid)]).to_client() - - -# Start Flower server -hist = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=3, - config=fl.server.ServerConfig(num_rounds=1000), - strategy=Strategy(label), -) - -results_dir = Path("_static/results") -results_dir.mkdir(exist_ok=True) -np.save(str(results_dir / "hist.npy"), hist) diff --git a/examples/vertical-fl/task.py b/examples/vertical-fl/task.py deleted file mode 100644 index 603a051822e9..000000000000 --- a/examples/vertical-fl/task.py +++ /dev/null @@ -1,90 +0,0 @@ -import numpy as np -import pandas as pd -import torch.nn as nn - - -def _bin_age(age_series): - bins = [-np.inf, 10, 40, np.inf] - labels = ["Child", "Adult", "Elderly"] - return ( - pd.cut(age_series, bins=bins, labels=labels, right=True) - .astype(str) - .replace("nan", "Unknown") - ) - - -def _extract_title(name_series): - 
titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) - rare_titles = { - "Lady", - "Countess", - "Capt", - "Col", - "Don", - "Dr", - "Major", - "Rev", - "Sir", - "Jonkheer", - "Dona", - } - titles = titles.replace(list(rare_titles), "Rare") - titles = titles.replace({"Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs"}) - return titles - - -def _create_features(df): - # Convert 'Age' to numeric, coercing errors to NaN - df["Age"] = pd.to_numeric(df["Age"], errors="coerce") - df["Age"] = _bin_age(df["Age"]) - df["Cabin"] = df["Cabin"].str[0].fillna("Unknown") - df["Title"] = _extract_title(df["Name"]) - df.drop(columns=["PassengerId", "Name", "Ticket"], inplace=True) - all_keywords = set(df.columns) - df = pd.get_dummies( - df, columns=["Sex", "Pclass", "Embarked", "Title", "Cabin", "Age"] - ) - return df, all_keywords - - -def get_partitions_and_label(): - df = pd.read_csv("_static/data/train.csv") - processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() - processed_df, all_keywords = _create_features(processed_df) - raw_partitions = _partition_data(processed_df, all_keywords) - - partitions = [] - for partition in raw_partitions: - partitions.append(partition.drop("Survived", axis=1)) - return partitions, processed_df["Survived"].values - - -def _partition_data(df, all_keywords): - partitions = [] - keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] - keywords_sets.append(all_keywords - keywords_sets[0] - keywords_sets[1]) - - for keywords in keywords_sets: - partitions.append( - df[ - list( - { - col - for col in df.columns - for kw in keywords - if kw in col or "Survived" in col - } - ) - ] - ) - - return partitions - - -class ClientModel(nn.Module): - def __init__(self, input_size): - super(ClientModel, self).__init__() - self.fc = nn.Linear(input_size, 4) - - def forward(self, x): - return self.fc(x) diff --git a/examples/vertical-fl/vertical_fl/client_app.py b/examples/vertical-fl/vertical_fl/client_app.py new file mode 100644 index 
000000000000..d517480da1d4 --- /dev/null +++ b/examples/vertical-fl/vertical_fl/client_app.py @@ -0,0 +1,41 @@ +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from sklearn.preprocessing import StandardScaler +import torch + +from vertical_fl.task import ClientModel, load_data + + +class FlowerClient(NumPyClient): + def __init__(self, v_split_id, data, lr): + self.v_split_id = v_split_id + self.data = torch.tensor(StandardScaler().fit_transform(data)).float() + self.model = ClientModel(input_size=self.data.shape[1]) + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr) + + def get_parameters(self, config): + pass + + def fit(self, parameters, config): + embedding = self.model(self.data) + return [embedding.detach().numpy()], 1, {} + + def evaluate(self, parameters, config): + self.model.zero_grad() + embedding = self.model(self.data) + embedding.backward(torch.from_numpy(parameters[int(self.v_split_id)])) + self.optimizer.step() + return 0.0, 1, {} + + +def client_fn(context: Context): + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + partition, v_split_id = load_data(partition_id, num_partitions=num_partitions) + lr = context.run_config["learning-rate"] + return FlowerClient(v_split_id, partition, lr).to_client() + + +app = ClientApp( + client_fn=client_fn, +) diff --git a/examples/vertical-fl/vertical_fl/server_app.py b/examples/vertical-fl/vertical_fl/server_app.py new file mode 100644 index 000000000000..95620226b707 --- /dev/null +++ b/examples/vertical-fl/vertical_fl/server_app.py @@ -0,0 +1,25 @@ +from flwr.common import Context +from flwr.server import ServerApp, ServerAppComponents, ServerConfig + +from vertical_fl.strategy import Strategy +from vertical_fl.task import process_dataset + + +def server_fn(context: Context) -> ServerAppComponents: + """Construct components that set the ServerApp behaviour.""" + + # Get dataset + processed_df, _ = 
process_dataset() + + # Define the strategy + strategy = Strategy(processed_df["Survived"].values) + + # Construct ServerConfig + num_rounds = context.run_config["num-server-rounds"] + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Start Flower server +app = ServerApp(server_fn=server_fn) diff --git a/examples/vertical-fl/strategy.py b/examples/vertical-fl/vertical_fl/strategy.py similarity index 66% rename from examples/vertical-fl/strategy.py rename to examples/vertical-fl/vertical_fl/strategy.py index 0744fa83662a..9195416076b0 100644 --- a/examples/vertical-fl/strategy.py +++ b/examples/vertical-fl/vertical_fl/strategy.py @@ -17,37 +17,8 @@ def forward(self, x): class Strategy(fl.server.strategy.FedAvg): - def __init__( - self, - labels, - *, - fraction_fit=1, - fraction_evaluate=1, - min_fit_clients=2, - min_evaluate_clients=2, - min_available_clients=2, - evaluate_fn=None, - on_fit_config_fn=None, - on_evaluate_config_fn=None, - accept_failures=True, - initial_parameters=None, - fit_metrics_aggregation_fn=None, - evaluate_metrics_aggregation_fn=None, - ) -> None: - super().__init__( - fraction_fit=fraction_fit, - fraction_evaluate=fraction_evaluate, - min_fit_clients=min_fit_clients, - min_evaluate_clients=min_evaluate_clients, - min_available_clients=min_available_clients, - evaluate_fn=evaluate_fn, - on_fit_config_fn=on_fit_config_fn, - on_evaluate_config_fn=on_evaluate_config_fn, - accept_failures=accept_failures, - initial_parameters=initial_parameters, - fit_metrics_aggregation_fn=fit_metrics_aggregation_fn, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation_fn, - ) + def __init__(self, labels, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) self.model = ServerModel(12) self.initial_parameters = ndarrays_to_parameters( [val.cpu().numpy() for _, val in self.model.state_dict().items()] diff --git a/examples/vertical-fl/vertical_fl/task.py 
b/examples/vertical-fl/vertical_fl/task.py new file mode 100644 index 000000000000..8e76d9419a8a --- /dev/null +++ b/examples/vertical-fl/vertical_fl/task.py @@ -0,0 +1,139 @@ +from pathlib import Path +from logging import WARN +import torch.nn as nn +import numpy as np +import pandas as pd +import torch.nn as nn +from flwr.common.logger import log + +from datasets import Dataset +from flwr_datasets.partitioner import IidPartitioner + +NUM_VERTICAL_SPLITS = 3 + + +def _bin_age(age_series): + bins = [-np.inf, 10, 40, np.inf] + labels = ["Child", "Adult", "Elderly"] + return ( + pd.cut(age_series, bins=bins, labels=labels, right=True) + .astype(str) + .replace("nan", "Unknown") + ) + + +def _extract_title(name_series): + titles = name_series.str.extract(" ([A-Za-z]+)\.", expand=False) + rare_titles = { + "Lady", + "Countess", + "Capt", + "Col", + "Don", + "Dr", + "Major", + "Rev", + "Sir", + "Jonkheer", + "Dona", + } + titles = titles.replace(list(rare_titles), "Rare") + titles = titles.replace({"Mlle": "Miss", "Ms": "Miss", "Mme": "Mrs"}) + return titles + + +def _create_features(df): + # Convert 'Age' to numeric, coercing errors to NaN + df["Age"] = pd.to_numeric(df["Age"], errors="coerce") + df["Age"] = _bin_age(df["Age"]) + df["Cabin"] = df["Cabin"].str[0].fillna("Unknown") + df["Title"] = _extract_title(df["Name"]) + df.drop(columns=["PassengerId", "Name", "Ticket"], inplace=True) + all_keywords = set(df.columns) + df = pd.get_dummies( + df, columns=["Sex", "Pclass", "Embarked", "Title", "Cabin", "Age"] + ) + return df, all_keywords + + +def process_dataset(): + + df = pd.read_csv(Path(__file__).parents[1] / "data/train.csv") + processed_df = df.dropna(subset=["Embarked", "Fare"]).copy() + return _create_features(processed_df) + + +def load_data(partition_id: int, num_partitions: int): + """Partition the data vertically and then horizontally. + + We create three sets of features representing three types of nodes participating in + the federation. 
+ + [{'Cabin', 'Parch', 'Pclass'}, {'Sex', 'Title'}, {'Age', 'Embarked', 'Fare', + 'SibSp', 'Survived'}] + + Once the whole dataset is split vertically and a set of features is selected based + on mod(partition_id, 3), it is split horizontally into `ceil(num_partitions/3)` + partitions. This function returns the partition with index `partition_id % 3`. + """ + + if num_partitions != NUM_VERTICAL_SPLITS: + log( + WARN, + "To run this example with num_partitions other than 3, you need to update how " + "the Vertical FL training is performed. This is because the shapes of the " + "gradients might not be the same along the first dimension.", + ) + + # Read whole dataset and process + processed_df, features_set = process_dataset() + + # Vertical Split and select + v_partitions = _partition_data_vertically(processed_df, features_set) + v_split_id = np.mod(partition_id, NUM_VERTICAL_SPLITS) + v_partition = v_partitions[v_split_id] + + # Convert to HuggingFace dataset + dataset = Dataset.from_pandas(v_partition) + + # Split horizontally with Flower Dataset partitioner + num_h_partitions = int(np.ceil(num_partitions / NUM_VERTICAL_SPLITS)) + partitioner = IidPartitioner(num_partitions=num_h_partitions) + partitioner.dataset = dataset + + # Extract partition of the `ClientApp` calling this function + partition = partitioner.load_partition(partition_id % num_h_partitions) + partition.remove_columns(["Survived"]) + + return partition.to_pandas(), v_split_id + + +def _partition_data_vertically(df, all_keywords): + partitions = [] + keywords_sets = [{"Parch", "Cabin", "Pclass"}, {"Sex", "Title"}] + keywords_sets.append(all_keywords - keywords_sets[0] - keywords_sets[1]) + + for keywords in keywords_sets: + partitions.append( + df[ + list( + { + col + for col in df.columns + for kw in keywords + if kw in col or "Survived" in col + } + ) + ] + ) + + return partitions + + +class ClientModel(nn.Module): + def __init__(self, input_size): + super().__init__() + self.fc = 
nn.Linear(input_size, 4) + + def forward(self, x): + return self.fc(x) diff --git a/examples/vit-finetune/client.py b/examples/vit-finetune/client.py deleted file mode 100644 index 6226b9363ca4..000000000000 --- a/examples/vit-finetune/client.py +++ /dev/null @@ -1,80 +0,0 @@ -import flwr -import torch -from flwr.client import NumPyClient -from torch.utils.data import DataLoader - -from dataset import apply_transforms, get_dataset_with_partitions -from model import get_model, set_parameters, train - - -class FedViTClient(NumPyClient): - def __init__(self, trainset): - self.trainset = trainset - self.model = get_model() - - # Determine device - self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - self.model.to(self.device) # send model to device - - def set_for_finetuning(self): - """Freeze all parameter except those in the final head. - - Only output MLP will be updated by the client and therefore, the only part of - the model that will be federated (hence, communicated back to the server for - aggregation.) 
- """ - - # Disable gradients for everything - self.model.requires_grad_(False) - # Now enable just for output head - self.model.heads.requires_grad_(True) - - def get_parameters(self, config): - """Get locally updated parameters.""" - finetune_layers = self.model.heads - return [val.cpu().numpy() for _, val in finetune_layers.state_dict().items()] - - def fit(self, parameters, config): - set_parameters(self.model, parameters) - - # Get some info from the config - # Get batchsize and LR set from server - batch_size = config["batch_size"] - lr = config["lr"] - - trainloader = DataLoader( - self.trainset, batch_size=batch_size, num_workers=2, shuffle=True - ) - - # Set optimizer - optimizer = torch.optim.Adam(self.model.parameters(), lr=lr) - # Train locally - avg_train_loss = train( - self.model, trainloader, optimizer, epochs=1, device=self.device - ) - # Return locally-finetuned part of the model - return ( - self.get_parameters(config={}), - len(trainloader.dataset), - {"train_loss": avg_train_loss}, - ) - - -# Downloads and partition dataset -federated_ox_flowers, _ = get_dataset_with_partitions(num_partitions=20) - - -def client_fn(cid: str): - """Return a FedViTClient that trains with the cid-th data partition.""" - - trainset_for_this_client = federated_ox_flowers.load_partition(int(cid), "train") - - trainset = trainset_for_this_client.with_transform(apply_transforms) - - return FedViTClient(trainset).to_client() - - -# To be used with Flower Next -app = flwr.client.ClientApp( - client_fn=client_fn, -) diff --git a/examples/vit-finetune/dataset.py b/examples/vit-finetune/dataset.py deleted file mode 100644 index e1e01da61dd4..000000000000 --- a/examples/vit-finetune/dataset.py +++ /dev/null @@ -1,51 +0,0 @@ -from flwr_datasets import FederatedDataset -from torchvision.transforms import ( - CenterCrop, - Compose, - Normalize, - RandomResizedCrop, - Resize, - ToTensor, -) - - -def get_dataset_with_partitions(num_partitions: int): - """Get Oxford Flowers 
datasets and partition it. - - Return partitioned dataset as well as the whole test set. - """ - - # Get Oxford Flowers-102 and divide it into 20 IID partitions - ox_flowers_fds = FederatedDataset( - dataset="nelorth/oxford-flowers", partitioners={"train": num_partitions} - ) - - centralized_testset = ox_flowers_fds.load_split("test") - return ox_flowers_fds, centralized_testset - - -def apply_eval_transforms(batch): - """Apply a very standard set of image transforms.""" - transforms = Compose( - [ - Resize((256, 256)), - CenterCrop((224, 224)), - ToTensor(), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - batch["image"] = [transforms(img) for img in batch["image"]] - return batch - - -def apply_transforms(batch): - """Apply a very standard set of image transforms.""" - transforms = Compose( - [ - RandomResizedCrop((224, 224)), - ToTensor(), - Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), - ] - ) - batch["image"] = [transforms(img) for img in batch["image"]] - return batch diff --git a/examples/vit-finetune/main.py b/examples/vit-finetune/main.py deleted file mode 100644 index 33ad78a04d47..000000000000 --- a/examples/vit-finetune/main.py +++ /dev/null @@ -1,57 +0,0 @@ -import argparse - -import flwr as fl -import matplotlib.pyplot as plt - -from client import client_fn -from server import strategy - -parser = argparse.ArgumentParser( - description="Finetuning of a ViT with Flower Simulation." -) - -parser.add_argument( - "--num-rounds", - type=int, - default=20, - help="Number of rounds.", -) - - -def main(): - args = parser.parse_args() - - # To control the degree of parallelism - # With default settings in this example, - # each client should take just ~1GB of VRAM. 
- client_resources = { - "num_cpus": 4, - "num_gpus": 0.2, - } - - # Launch simulation - history = fl.simulation.start_simulation( - client_fn=client_fn, - num_clients=20, - client_resources=client_resources, - config=fl.server.ServerConfig(num_rounds=args.num_rounds), - strategy=strategy, - ) - - print(history) - - # Basic plotting - global_accuracy_centralised = history.metrics_centralized["accuracy"] - round = [int(data[0]) for data in global_accuracy_centralised] - acc = [100.0 * data[1] for data in global_accuracy_centralised] - plt.plot(round, acc) - plt.xticks(round) - plt.grid() - plt.ylabel("Accuracy (%)") - plt.xlabel("Round") - plt.title("Federated finetuning of ViT for Flowers-102") - plt.savefig("central_evaluation.png") - - -if __name__ == "__main__": - main() diff --git a/examples/vit-finetune/model.py b/examples/vit-finetune/model.py deleted file mode 100644 index a0b8294aa485..000000000000 --- a/examples/vit-finetune/model.py +++ /dev/null @@ -1,71 +0,0 @@ -from collections import OrderedDict - -import torch -from torchvision.models import ViT_B_16_Weights, vit_b_16 - - -def get_model(): - """Return a pretrained ViT with all layers frozen except output head.""" - - # Instantiate a pre-trained ViT-B on ImageNet - model = vit_b_16(weights=ViT_B_16_Weights.IMAGENET1K_V1) - - # We're going to federated the finetuning of this model - # using the Oxford Flowers-102 dataset. One easy way to achieve - # this is by re-initializing the output block of the ViT so it - # outputs 102 clases instead of the default 1k - in_features = model.heads[-1].in_features - model.heads[-1] = torch.nn.Linear(in_features, 102) - - # Disable gradients for everything - model.requires_grad_(False) - # Now enable just for output head - model.heads.requires_grad_(True) - - return model - - -def set_parameters(model, parameters): - """Apply the parameters to the model. - - Recall this example only federates the head of the ViT so that's the only part of - the model we need to load. 
- """ - finetune_layers = model.heads - params_dict = zip(finetune_layers.state_dict().keys(), parameters) - state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - finetune_layers.load_state_dict(state_dict, strict=True) - - -def train(net, trainloader, optimizer, epochs, device): - """Train the model on the training set.""" - criterion = torch.nn.CrossEntropyLoss() - net.train() - avg_loss = 0 - # A very standard training loop for image classification - for _ in range(epochs): - for batch in trainloader: - images, labels = batch["image"].to(device), batch["label"].to(device) - optimizer.zero_grad() - loss = criterion(net(images), labels) - avg_loss += loss.item() / labels.shape[0] - loss.backward() - optimizer.step() - - return avg_loss / len(trainloader) - - -def test(net, testloader, device: str): - """Validate the network on the entire test set.""" - criterion = torch.nn.CrossEntropyLoss() - correct, loss = 0, 0.0 - net.eval() - with torch.no_grad(): - for data in testloader: - images, labels = data["image"].to(device), data["label"].to(device) - outputs = net(images) - loss += criterion(outputs, labels).item() - _, predicted = torch.max(outputs.data, 1) - correct += (predicted == labels).sum().item() - accuracy = correct / len(testloader.dataset) - return loss, accuracy diff --git a/examples/vit-finetune/pyproject.toml b/examples/vit-finetune/pyproject.toml deleted file mode 100644 index d014d6b6fb2a..000000000000 --- a/examples/vit-finetune/pyproject.toml +++ /dev/null @@ -1,17 +0,0 @@ -[build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "vit-finetune" -version = "0.1.0" -description = "FL finetuning of a Vision Transformer with Flower." 
-authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } -flwr-datasets = { extras = ["vision"], version = ">=0.0.2,<1.0.0" } -torch = "2.2.1" -torchvision = "0.17.1" -matplotlib = "3.8.3" diff --git a/examples/vit-finetune/requirements.txt b/examples/vit-finetune/requirements.txt deleted file mode 100644 index 3692be0d6c2c..000000000000 --- a/examples/vit-finetune/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -flwr[simulation]>=1.0, <2.0 -flwr-datasets[vision]>=0.0.2, <1.0.0 -matplotlib==3.8.3 -torch==2.2.1 -torchvision==0.17.1 \ No newline at end of file diff --git a/examples/vit-finetune/server.py b/examples/vit-finetune/server.py deleted file mode 100644 index 5352d34c4fe2..000000000000 --- a/examples/vit-finetune/server.py +++ /dev/null @@ -1,61 +0,0 @@ -import flwr as fl -import torch -from datasets import Dataset -from torch.utils.data import DataLoader - -from dataset import apply_eval_transforms, get_dataset_with_partitions -from model import get_model, set_parameters, test - - -def fit_config(server_round: int): - """Return a configuration with static batch size and (local) epochs.""" - config = { - "lr": 0.01, # Learning rate used by clients - "batch_size": 32, # Batch size to use by clients during fit() - } - return config - - -def get_evaluate_fn( - centralized_testset: Dataset, -): - """Return an evaluation function for centralized evaluation.""" - - def evaluate(server_round, parameters, config): - """Use the entire Oxford Flowers-102 test set for evaluation.""" - - # Determine device - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - - model = get_model() - set_parameters(model, parameters) - model.to(device) - - # Apply transform to dataset - testset = centralized_testset.with_transform(apply_eval_transforms) - - testloader = DataLoader(testset, batch_size=128) - # Run evaluation - loss, accuracy = test(model, testloader, 
device=device) - - return loss, {"accuracy": accuracy} - - return evaluate - - -# Downloads and partition dataset -_, centralized_testset = get_dataset_with_partitions(num_partitions=20) - -# Configure the strategy -strategy = fl.server.strategy.FedAvg( - fraction_fit=0.5, # Sample 50% of available clients for training each round - fraction_evaluate=0.0, # No federated evaluation - on_fit_config_fn=fit_config, - evaluate_fn=get_evaluate_fn(centralized_testset), # Global evaluation function -) - -# To be used with Flower Next -app = fl.server.ServerApp( - config=fl.server.ServerConfig(num_rounds=3), - strategy=strategy, -) diff --git a/examples/whisper-federated-finetuning/pyproject.toml b/examples/whisper-federated-finetuning/pyproject.toml index 27a89578c5a0..3d7bb023537c 100644 --- a/examples/whisper-federated-finetuning/pyproject.toml +++ b/examples/whisper-federated-finetuning/pyproject.toml @@ -9,7 +9,7 @@ description = "On-device Federated Downstreaming for Speech Classification" authors = ["The Flower Authors "] [tool.poetry.dependencies] -python = ">=3.8,<3.11" +python = ">=3.9,<3.11" flwr = { extras = ["simulation"], version = ">=1.0,<2.0" } transformers = "4.32.1" tokenizers = "0.13.3" diff --git a/examples/xgboost-comprehensive/README.md b/examples/xgboost-comprehensive/README.md index 62fcba2bb06d..f65f2dbeb645 100644 --- a/examples/xgboost-comprehensive/README.md +++ b/examples/xgboost-comprehensive/README.md @@ -4,23 +4,21 @@ dataset: [HIGGS] framework: [xgboost] --- -# Flower Example using XGBoost (Comprehensive) +# Federated Learning with XGBoost and Flower (Comprehensive Example) This example demonstrates a comprehensive federated learning setup using Flower with XGBoost. We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset to perform a binary classification task. This examples uses [Flower Datasets](https://flower.ai/docs/datasets/) to retrieve, partition and preprocess the data for each Flower client. 
It differs from the [xgboost-quickstart](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) example in the following ways: -- Arguments parsers of server and clients for hyperparameters selection. - Customised FL settings. -- Customised number of partitions. - Customised partitioner type (uniform, linear, square, exponential). - Centralised/distributed evaluation. - Bagging/cyclic training methods. -- You can run it with Flower Simulation +- Support of scaled learning rate. ## Training Strategies -This example provides two training strategies, **bagging aggregation** and **cyclic training**. +This example provides two training strategies, [**bagging aggregation**](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html#tree-based-bagging-aggregation) ([docs](https://flower.ai/docs/framework/ref-api/flwr.server.strategy.FedXgbBagging.html)) and [**cyclic training**](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html#cyclic_training) ([docs](https://flower.ai/docs/framework/ref-api/flwr.server.strategy.FedXgbCyclic.html)). ### Bagging Aggregation @@ -43,127 +41,86 @@ Instead of aggregating multiple clients, there is only one single client participating in the training per round in the cyclic training scenario. The trained local XGBoost trees will be passed to the next client as an initialised model for next round's boosting. -## Project Setup +## Set up the project -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +### Clone the project -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/xgboost-comprehensive . 
&& rm -rf flower && cd xgboost-comprehensive -``` - -This will create a new directory called `xgboost-comprehensive` containing the following files: - -``` --- README.md <- Your're reading this right now --- server.py <- Defines the server-side logic --- client.py <- Defines the client-side logic --- dataset.py <- Defines the functions of data loading and partitioning --- utils.py <- Defines the arguments parser and hyper-parameters --- client_utils.py <- Defines the client utility functions --- server_utils.py <- Defines the server utility functions --- sim.py <- Example of using Flower simulation --- run_bagging.sh <- Commands to run bagging experiments --- run_cyclic.sh <- Commands to run cyclic experiments --- pyproject.toml <- Example dependencies (if you use Poetry) --- requirements.txt <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `xgboost` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. - -#### Poetry +Start by cloning the example project: ```shell -poetry install -poetry shell +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/xgboost-comprehensive . \ + && rm -rf _tmp \ + && cd xgboost-comprehensive ``` -Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: - -```shell -poetry run python -c "import flwr" -``` - -If you don't see any errors you're good to go! 
- -#### pip - -Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. +This will create a new directory called `xgboost-comprehensive` with the following structure: ```shell -pip install -r requirements.txt +xgboost-comprehensive +├── xgboost_comprehensive +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your model, training and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -## Run Federated Learning with XGBoost and Flower +### Install dependencies and project -You can run this example in two ways: either by manually launching the server, and then several clients that connect to it; or by launching a Flower simulation. Both run the same workload, yielding identical results. The former is ideal for deployments on different machines, while the latter makes it easy to simulate large client cohorts in a resource-aware manner. You can read more about how Flower Simulation works in the [Documentation](https://flower.ai/docs/framework/how-to-run-simulations.html). The commands shown below assume you have activated your environment (if you decide to use Poetry, you can activate it via `poetry shell`). +Install the dependencies defined in `pyproject.toml` as well as the `xgboost_comprehensive` package. -### Independent Client/Server Setup - -We have two scripts to run bagging and cyclic (client-by-client) experiments. -The included `run_bagging.sh` or `run_cyclic.sh` will start the Flower server (using `server.py`), -sleep for 15 seconds to ensure that the server is up, -and then start 5 Flower clients (using `client.py`) with a small subset of the data from exponential partition distribution. - -You can simply start everything in a terminal as follows: - -```shell -./run_bagging.sh +```bash +pip install -e . 
``` -Or - -```shell -./run_cyclic.sh -``` +## Run the project -The script starts processes in the background so that you don't have to open six terminal windows. +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -You can also run the example without the scripts. First, launch the server: +### Run with the Simulation Engine ```bash -python server.py --train-method=bagging/cyclic --pool-size=N --num-clients-per-round=N +flwr run . ``` -Then run at least two clients (each on a new terminal or computer in your network) passing different `PARTITION_ID` and all using the same `N` (denoting the total number of clients or data partitions): +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: ```bash -python client.py --train-method=bagging/cyclic --partition-id=PARTITION_ID --num-partitions=N -``` - -### Flower Simulation Setup - -We also provide an example code (`sim.py`) to use the simulation capabilities of Flower to simulate federated XGBoost training on either a single machine or a cluster of machines. With default arguments, each client will use 2 CPUs. - -To run bagging aggregation with 5 clients for 30 rounds evaluated on centralised test set: +# To run bagging aggregation for 5 rounds evaluated on centralised test set +flwr run . --run-config "train-method='bagging' num-server-rounds=5 centralised-eval=true" -```shell -python sim.py --train-method=bagging --pool-size=5 --num-clients-per-round=5 --num-rounds=30 --centralised-eval +# To run cyclic training with linear partitioner type evaluated on centralised test set: +flwr run . 
--run-config "train-method='cyclic' partitioner-type='linear' centralised-eval-client=true" ``` -To run cyclic training with 5 clients for 30 rounds evaluated on centralised test set: +> \[!TIP\] +> For a more detailed walk-through check our [XGBoost tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html). +> To extend the aggregation strategy for saving, logging, or other functions, please refer to our [advanced-pytorch](https://github.com/adap/flower/tree/main/examples/advanced-pytorch) example. -```shell -python sim.py --train-method=cyclic --pool-size=5 --num-rounds=30 --centralised-eval-client -``` +### Run with the Deployment Engine -In addition, we provide more options to customise the experimental settings, including data partitioning and centralised/distributed evaluation (see `utils.py`). -Check the [tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. -### Expected Experimental Results +## Expected Experimental Results -#### Bagging aggregation experiment +### Bagging aggregation experiment -![](_static/xgboost_flower_auc_bagging.png) +
+XGBoost with Flower and Bagging strategy +
The figure above shows the centralised tested AUC performance over FL rounds with bagging aggregation strategy on 4 experimental settings. One can see that all settings obtain stable performance boost over FL rounds (especially noticeable at the start of training). As expected, uniform client distribution shows higher AUC values than square/exponential setup. -#### Cyclic training experiment +### Cyclic training experiment -![](_static/xgboost_flower_auc_cyclic.png) +
+XGBoost with Flower and Cyclic strategy +
This figure shows the cyclic training results on centralised test set. The models with cyclic training requires more rounds to converge diff --git a/examples/xgboost-comprehensive/client.py b/examples/xgboost-comprehensive/client.py deleted file mode 100644 index 879e106493f6..000000000000 --- a/examples/xgboost-comprehensive/client.py +++ /dev/null @@ -1,81 +0,0 @@ -import warnings -from logging import INFO - -import flwr as fl -from flwr.common.logger import log -from flwr_datasets import FederatedDataset - -from client_utils import XgbClient -from dataset import ( - instantiate_partitioner, - resplit, - train_test_split, - transform_dataset_to_dmatrix, -) -from utils import BST_PARAMS, NUM_LOCAL_ROUND, client_args_parser - -warnings.filterwarnings("ignore", category=UserWarning) - - -# Parse arguments for experimental settings -args = client_args_parser() - -# Train method (bagging or cyclic) -train_method = args.train_method - -# Load (HIGGS) dataset and conduct partitioning -# Instantiate partitioner from ["uniform", "linear", "square", "exponential"] -partitioner = instantiate_partitioner( - partitioner_type=args.partitioner_type, num_partitions=args.num_partitions -) -fds = FederatedDataset( - dataset="jxie/higgs", - partitioners={"train": partitioner}, - preprocessor=resplit, -) - -# Load the partition for this `partition_id` -log(INFO, "Loading partition...") -partition = fds.load_partition(partition_id=args.partition_id, split="train") -partition.set_format("numpy") - -if args.centralised_eval: - # Use centralised test set for evaluation - train_data = partition - valid_data = fds.load_split("test") - valid_data.set_format("numpy") - num_train = train_data.shape[0] - num_val = valid_data.shape[0] -else: - # Train/test splitting - train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=args.test_fraction, seed=args.seed - ) - -# Reformat data to DMatrix for xgboost -log(INFO, "Reformatting data...") -train_dmatrix = 
transform_dataset_to_dmatrix(train_data) -valid_dmatrix = transform_dataset_to_dmatrix(valid_data) - -# Hyper-parameters for xgboost training -num_local_round = NUM_LOCAL_ROUND -params = BST_PARAMS - -# Setup learning rate -if args.train_method == "bagging" and args.scaled_lr: - new_lr = params["eta"] / args.num_partitions - params.update({"eta": new_lr}) - -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", - client=XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - train_method, - ), -) diff --git a/examples/xgboost-comprehensive/dataset.py b/examples/xgboost-comprehensive/dataset.py deleted file mode 100644 index eebd87219fa6..000000000000 --- a/examples/xgboost-comprehensive/dataset.py +++ /dev/null @@ -1,74 +0,0 @@ -from typing import Union - -import xgboost as xgb -from datasets import Dataset, DatasetDict, concatenate_datasets -from flwr_datasets.partitioner import ( - ExponentialPartitioner, - IidPartitioner, - LinearPartitioner, - SquarePartitioner, -) - -CORRELATION_TO_PARTITIONER = { - "uniform": IidPartitioner, - "linear": LinearPartitioner, - "square": SquarePartitioner, - "exponential": ExponentialPartitioner, -} - - -def instantiate_partitioner(partitioner_type: str, num_partitions: int): - """Initialise partitioner based on selected partitioner type and number of - partitions.""" - partitioner = CORRELATION_TO_PARTITIONER[partitioner_type]( - num_partitions=num_partitions - ) - return partitioner - - -def train_test_split(partition: Dataset, test_fraction: float, seed: int): - """Split the data into train and validation set given split rate.""" - train_test = partition.train_test_split(test_size=test_fraction, seed=seed) - partition_train = train_test["train"] - partition_test = train_test["test"] - - num_train = len(partition_train) - num_test = len(partition_test) - - return partition_train, partition_test, num_train, num_test - - -def 
transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core.DMatrix: - """Transform dataset to DMatrix format for xgboost.""" - x, y = separate_xy(data) - new_data = xgb.DMatrix(x, label=y) - return new_data - - -def separate_xy(data: Union[Dataset, DatasetDict]): - """Return outputs of x (data) and y (labels) .""" - x = data["inputs"] - y = data["label"] - return x, y - - -def resplit(dataset: DatasetDict) -> DatasetDict: - """Increase the quantity of centralised test samples from 500K to 1M.""" - return DatasetDict( - { - "train": dataset["train"].select( - range(0, dataset["train"].num_rows - 500_000) - ), - "test": concatenate_datasets( - [ - dataset["train"].select( - range( - dataset["train"].num_rows - 500_000, - dataset["train"].num_rows, - ) - ), - dataset["test"], - ] - ), - } - ) diff --git a/examples/xgboost-comprehensive/pyproject.toml b/examples/xgboost-comprehensive/pyproject.toml index c9259ffa1db4..3906f8bf3301 100644 --- a/examples/xgboost-comprehensive/pyproject.toml +++ b/examples/xgboost-comprehensive/pyproject.toml @@ -1,15 +1,55 @@ [build-system] -requires = ["poetry-core>=1.4.0"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry] -name = "xgboost-comprehensive" -version = "0.1.0" -description = "Federated XGBoost with Flower (comprehensive)" -authors = ["The Flower Authors "] - -[tool.poetry.dependencies] -python = ">=3.8,<3.11" -flwr = { extras = ["simulation"], version = ">=1.7.0,<2.0" } -flwr-datasets = ">=0.2.0,<1.0.0" -xgboost = ">=2.0.0,<3.0.0" +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "xgboost_comprehensive" +version = "1.0.0" +description = "Federated Learning with XGBoost and Flower (Comprehensive Example)" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets>=0.3.0", + "xgboost>=2.0.0", +] + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = 
"xgboost_comprehensive.server_app:app" +clientapp = "xgboost_comprehensive.client_app:app" + +[tool.flwr.app.config] +# ServerApp +train-method = "bagging" # Choose from [bagging, cyclic] +num-server-rounds = 3 +fraction-fit = 1.0 +fraction-evaluate = 1.0 +centralised-eval = false + +# ClientApp +partitioner-type = "uniform" # Choose from [uniform, linear, square, exponential] +test-fraction = 0.2 +seed = 42 +centralised-eval-client = false +local-epochs = 1 +scaled-lr = false +params.objective = "binary:logistic" +params.eta = 0.1 # Learning rate +params.max-depth = 8 +params.eval-metric = "auc" +params.nthread = 16 +params.num-parallel-tree = 1 +params.subsample = 1 +params.tree-method = "hist" + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 5 +options.backend.client-resources.num-cpus = 2 diff --git a/examples/xgboost-comprehensive/requirements.txt b/examples/xgboost-comprehensive/requirements.txt deleted file mode 100644 index 840e19529953..000000000000 --- a/examples/xgboost-comprehensive/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -flwr[simulation]>=1.7.0, <2.0 -flwr-datasets>=0.2.0, <1.0.0 -xgboost>=2.0.0, <3.0.0 diff --git a/examples/xgboost-comprehensive/run_bagging.sh b/examples/xgboost-comprehensive/run_bagging.sh deleted file mode 100755 index a6300b781a06..000000000000 --- a/examples/xgboost-comprehensive/run_bagging.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python3 server.py --pool-size=5 --num-rounds=30 --num-clients-per-round=5 --centralised-eval & -sleep 30 # Sleep for 30s to give the server enough time to start - -for i in `seq 0 4`; do - echo "Starting client $i" - python3 client.py --partition-id=$i --num-partitions=5 --partitioner-type=exponential & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT 
SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/xgboost-comprehensive/run_cyclic.sh b/examples/xgboost-comprehensive/run_cyclic.sh deleted file mode 100755 index 258bdf2fe0d8..000000000000 --- a/examples/xgboost-comprehensive/run_cyclic.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python3 server.py --train-method=cyclic --pool-size=5 --num-rounds=100 & -sleep 15 # Sleep for 15s to give the server enough time to start - -for i in `seq 0 4`; do - echo "Starting client $i" - python3 client.py --partition-id=$i --train-method=cyclic --num-partitions=5 --partitioner-type=exponential --centralised-eval & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/xgboost-comprehensive/server.py b/examples/xgboost-comprehensive/server.py deleted file mode 100644 index 1d0dc0aecd43..000000000000 --- a/examples/xgboost-comprehensive/server.py +++ /dev/null @@ -1,75 +0,0 @@ -import warnings -from logging import INFO - -import flwr as fl -from flwr.common.logger import log -from flwr.server.strategy import FedXgbBagging, FedXgbCyclic -from flwr_datasets import FederatedDataset - -from dataset import resplit, transform_dataset_to_dmatrix -from server_utils import ( - CyclicClientManager, - eval_config, - evaluate_metrics_aggregation, - fit_config, - get_evaluate_fn, -) -from utils import server_args_parser - -warnings.filterwarnings("ignore", category=UserWarning) - - -# Parse arguments for experimental settings -args = server_args_parser() -train_method = args.train_method -pool_size = args.pool_size -num_rounds = args.num_rounds -num_clients_per_round = args.num_clients_per_round -num_evaluate_clients = args.num_evaluate_clients -centralised_eval = args.centralised_eval - -# Load centralised test set -if 
centralised_eval: - fds = FederatedDataset( - dataset="jxie/higgs", partitioners={"train": 20}, preprocessor=resplit - ) - log(INFO, "Loading centralised test set...") - test_set = fds.load_split("test") - test_set.set_format("numpy") - test_dmatrix = transform_dataset_to_dmatrix(test_set) - - -# Define strategy -if train_method == "bagging": - # Bagging training - strategy = FedXgbBagging( - evaluate_function=get_evaluate_fn(test_dmatrix) if centralised_eval else None, - fraction_fit=(float(num_clients_per_round) / pool_size), - min_fit_clients=num_clients_per_round, - min_available_clients=pool_size, - min_evaluate_clients=num_evaluate_clients if not centralised_eval else 0, - fraction_evaluate=1.0 if not centralised_eval else 0.0, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=( - evaluate_metrics_aggregation if not centralised_eval else None - ), - ) -else: - # Cyclic training - strategy = FedXgbCyclic( - fraction_fit=1.0, - min_available_clients=pool_size, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - ) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=num_rounds), - strategy=strategy, - client_manager=CyclicClientManager() if train_method == "cyclic" else None, -) diff --git a/examples/xgboost-comprehensive/server_utils.py b/examples/xgboost-comprehensive/server_utils.py deleted file mode 100644 index f6610afce5ac..000000000000 --- a/examples/xgboost-comprehensive/server_utils.py +++ /dev/null @@ -1,103 +0,0 @@ -from logging import INFO -from typing import Dict, List, Optional - -import xgboost as xgb -from flwr.common import Parameters, Scalar -from flwr.common.logger import log -from flwr.server.client_manager import SimpleClientManager -from flwr.server.client_proxy import ClientProxy -from flwr.server.criterion 
import Criterion - -from utils import BST_PARAMS - - -def eval_config(rnd: int) -> Dict[str, str]: - """Return a configuration with global epochs.""" - config = { - "global_round": str(rnd), - } - return config - - -def fit_config(rnd: int) -> Dict[str, str]: - """Return a configuration with global epochs.""" - config = { - "global_round": str(rnd), - } - return config - - -def evaluate_metrics_aggregation(eval_metrics): - """Return an aggregated metric (AUC) for evaluation.""" - total_num = sum([num for num, _ in eval_metrics]) - auc_aggregated = ( - sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num - ) - metrics_aggregated = {"AUC": auc_aggregated} - return metrics_aggregated - - -def get_evaluate_fn(test_data): - """Return a function for centralised evaluation.""" - - def evaluate_fn( - server_round: int, parameters: Parameters, config: Dict[str, Scalar] - ): - # If at the first round, skip the evaluation - if server_round == 0: - return 0, {} - else: - bst = xgb.Booster(params=BST_PARAMS) - for para in parameters.tensors: - para_b = bytearray(para) - - # Load global model - bst.load_model(para_b) - # Run evaluation - eval_results = bst.eval_set( - evals=[(test_data, "valid")], - iteration=bst.num_boosted_rounds() - 1, - ) - auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - log(INFO, f"AUC = {auc} at round {server_round}") - - return 0, {"AUC": auc} - - return evaluate_fn - - -class CyclicClientManager(SimpleClientManager): - """Provides a cyclic client selection rule.""" - - def sample( - self, - num_clients: int, - min_num_clients: Optional[int] = None, - criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: - """Sample a number of Flower ClientProxy instances.""" - - # Block until at least num_clients are connected. 
- if min_num_clients is None: - min_num_clients = num_clients - self.wait_for(min_num_clients) - - # Sample clients which meet the criterion - available_cids = list(self.clients) - if criterion is not None: - available_cids = [ - cid for cid in available_cids if criterion.select(self.clients[cid]) - ] - - if num_clients > len(available_cids): - log( - INFO, - "Sampling failed: number of available clients" - " (%s) is less than number of requested clients (%s).", - len(available_cids), - num_clients, - ) - return [] - - # Return all available clients - return [self.clients[cid] for cid in available_cids] diff --git a/examples/xgboost-comprehensive/sim.py b/examples/xgboost-comprehensive/sim.py deleted file mode 100644 index c29e762370fa..000000000000 --- a/examples/xgboost-comprehensive/sim.py +++ /dev/null @@ -1,183 +0,0 @@ -import warnings -from logging import INFO - -import flwr as fl -import xgboost as xgb -from flwr.common.logger import log -from flwr.server.strategy import FedXgbBagging, FedXgbCyclic -from flwr_datasets import FederatedDataset -from tqdm import tqdm - -from client_utils import XgbClient -from dataset import ( - instantiate_partitioner, - resplit, - separate_xy, - train_test_split, - transform_dataset_to_dmatrix, -) -from server_utils import ( - CyclicClientManager, - eval_config, - evaluate_metrics_aggregation, - fit_config, - get_evaluate_fn, -) -from utils import BST_PARAMS, NUM_LOCAL_ROUND, sim_args_parser - -warnings.filterwarnings("ignore", category=UserWarning) - - -def get_client_fn( - train_data_list, valid_data_list, train_method, params, num_local_round -): - """Return a function to construct a client. - - The VirtualClientEngine will execute this function whenever a client is sampled by - the strategy to participate. 
- """ - - def client_fn(cid: str) -> fl.client.Client: - """Construct a FlowerClient with its own dataset partition.""" - x_train, y_train = train_data_list[int(cid)][0] - x_valid, y_valid = valid_data_list[int(cid)][0] - - # Reformat data to DMatrix - train_dmatrix = xgb.DMatrix(x_train, label=y_train) - valid_dmatrix = xgb.DMatrix(x_valid, label=y_valid) - - # Fetch the number of examples - num_train = train_data_list[int(cid)][1] - num_val = valid_data_list[int(cid)][1] - - # Create and return client - return XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - train_method, - ) - - return client_fn - - -def main(): - # Parse arguments for experimental settings - args = sim_args_parser() - - # Load (HIGGS) dataset and conduct partitioning - partitioner = instantiate_partitioner( - partitioner_type=args.partitioner_type, num_partitions=args.pool_size - ) - fds = FederatedDataset( - dataset="jxie/higgs", - partitioners={"train": partitioner}, - preprocessor=resplit, - ) - - # Load centralised test set - if args.centralised_eval or args.centralised_eval_client: - log(INFO, "Loading centralised test set...") - test_data = fds.load_split("test") - test_data.set_format("numpy") - num_test = test_data.shape[0] - test_dmatrix = transform_dataset_to_dmatrix(test_data) - - # Load partitions and reformat data to DMatrix for xgboost - log(INFO, "Loading client local partitions...") - train_data_list = [] - valid_data_list = [] - - # Load and process all client partitions. This upfront cost is amortized soon - # after the simulation begins since clients wont need to preprocess their partition. 
- for partition_id in tqdm(range(args.pool_size), desc="Extracting client partition"): - # Extract partition for client with partition_id - partition = fds.load_partition(partition_id=partition_id, split="train") - partition.set_format("numpy") - - if args.centralised_eval_client: - # Use centralised test set for evaluation - train_data = partition - num_train = train_data.shape[0] - x_test, y_test = separate_xy(test_data) - valid_data_list.append(((x_test, y_test), num_test)) - else: - # Train/test splitting - train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=args.test_fraction, seed=args.seed - ) - x_valid, y_valid = separate_xy(valid_data) - valid_data_list.append(((x_valid, y_valid), num_val)) - - x_train, y_train = separate_xy(train_data) - train_data_list.append(((x_train, y_train), num_train)) - - # Define strategy - if args.train_method == "bagging": - # Bagging training - strategy = FedXgbBagging( - evaluate_function=( - get_evaluate_fn(test_dmatrix) if args.centralised_eval else None - ), - fraction_fit=(float(args.num_clients_per_round) / args.pool_size), - min_fit_clients=args.num_clients_per_round, - min_available_clients=args.pool_size, - min_evaluate_clients=( - args.num_evaluate_clients if not args.centralised_eval else 0 - ), - fraction_evaluate=1.0 if not args.centralised_eval else 0.0, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - evaluate_metrics_aggregation_fn=( - evaluate_metrics_aggregation if not args.centralised_eval else None - ), - ) - else: - # Cyclic training - strategy = FedXgbCyclic( - fraction_fit=1.0, - min_available_clients=args.pool_size, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, - on_evaluate_config_fn=eval_config, - on_fit_config_fn=fit_config, - ) - - # Resources to be assigned to each virtual client - # In this example we use CPU by default - client_resources = { - "num_cpus": args.num_cpus_per_client, - "num_gpus": 
0.0, - } - - # Hyper-parameters for xgboost training - num_local_round = NUM_LOCAL_ROUND - params = BST_PARAMS - - # Setup learning rate - if args.train_method == "bagging" and args.scaled_lr: - new_lr = params["eta"] / args.pool_size - params.update({"eta": new_lr}) - - # Start simulation - fl.simulation.start_simulation( - client_fn=get_client_fn( - train_data_list, - valid_data_list, - args.train_method, - params, - num_local_round, - ), - num_clients=args.pool_size, - client_resources=client_resources, - config=fl.server.ServerConfig(num_rounds=args.num_rounds), - strategy=strategy, - client_manager=CyclicClientManager() if args.train_method == "cyclic" else None, - ) - - -if __name__ == "__main__": - main() diff --git a/examples/xgboost-comprehensive/utils.py b/examples/xgboost-comprehensive/utils.py deleted file mode 100644 index c3582d803a6a..000000000000 --- a/examples/xgboost-comprehensive/utils.py +++ /dev/null @@ -1,179 +0,0 @@ -import argparse - -# Hyper-parameters for xgboost training -NUM_LOCAL_ROUND = 1 -BST_PARAMS = { - "objective": "binary:logistic", - "eta": 0.1, # Learning rate - "max_depth": 8, - "eval_metric": "auc", - "nthread": 16, - "num_parallel_tree": 1, - "subsample": 1, - "tree_method": "hist", -} - - -def client_args_parser(): - """Parse arguments to define experimental settings on client side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--num-partitions", default=10, type=int, help="Number of partitions." 
- ) - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--partition-id", - default=0, - type=int, - help="Partition ID used for the current client.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." - ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args - - -def server_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - parser.add_argument( - "--pool-size", default=2, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=5, type=int, help="Number of FL rounds." 
- ) - parser.add_argument( - "--num-clients-per-round", - default=2, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=2, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - - args = parser.parse_args() - return args - - -def sim_args_parser(): - """Parse arguments to define experimental settings on server side.""" - parser = argparse.ArgumentParser() - - parser.add_argument( - "--train-method", - default="bagging", - type=str, - choices=["bagging", "cyclic"], - help="Training methods selected from bagging aggregation or cyclic training.", - ) - - # Server side - parser.add_argument( - "--pool-size", default=5, type=int, help="Number of total clients." - ) - parser.add_argument( - "--num-rounds", default=30, type=int, help="Number of FL rounds." - ) - parser.add_argument( - "--num-clients-per-round", - default=5, - type=int, - help="Number of clients participate in training each round.", - ) - parser.add_argument( - "--num-evaluate-clients", - default=5, - type=int, - help="Number of clients selected for evaluation.", - ) - parser.add_argument( - "--centralised-eval", - action="store_true", - help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", - ) - parser.add_argument( - "--num-cpus-per-client", - default=2, - type=int, - help="Number of CPUs used for per client.", - ) - - # Client side - parser.add_argument( - "--partitioner-type", - default="uniform", - type=str, - choices=["uniform", "linear", "square", "exponential"], - help="Partitioner types.", - ) - parser.add_argument( - "--seed", default=42, type=int, help="Seed used for train/test splitting." 
- ) - parser.add_argument( - "--test-fraction", - default=0.2, - type=float, - help="Test fraction for train/test splitting.", - ) - parser.add_argument( - "--centralised-eval-client", - action="store_true", - help="Conduct evaluation on centralised test set (True), or on hold-out data (False).", - ) - parser.add_argument( - "--scaled-lr", - action="store_true", - help="Perform scaled learning rate based on the number of clients (True).", - ) - - args = parser.parse_args() - return args diff --git a/examples/xgboost-comprehensive/xgboost_comprehensive/__init__.py b/examples/xgboost-comprehensive/xgboost_comprehensive/__init__.py new file mode 100644 index 000000000000..1716a1a221d0 --- /dev/null +++ b/examples/xgboost-comprehensive/xgboost_comprehensive/__init__.py @@ -0,0 +1 @@ +"""xgboost_comprehensive: A Flower / XGBoost app.""" diff --git a/examples/xgboost-comprehensive/xgboost_comprehensive/client_app.py b/examples/xgboost-comprehensive/xgboost_comprehensive/client_app.py new file mode 100644 index 000000000000..d4217f5c3680 --- /dev/null +++ b/examples/xgboost-comprehensive/xgboost_comprehensive/client_app.py @@ -0,0 +1,165 @@ +"""xgboost_comprehensive: A Flower / XGBoost app.""" + +import warnings + +import xgboost as xgb +from xgboost_comprehensive.task import load_data, replace_keys + +from flwr.client import Client, ClientApp +from flwr.common import ( + Code, + EvaluateIns, + EvaluateRes, + FitIns, + FitRes, + Parameters, + Status, +) +from flwr.common.config import unflatten_dict +from flwr.common.context import Context + +warnings.filterwarnings("ignore", category=UserWarning) + + +# Define Flower-Xgb Client and client_fn +class XgbClient(Client): + def __init__( + self, + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ): + self.train_dmatrix = train_dmatrix + self.valid_dmatrix = valid_dmatrix + self.num_train = num_train + self.num_val = num_val + self.num_local_round = num_local_round + 
self.params = params + self.train_method = train_method + + def _local_boost(self, bst_input): + # Update trees based on local training data. + for i in range(self.num_local_round): + bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) + + # Bagging: extract the last N=num_local_round trees for sever aggregation + # Cyclic: return the entire model + bst = ( + bst_input[ + bst_input.num_boosted_rounds() + - self.num_local_round : bst_input.num_boosted_rounds() + ] + if self.train_method == "bagging" + else bst_input + ) + + return bst + + def fit(self, ins: FitIns) -> FitRes: + global_round = int(ins.config["global_round"]) + if global_round == 1: + # First round local training + bst = xgb.train( + self.params, + self.train_dmatrix, + num_boost_round=self.num_local_round, + evals=[(self.valid_dmatrix, "validate"), (self.train_dmatrix, "train")], + ) + else: + bst = xgb.Booster(params=self.params) + global_model = bytearray(ins.parameters.tensors[0]) + + # Load global model into booster + bst.load_model(global_model) + + # Local training + bst = self._local_boost(bst) + + # Save model + local_model = bst.save_raw("json") + local_model_bytes = bytes(local_model) + + return FitRes( + status=Status( + code=Code.OK, + message="OK", + ), + parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), + num_examples=self.num_train, + metrics={}, + ) + + def evaluate(self, ins: EvaluateIns) -> EvaluateRes: + # Load global model + bst = xgb.Booster(params=self.params) + para_b = bytearray(ins.parameters.tensors[0]) + bst.load_model(para_b) + + # Run evaluation + eval_results = bst.eval_set( + evals=[(self.valid_dmatrix, "valid")], + iteration=bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + + return EvaluateRes( + status=Status( + code=Code.OK, + message="OK", + ), + loss=0.0, + num_examples=self.num_val, + metrics={"AUC": auc}, + ) + + +def client_fn(context: Context): + # Load model and data + 
partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + + # Parse configs + cfg = replace_keys(unflatten_dict(context.run_config)) + num_local_round = cfg["local_epochs"] + train_method = cfg["train_method"] + params = cfg["params"] + partitioner_type = cfg["partitioner_type"] + seed = cfg["seed"] + test_fraction = cfg["test_fraction"] + centralised_eval_client = cfg["centralised_eval_client"] + + # Load training and validation data + train_dmatrix, valid_dmatrix, num_train, num_val = load_data( + partitioner_type, + partition_id, + num_partitions, + centralised_eval_client, + test_fraction, + seed, + ) + + # Setup learning rate + if cfg["scaled_lr"]: + new_lr = cfg["params"]["eta"] / num_partitions + cfg["params"].update({"eta": new_lr}) + + # Return Client instance + return XgbClient( + train_dmatrix, + valid_dmatrix, + num_train, + num_val, + num_local_round, + params, + train_method, + ) + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/xgboost-comprehensive/xgboost_comprehensive/server_app.py b/examples/xgboost-comprehensive/xgboost_comprehensive/server_app.py new file mode 100644 index 000000000000..eebaad403259 --- /dev/null +++ b/examples/xgboost-comprehensive/xgboost_comprehensive/server_app.py @@ -0,0 +1,168 @@ +"""xgboost_comprehensive: A Flower / XGBoost app.""" + +from logging import INFO +from typing import Dict, List, Optional + +import xgboost as xgb +from xgboost_comprehensive.task import replace_keys, transform_dataset_to_dmatrix + +from datasets import load_dataset +from flwr.common import Context, Parameters, Scalar +from flwr.common.config import unflatten_dict +from flwr.common.logger import log +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.client_manager import SimpleClientManager +from flwr.server.client_proxy import ClientProxy +from flwr.server.criterion import Criterion +from flwr.server.strategy import 
FedXgbBagging, FedXgbCyclic + + +class CyclicClientManager(SimpleClientManager): + """Provides a cyclic client selection rule.""" + + def sample( + self, + num_clients: int, + min_num_clients: Optional[int] = None, + criterion: Optional[Criterion] = None, + ) -> List[ClientProxy]: + """Sample a number of Flower ClientProxy instances.""" + + # Block until at least num_clients are connected. + if min_num_clients is None: + min_num_clients = num_clients + self.wait_for(min_num_clients) + + # Sample clients which meet the criterion + available_cids = list(self.clients) + if criterion is not None: + available_cids = [ + cid for cid in available_cids if criterion.select(self.clients[cid]) + ] + + if num_clients > len(available_cids): + log( + INFO, + "Sampling failed: number of available clients" + " (%s) is less than number of requested clients (%s).", + len(available_cids), + num_clients, + ) + return [] + + # Return all available clients + return [self.clients[cid] for cid in available_cids] + + +def get_evaluate_fn(test_data, params): + """Return a function for centralised evaluation.""" + + def evaluate_fn( + server_round: int, parameters: Parameters, config: Dict[str, Scalar] + ): + # If at the first round, skip the evaluation + if server_round == 0: + return 0, {} + else: + bst = xgb.Booster(params=params) + for para in parameters.tensors: + para_b = bytearray(para) + + # Load global model + bst.load_model(para_b) + # Run evaluation + eval_results = bst.eval_set( + evals=[(test_data, "valid")], + iteration=bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + + # Save results to disk. + # Note we add new entry to the same file with each call to this function. 
+ with open(f"./centralised_eval.txt", "a", encoding="utf-8") as fp: + fp.write(f"Round:{server_round},AUC:{auc}\n") + + return 0, {"AUC": auc} + + return evaluate_fn + + +def evaluate_metrics_aggregation(eval_metrics): + """Return an aggregated metric (AUC) for evaluation.""" + total_num = sum([num for num, _ in eval_metrics]) + auc_aggregated = ( + sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num + ) + metrics_aggregated = {"AUC": auc_aggregated} + return metrics_aggregated + + +def config_func(rnd: int) -> Dict[str, str]: + """Return a configuration with global epochs.""" + config = { + "global_round": str(rnd), + } + return config + + +def server_fn(context: Context): + # Read from config + cfg = replace_keys(unflatten_dict(context.run_config)) + num_rounds = cfg["num_server_rounds"] + fraction_fit = cfg["fraction_fit"] + fraction_evaluate = cfg["fraction_evaluate"] + train_method = cfg["train_method"] + params = cfg["params"] + centralised_eval = cfg["centralised_eval"] + + if centralised_eval: + # This is the exact same dataset as the one downloaded by the clients via + # FlowerDatasets. However, we don't use FlowerDatasets for the server since + # partitioning is not needed. 
+ # We make use of the "test" split only + test_set = load_dataset("jxie/higgs")["test"] + test_set.set_format("numpy") + test_dmatrix = transform_dataset_to_dmatrix(test_set) + + # Init an empty Parameter + parameters = Parameters(tensor_type="", tensors=[]) + + # Define strategy + if train_method == "bagging": + # Bagging training + strategy = FedXgbBagging( + evaluate_function=( + get_evaluate_fn(test_dmatrix, params) if centralised_eval else None + ), + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate if not centralised_eval else 0.0, + on_evaluate_config_fn=config_func, + on_fit_config_fn=config_func, + evaluate_metrics_aggregation_fn=( + evaluate_metrics_aggregation if not centralised_eval else None + ), + initial_parameters=parameters, + ) + else: + # Cyclic training + strategy = FedXgbCyclic( + fraction_fit=1.0, + fraction_evaluate=1.0, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, + on_evaluate_config_fn=config_func, + on_fit_config_fn=config_func, + initial_parameters=parameters, + ) + + config = ServerConfig(num_rounds=num_rounds) + client_manager = CyclicClientManager() if train_method == "cyclic" else None + + return ServerAppComponents( + strategy=strategy, config=config, client_manager=client_manager + ) + + +# Create ServerApp +app = ServerApp( + server_fn=server_fn, +) diff --git a/examples/xgboost-comprehensive/xgboost_comprehensive/task.py b/examples/xgboost-comprehensive/xgboost_comprehensive/task.py new file mode 100644 index 000000000000..7454319de38e --- /dev/null +++ b/examples/xgboost-comprehensive/xgboost_comprehensive/task.py @@ -0,0 +1,129 @@ +"""xgboost_comprehensive: A Flower / XGBoost app.""" + +from logging import INFO + +import xgboost as xgb +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import ( + ExponentialPartitioner, + IidPartitioner, + LinearPartitioner, + SquarePartitioner, +) + +from datasets import DatasetDict, concatenate_datasets +from flwr.common import log 
+ +CORRELATION_TO_PARTITIONER = { + "uniform": IidPartitioner, + "linear": LinearPartitioner, + "square": SquarePartitioner, + "exponential": ExponentialPartitioner, +} + +fds = None # Cache FederatedDataset + + +def train_test_split(partition, test_fraction, seed): + """Split the data into train and validation set given split rate.""" + train_test = partition.train_test_split(test_size=test_fraction, seed=seed) + partition_train = train_test["train"] + partition_test = train_test["test"] + + num_train = len(partition_train) + num_test = len(partition_test) + + return partition_train, partition_test, num_train, num_test + + +def transform_dataset_to_dmatrix(data): + """Transform dataset to DMatrix format for xgboost.""" + x = data["inputs"] + y = data["label"] + new_data = xgb.DMatrix(x, label=y) + return new_data + + +def instantiate_fds(partitioner_type, num_partitions): + """Initialize FederatedDataset.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = CORRELATION_TO_PARTITIONER[partitioner_type]( + num_partitions=num_partitions + ) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + preprocessor=resplit, + ) + return fds + + +def load_data( + partitioner_type, + partition_id, + num_partitions, + centralised_eval_client, + test_fraction, + seed, +): + """Load partition data.""" + fds_ = instantiate_fds(partitioner_type, num_partitions) + partition = fds_.load_partition(partition_id) + partition.set_format("numpy") + + if centralised_eval_client: + train_data = partition + num_train = train_data.shape[0] + + # Use centralised test set for evaluation + valid_data = fds_.load_split("test") + valid_data.set_format("numpy") + num_val = valid_data.shape[0] + else: + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=test_fraction, seed=seed + ) + + # Reformat data to DMatrix for xgboost + log(INFO, "Reformatting data...") + 
train_dmatrix = transform_dataset_to_dmatrix(train_data) + valid_dmatrix = transform_dataset_to_dmatrix(valid_data) + + return train_dmatrix, valid_dmatrix, num_train, num_val + + +def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict + + +def resplit(dataset: DatasetDict) -> DatasetDict: + """Increase the quantity of centralised test samples from 500K to 1M.""" + return DatasetDict( + { + "train": dataset["train"].select( + range(0, dataset["train"].num_rows - 500_000) + ), + "test": concatenate_datasets( + [ + dataset["train"].select( + range( + dataset["train"].num_rows - 500_000, + dataset["train"].num_rows, + ) + ), + dataset["test"], + ] + ), + } + ) diff --git a/examples/xgboost-quickstart/README.md b/examples/xgboost-quickstart/README.md index fa3e9d0dc6fb..a7b047c090f0 100644 --- a/examples/xgboost-quickstart/README.md +++ b/examples/xgboost-quickstart/README.md @@ -4,7 +4,7 @@ dataset: [HIGGS] framework: [xgboost] --- -# Flower Example using XGBoost +# Federated Learning with XGBoost and Flower (Quickstart Example) This example demonstrates how to perform EXtreme Gradient Boosting (XGBoost) within Flower using `xgboost` package. We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset for this example to perform a binary classification task. @@ -12,72 +12,60 @@ Tree-based with bagging method is used for aggregation on the server. This project provides a minimal code example to enable you to get started quickly. For a more comprehensive code example, take a look at [xgboost-comprehensive](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive). 
-## Project Setup +## Set up the project -Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: +### Clone the project -```shell -git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/xgboost-quickstart . && rm -rf flower && cd xgboost-quickstart -``` - -This will create a new directory called `xgboost-quickstart` containing the following files: - -``` --- README.md <- Your're reading this right now --- server.py <- Defines the server-side logic --- client.py <- Defines the client-side logic --- run.sh <- Commands to run experiments --- pyproject.toml <- Example dependencies -``` - -### Installing Dependencies - -Project dependencies (such as `xgboost` and `flwr`) are defined in `pyproject.toml`. You can install the dependencies by invoking `pip`: +Start by cloning the example project: ```shell -# From a new python environment, run: -pip install . +git clone --depth=1 https://github.com/adap/flower.git _tmp \ + && mv _tmp/examples/xgboost-quickstart . \ + && rm -rf _tmp \ + && cd xgboost-quickstart ``` -Then, to verify that everything works correctly you can run the following command: +This will create a new directory called `xgboost-quickstart` with the following structure: ```shell -python3 -c "import flwr" +xgboost-quickstart +├── xgboost_quickstart +│ ├── __init__.py +│ ├── client_app.py # Defines your ClientApp +│ ├── server_app.py # Defines your ServerApp +│ └── task.py # Defines your utilities and data loading +├── pyproject.toml # Project metadata like dependencies and configs +└── README.md ``` -If you don't see any errors you're good to go! +### Install dependencies and project -## Run Federated Learning with XGBoost and Flower +Install the dependencies defined in `pyproject.toml` as well as the `xgboost_quickstart` package. -Afterwards you are ready to start the Flower server as well as the clients. 
-You can simply start the server in a terminal as follows: - -```shell -python3 server.py +```bash +pip install -e . ``` -Now you are ready to start the Flower clients which will participate in the learning. -To do so simply open two more terminal windows and run the following commands. +## Run the project -Start client 1 in the first terminal: +You can run your Flower project in both _simulation_ and _deployment_ mode without making changes to the code. If you are starting with Flower, we recommend you using the _simulation_ mode as it requires fewer components to be launched manually. By default, `flwr run` will make use of the Simulation Engine. -```shell -python3 client.py --partition-id=0 +### Run with the Simulation Engine + +```bash +flwr run . ``` -Start client 2 in the second terminal: +You can also override some of the settings for your `ClientApp` and `ServerApp` defined in `pyproject.toml`. For example: -```shell -python3 client.py --partition-id=1 +```bash +flwr run . --run-config "num-server-rounds=5 params.eta=0.05" ``` -You will see that XGBoost is starting a federated training. - -Alternatively, you can use `run.sh` to run the same experiment in a single terminal as follows: +> \[!TIP\] +> For a more detailed walk-through check our [quickstart XGBoost tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) -```shell -poetry run ./run.sh -``` +### Run with the Deployment Engine -Look at the [code](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) -and [tutorial](https://flower.ai/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. +> \[!NOTE\] +> An update to this example will show how to run this Flower application with the Deployment Engine and TLS certificates, or with Docker. 
diff --git a/examples/xgboost-quickstart/client.py b/examples/xgboost-quickstart/client.py deleted file mode 100644 index d505a7ede785..000000000000 --- a/examples/xgboost-quickstart/client.py +++ /dev/null @@ -1,207 +0,0 @@ -import argparse -import warnings -from logging import INFO -from typing import Union - -import flwr as fl -import xgboost as xgb -from datasets import Dataset, DatasetDict -from flwr.common import ( - Code, - EvaluateIns, - EvaluateRes, - FitIns, - FitRes, - GetParametersIns, - GetParametersRes, - Parameters, - Status, -) -from flwr.common.logger import log -from flwr_datasets import FederatedDataset -from flwr_datasets.partitioner import IidPartitioner - -warnings.filterwarnings("ignore", category=UserWarning) - -# Define arguments parser for the client/partition ID. -parser = argparse.ArgumentParser() -parser.add_argument( - "--partition-id", - default=0, - type=int, - help="Partition ID used for the current client.", -) -args = parser.parse_args() - - -# Define data partitioning related functions -def train_test_split(partition: Dataset, test_fraction: float, seed: int): - """Split the data into train and validation set given split rate.""" - train_test = partition.train_test_split(test_size=test_fraction, seed=seed) - partition_train = train_test["train"] - partition_test = train_test["test"] - - num_train = len(partition_train) - num_test = len(partition_test) - - return partition_train, partition_test, num_train, num_test - - -def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core.DMatrix: - """Transform dataset to DMatrix format for xgboost.""" - x = data["inputs"] - y = data["label"] - new_data = xgb.DMatrix(x, label=y) - return new_data - - -# Load (HIGGS) dataset and conduct partitioning -# We use a small subset (num_partitions=30) of the dataset for demonstration to speed up the data loading process. 
-partitioner = IidPartitioner(num_partitions=30) -fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) - -# Load the partition for this `partition_id` -log(INFO, "Loading partition...") -partition = fds.load_partition(partition_id=args.partition_id, split="train") -partition.set_format("numpy") - -# Train/test splitting -train_data, valid_data, num_train, num_val = train_test_split( - partition, test_fraction=0.2, seed=42 -) - -# Reformat data to DMatrix for xgboost -log(INFO, "Reformatting data...") -train_dmatrix = transform_dataset_to_dmatrix(train_data) -valid_dmatrix = transform_dataset_to_dmatrix(valid_data) - -# Hyper-parameters for xgboost training -num_local_round = 1 -params = { - "objective": "binary:logistic", - "eta": 0.1, # Learning rate - "max_depth": 8, - "eval_metric": "auc", - "nthread": 16, - "num_parallel_tree": 1, - "subsample": 1, - "tree_method": "hist", -} - - -# Define Flower client -class XgbClient(fl.client.Client): - def __init__( - self, - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - num_local_round, - params, - ): - self.train_dmatrix = train_dmatrix - self.valid_dmatrix = valid_dmatrix - self.num_train = num_train - self.num_val = num_val - self.num_local_round = num_local_round - self.params = params - - def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: - _ = (self, ins) - return GetParametersRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[]), - ) - - def _local_boost(self, bst_input): - # Update trees based on local training data. 
- for i in range(self.num_local_round): - bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) - - # Bagging: extract the last N=num_local_round trees for sever aggregation - bst = bst_input[ - bst_input.num_boosted_rounds() - - self.num_local_round : bst_input.num_boosted_rounds() - ] - - return bst - - def fit(self, ins: FitIns) -> FitRes: - global_round = int(ins.config["global_round"]) - if global_round == 1: - # First round local training - bst = xgb.train( - self.params, - self.train_dmatrix, - num_boost_round=self.num_local_round, - evals=[(self.valid_dmatrix, "validate"), (self.train_dmatrix, "train")], - ) - else: - bst = xgb.Booster(params=self.params) - for item in ins.parameters.tensors: - global_model = bytearray(item) - - # Load global model into booster - bst.load_model(global_model) - - # Local training - bst = self._local_boost(bst) - - # Save model - local_model = bst.save_raw("json") - local_model_bytes = bytes(local_model) - - return FitRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), - num_examples=self.num_train, - metrics={}, - ) - - def evaluate(self, ins: EvaluateIns) -> EvaluateRes: - # Load global model - bst = xgb.Booster(params=self.params) - for para in ins.parameters.tensors: - para_b = bytearray(para) - bst.load_model(para_b) - - # Run evaluation - eval_results = bst.eval_set( - evals=[(self.valid_dmatrix, "valid")], - iteration=bst.num_boosted_rounds() - 1, - ) - auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - - global_round = ins.config["global_round"] - log(INFO, f"AUC = {auc} at round {global_round}") - - return EvaluateRes( - status=Status( - code=Code.OK, - message="OK", - ), - loss=0.0, - num_examples=self.num_val, - metrics={"AUC": auc}, - ) - - -# Start Flower client -fl.client.start_client( - server_address="127.0.0.1:8080", - client=XgbClient( - train_dmatrix, - valid_dmatrix, - num_train, - num_val, - 
num_local_round, - params, - ).to_client(), -) diff --git a/examples/xgboost-quickstart/pyproject.toml b/examples/xgboost-quickstart/pyproject.toml index f1e451fe779a..3bfedb6b1d58 100644 --- a/examples/xgboost-quickstart/pyproject.toml +++ b/examples/xgboost-quickstart/pyproject.toml @@ -3,17 +3,45 @@ requires = ["hatchling"] build-backend = "hatchling.build" [project] -name = "quickstart-xgboost" -version = "0.1.0" -description = "XGBoost Federated Learning Quickstart with Flower" -authors = [ - { name = "The Flower Authors", email = "hello@flower.ai" }, -] +name = "xgboost_quickstart" +version = "1.0.0" +description = "Federated Learning with XGBoost and Flower (Quickstart Example)" +license = "Apache-2.0" dependencies = [ - "flwr>=1.8.0,<2.0", - "flwr-datasets>=0.1.0,<1.0.0", - "xgboost>=2.0.0,<3.0.0", + "flwr-nightly[simulation]==1.11.0.dev20240826", + "flwr-datasets>=0.3.0", + "xgboost>=2.0.0", ] [tool.hatch.build.targets.wheel] packages = ["."] + +[tool.flwr.app] +publisher = "flwrlabs" + +[tool.flwr.app.components] +serverapp = "xgboost_quickstart.server_app:app" +clientapp = "xgboost_quickstart.client_app:app" + +[tool.flwr.app.config] +# ServerApp +num-server-rounds = 3 +fraction-fit = 0.1 +fraction-evaluate = 0.1 + +# ClientApp +local-epochs = 1 +params.objective = "binary:logistic" +params.eta = 0.1 # Learning rate +params.max-depth = 8 +params.eval-metric = "auc" +params.nthread = 16 +params.num-parallel-tree = 1 +params.subsample = 1 +params.tree-method = "hist" + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 20 diff --git a/examples/xgboost-quickstart/run.sh b/examples/xgboost-quickstart/run.sh deleted file mode 100755 index b35af58222ab..000000000000 --- a/examples/xgboost-quickstart/run.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -set -e -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ - -echo "Starting server" -python server.py & -sleep 5 # 
Sleep for 5s to give the server enough time to start - -for i in `seq 0 1`; do - echo "Starting client $i" - python3 client.py --partition-id=$i & -done - -# Enable CTRL+C to stop all background processes -trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM -# Wait for all background processes to complete -wait diff --git a/examples/xgboost-quickstart/server.py b/examples/xgboost-quickstart/server.py deleted file mode 100644 index 2246d32686a4..000000000000 --- a/examples/xgboost-quickstart/server.py +++ /dev/null @@ -1,48 +0,0 @@ -from typing import Dict - -import flwr as fl -from flwr.server.strategy import FedXgbBagging - -# FL experimental settings -pool_size = 2 -num_rounds = 5 -num_clients_per_round = 2 -num_evaluate_clients = 2 - - -def evaluate_metrics_aggregation(eval_metrics): - """Return an aggregated metric (AUC) for evaluation.""" - total_num = sum([num for num, _ in eval_metrics]) - auc_aggregated = ( - sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num - ) - metrics_aggregated = {"AUC": auc_aggregated} - return metrics_aggregated - - -def config_func(rnd: int) -> Dict[str, str]: - """Return a configuration with global epochs.""" - config = { - "global_round": str(rnd), - } - return config - - -# Define strategy -strategy = FedXgbBagging( - fraction_fit=(float(num_clients_per_round) / pool_size), - min_fit_clients=num_clients_per_round, - min_available_clients=pool_size, - min_evaluate_clients=num_evaluate_clients, - fraction_evaluate=1.0, - evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, - on_evaluate_config_fn=config_func, - on_fit_config_fn=config_func, -) - -# Start Flower server -fl.server.start_server( - server_address="0.0.0.0:8080", - config=fl.server.ServerConfig(num_rounds=num_rounds), - strategy=strategy, -) diff --git a/examples/xgboost-quickstart/xgboost_quickstart/__init__.py b/examples/xgboost-quickstart/xgboost_quickstart/__init__.py new file mode 100644 index 000000000000..470360b377a6 --- /dev/null 
+++ b/examples/xgboost-quickstart/xgboost_quickstart/__init__.py @@ -0,0 +1 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" diff --git a/examples/xgboost-comprehensive/client_utils.py b/examples/xgboost-quickstart/xgboost_quickstart/client_app.py similarity index 66% rename from examples/xgboost-comprehensive/client_utils.py rename to examples/xgboost-quickstart/xgboost_quickstart/client_app.py index 0ef868c505b8..3aa199a10274 100644 --- a/examples/xgboost-comprehensive/client_utils.py +++ b/examples/xgboost-quickstart/xgboost_quickstart/client_app.py @@ -1,22 +1,29 @@ -from logging import INFO +"""xgboost_quickstart: A Flower / XGBoost app.""" + +import warnings + +from flwr.common.context import Context -import flwr as fl import xgboost as xgb +from flwr.client import Client, ClientApp +from flwr.common.config import unflatten_dict from flwr.common import ( Code, EvaluateIns, EvaluateRes, FitIns, FitRes, - GetParametersIns, - GetParametersRes, Parameters, Status, ) -from flwr.common.logger import log +from xgboost_quickstart.task import load_data, replace_keys + +warnings.filterwarnings("ignore", category=UserWarning) -class XgbClient(fl.client.Client): + +# Define Flower Client and client_fn +class FlowerClient(Client): def __init__( self, train_dmatrix, @@ -25,7 +32,6 @@ def __init__( num_val, num_local_round, params, - train_method, ): self.train_dmatrix = train_dmatrix self.valid_dmatrix = valid_dmatrix @@ -33,17 +39,6 @@ def __init__( self.num_val = num_val self.num_local_round = num_local_round self.params = params - self.train_method = train_method - - def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: - _ = (self, ins) - return GetParametersRes( - status=Status( - code=Code.OK, - message="OK", - ), - parameters=Parameters(tensor_type="", tensors=[]), - ) def _local_boost(self, bst_input): # Update trees based on local training data. 
@@ -51,15 +46,10 @@ def _local_boost(self, bst_input): bst_input.update(self.train_dmatrix, bst_input.num_boosted_rounds()) # Bagging: extract the last N=num_local_round trees for sever aggregation - # Cyclic: return the entire model - bst = ( - bst_input[ - bst_input.num_boosted_rounds() - - self.num_local_round : bst_input.num_boosted_rounds() - ] - if self.train_method == "bagging" - else bst_input - ) + bst = bst_input[ + bst_input.num_boosted_rounds() + - self.num_local_round : bst_input.num_boosted_rounds() + ] return bst @@ -75,8 +65,7 @@ def fit(self, ins: FitIns) -> FitRes: ) else: bst = xgb.Booster(params=self.params) - for item in ins.parameters.tensors: - global_model = bytearray(item) + global_model = bytearray(ins.parameters.tensors[0]) # Load global model into booster bst.load_model(global_model) @@ -101,8 +90,7 @@ def fit(self, ins: FitIns) -> FitRes: def evaluate(self, ins: EvaluateIns) -> EvaluateRes: # Load global model bst = xgb.Booster(params=self.params) - for para in ins.parameters.tensors: - para_b = bytearray(para) + para_b = bytearray(ins.parameters.tensors[0]) bst.load_model(para_b) # Run evaluation @@ -112,9 +100,6 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: ) auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) - global_round = ins.config["global_round"] - log(INFO, f"AUC = {auc} at round {global_round}") - return EvaluateRes( status=Status( code=Code.OK, @@ -124,3 +109,31 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: num_examples=self.num_val, metrics={"AUC": auc}, ) + + +def client_fn(context: Context): + # Load model and data + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + train_dmatrix, valid_dmatrix, num_train, num_val = load_data( + partition_id, num_partitions + ) + + cfg = replace_keys(unflatten_dict(context.run_config)) + num_local_round = cfg["local_epochs"] + + # Return Client instance + return FlowerClient( + train_dmatrix, + 
valid_dmatrix, + num_train, + num_val, + num_local_round, + cfg["params"], + ) + + +# Flower ClientApp +app = ClientApp( + client_fn, +) diff --git a/examples/xgboost-quickstart/xgboost_quickstart/server_app.py b/examples/xgboost-quickstart/xgboost_quickstart/server_app.py new file mode 100644 index 000000000000..6b81c6caa785 --- /dev/null +++ b/examples/xgboost-quickstart/xgboost_quickstart/server_app.py @@ -0,0 +1,54 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" + +from typing import Dict + +from flwr.common import Context, Parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedXgbBagging + + +def evaluate_metrics_aggregation(eval_metrics): + """Return an aggregated metric (AUC) for evaluation.""" + total_num = sum([num for num, _ in eval_metrics]) + auc_aggregated = ( + sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num + ) + metrics_aggregated = {"AUC": auc_aggregated} + return metrics_aggregated + + +def config_func(rnd: int) -> Dict[str, str]: + """Return a configuration with global epochs.""" + config = { + "global_round": str(rnd), + } + return config + + +def server_fn(context: Context): + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + fraction_evaluate = context.run_config["fraction-evaluate"] + + # Init an empty Parameter + parameters = Parameters(tensor_type="", tensors=[]) + + # Define strategy + strategy = FedXgbBagging( + fraction_fit=fraction_fit, + fraction_evaluate=fraction_evaluate, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, + on_evaluate_config_fn=config_func, + on_fit_config_fn=config_func, + initial_parameters=parameters, + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp( + server_fn=server_fn, +) diff --git 
a/examples/xgboost-quickstart/xgboost_quickstart/task.py b/examples/xgboost-quickstart/xgboost_quickstart/task.py new file mode 100644 index 000000000000..09916d9ac04a --- /dev/null +++ b/examples/xgboost-quickstart/xgboost_quickstart/task.py @@ -0,0 +1,71 @@ +"""xgboost_quickstart: A Flower / XGBoost app.""" + +from logging import INFO + +import xgboost as xgb +from flwr.common import log +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner + + +def train_test_split(partition, test_fraction, seed): + """Split the data into train and validation set given split rate.""" + train_test = partition.train_test_split(test_size=test_fraction, seed=seed) + partition_train = train_test["train"] + partition_test = train_test["test"] + + num_train = len(partition_train) + num_test = len(partition_test) + + return partition_train, partition_test, num_train, num_test + + +def transform_dataset_to_dmatrix(data): + """Transform dataset to DMatrix format for xgboost.""" + x = data["inputs"] + y = data["label"] + new_data = xgb.DMatrix(x, label=y) + return new_data + + +fds = None # Cache FederatedDataset + + +def load_data(partition_id, num_clients): + """Load partition HIGGS data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_clients) + fds = FederatedDataset( + dataset="jxie/higgs", + partitioners={"train": partitioner}, + ) + + # Load the partition for this `partition_id` + partition = fds.load_partition(partition_id, split="train") + partition.set_format("numpy") + + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=0.2, seed=42 + ) + + # Reformat data to DMatrix for xgboost + log(INFO, "Reformatting data...") + train_dmatrix = transform_dataset_to_dmatrix(train_data) + valid_dmatrix = transform_dataset_to_dmatrix(valid_data) + + return train_dmatrix, valid_dmatrix, num_train, num_val + + 
+def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict diff --git a/glossary/aggregation.mdx b/glossary/aggregation.mdx new file mode 100644 index 000000000000..82cadd6948bb --- /dev/null +++ b/glossary/aggregation.mdx @@ -0,0 +1,18 @@ +--- +title: "Aggregation" +description: "Combine model weights from sampled clients to update the global model. This process enables the global model to learn from each client's data." +date: "2024-05-23" +author: + name: "Charles Beauville" + position: "Machine Learning Engineer" + website: "https://www.linkedin.com/in/charles-beauville/" + github: "github.com/charlesbvll" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +During each Federated Learning round, the server will receive model weights from sampled clients and needs a function to improve its global model using those weights. This is what is called `aggregation`. It can be a simple weighted average function (like `FedAvg`), or can be more complex (e.g. incorporating optimization techniques). The aggregation is where FL's magic happens, it allows the global model to learn and improve from each client's particular data distribution with only their trained weights. + diff --git a/glossary/client.mdx b/glossary/client.mdx new file mode 100644 index 000000000000..52b14f124add --- /dev/null +++ b/glossary/client.mdx @@ -0,0 +1,17 @@ +--- +title: "Client" +description: "A client is any machine with local data that connects to a server, trains on received global model weights, and sends back updated weights. 
Clients may also evaluate global model weights." +date: "2024-05-23" +author: + name: "Charles Beauville" + position: "Machine Learning Engineer" + website: "https://www.linkedin.com/in/charles-beauville/" + github: "github.com/charlesbvll" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +Any machine with access to some data that connects to a server to perform Federated Learning. During each round of FL (if it is sampled), it will receive global model weights from the server, train on the data they have access to, and send the resulting trained weights back to the server. Clients can also be sampled to evaluate the global server weights on the data they have access to, this is called federated evaluation. diff --git a/glossary/docker.mdx b/glossary/docker.mdx new file mode 100644 index 000000000000..9ca079b90f06 --- /dev/null +++ b/glossary/docker.mdx @@ -0,0 +1,22 @@ +--- +title: "Docker" +description: "Docker is a containerization tool that allows for consistent and reliable deployment of applications across different environments." +date: "2024-07-08" +author: + name: "Robert Steiner" + position: "DevOps Engineer at Flower Labs" + website: "https://github.com/Robert-Steiner" +--- + +Docker is an open-source containerization tool for deploying and running applications. Docker +containers encapsulate an application's code, dependencies, and configuration files, allowing +for consistent and reliable deployment across different environments. + +In the context of federated learning, Docker containers can be used to package the entire client +and server application, including all the necessary dependencies, and then deployed on various +devices such as edge devices, cloud servers, or even on-premises servers. 
+ +In Flower, Docker containers are used to containerize various applications like `SuperLink`, +`SuperNode`, and `SuperExec`. Flower's Docker images allow users to quickly get Flower up and +running, reducing the time and effort required to set up and configure the necessary software +and dependencies. diff --git a/glossary/edge-computing.mdx b/glossary/edge-computing.mdx new file mode 100644 index 000000000000..6499a48e8f07 --- /dev/null +++ b/glossary/edge-computing.mdx @@ -0,0 +1,40 @@ +--- +title: "Edge Computing" +description: "Edge computing is a distributed computing concept of bringing compute and data storage as close as possible to the source of data generation and consumption by users." +date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "IoT" + link: "/glossary/iot" + - text: "Run Flower using Docker" + link: "/docs/framework/docker/index.html" + - text: "Flower Clients in C++" + link: "/docs/examples/quickstart-cpp.html" + - text: "Federated Learning on Embedded Devices with Flower" + link: "/docs/examples/embedded-devices.html" +--- + +### Introduction to Edge Computing + +Edge computing is a distributed computing concept of bringing compute and data storage as close as possible to the source of data generation and consumption by users. By performing computation close to the data source, edge computing aims to address limitations typically encountered in centralized computing, such as bandwidth, latency, privacy, and autonomy. + +Edge computing works alongside cloud and fog computing, but each serves different purposes. Cloud computing delivers on-demand resources like data storage, servers, analytics, and networking via the Internet. Fog computing, however, brings computing closer to devices by distributing communication and computation across clusters of IoT or edge devices. 
While edge computing is sometimes used interchangeably with fog computing, edge computing specifically handles data processing directly at or near the devices themselves, whereas fog computing distributes tasks across multiple nodes, bridging the gap between edge devices and the cloud. + +### Advantages and Use Cases of Edge Computing + +The key benefit of edge computing is that the volume of data moved is significantly reduced because computation runs directly on board the device on the acquired data. This reduces the amount of long-distance communication between machines, which improves latency and reduces transmissions costs. Examples of edge computing that benefit from offloading computation include: +1. Smart watches and fitness monitors that measure live health metrics. +2. Facial recognition and wake word detection on smartphones. +3. Real-time lane departure warning systems in road transport that detect lane lines using on-board videos and sensors. + +### Federated Learning in Edge Computing + +When deploying federated learning systems, edge computing is an important component to consider. Edge computing typically take the role of "clients" in federated learning. In a healthcare use case, servers in different hospitals can train models on their local data. In mobile computing, smartphones perform local training (and inference) on user data such as for next word prediction. + +### Edge Computing with Flower + +With the Flower framework, you can easily deploy federated learning workflows and maximise the use of edge computing resources. Flower provides the infrastructure to perform federated learning, federated evaluation, and federated analytics, all in a easy, scalable and secure way. Start with our tutorial on running Federated Learning on Embedded Devices (link [here](https://github.com/adap/flower/tree/main/examples/embedded-devices)), which shows you how to run Flower on NVidia Jetson devices and Raspberry Pis as your edge compute. 
diff --git a/glossary/evaluation.mdx b/glossary/evaluation.mdx new file mode 100644 index 000000000000..bf6b36cd0c4b --- /dev/null +++ b/glossary/evaluation.mdx @@ -0,0 +1,19 @@ +--- +title: "Evaluation" +description: "Evaluation measures how well the trained model performs by testing it on each client's local data, providing insights into its generalizability across varied data sources." +date: "2024-07-08" +author: + name: "Heng Pan" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/pan-h/summary" + github: "github.com/panh99" +related: + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Evaluation in machine learning is the process of assessing a model's performance on unseen data to determine its ability to generalize beyond the training set. This typically involves using a separate test set and various metrics like accuracy or F1-score to measure how well the model performs on new data, ensuring it isn't overfitting or underfitting. + +In federated learning, evaluation (or distributed evaluation) refers to the process of assessing a model's performance across multiple clients, such as devices or data centers. Each client evaluates the model locally using its own data and then sends the results to the server, which aggregates all the evaluation outcomes. This process allows for understanding how well the model generalizes to different data distributions without centralizing sensitive data. \ No newline at end of file diff --git a/glossary/federated-learning.mdx b/glossary/federated-learning.mdx new file mode 100644 index 000000000000..5f6b8a7f1732 --- /dev/null +++ b/glossary/federated-learning.mdx @@ -0,0 +1,14 @@ +--- +title: "Federated Learning" +description: "Federated Learning is a machine learning approach where model training occurs on decentralized devices, preserving data privacy and leveraging local computations." 
+date: "2024-05-23" +author: + name: "Julian Rußmeyer" + position: "UX/UI Designer" + website: "https://www.linkedin.com/in/julian-russmeyer/" +related: + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +Federated learning is an approach to machine learning in which the model is trained on multiple decentralized devices or servers with local data samples without exchanging them. Instead of sending raw data to a central server, updates to the model are calculated locally and only the model parameters are aggregated centrally. In this way, user privacy is maintained and communication costs are reduced, while collaborative model training is enabled. diff --git a/glossary/flower-datasets.mdx b/glossary/flower-datasets.mdx new file mode 100644 index 000000000000..24537dfe223b --- /dev/null +++ b/glossary/flower-datasets.mdx @@ -0,0 +1,27 @@ +--- +title: "Flower Datasets" +description: "Flower Datasets is a library that enables the creation of datasets for federated learning by partitioning centralized datasets to exhibit heterogeneity or using naturally partitioned datasets." +date: "2024-05-24" +author: + name: "Adam Narożniak" + position: "ML Engineer at Flower Labs" + website: "https://discuss.flower.ai/u/adam.narozniak/summary" +related: + - text: "Flower Datasets documentation" + link: "https://flower.ai/docs/datasets/" + - text: "Flower Datasets GitHub page" + link: "https://github.com/adap/flower/tree/main/datasets" +--- + +Flower Datasets is a library that enables the creation of datasets for federated learning/analytics/evaluation by partitioning centralized datasets to exhibit heterogeneity or using naturally partitioned datasets. It was created by the Flower Labs team, which also created Flower - a Friendly Federated Learning Framework. 
+ +The key features include: +* downloading datasets (HuggingFace `datasets` are used under the hood), +* partitioning (simulate different levels of heterogeneity by using one of the implemented partitioning schemes or create your own), +* creating centralized datasets (easily utilize centralized versions of the datasets), +* reproducibility (repeat the experiments with the same results), +* visualization (display the created partitions), +* ML agnostic (easy integration with all popular ML frameworks). + + +It is a supplementary library to Flower, with which it integrates easily. diff --git a/glossary/grpc.mdx b/glossary/grpc.mdx new file mode 100644 index 000000000000..af58758d10bd --- /dev/null +++ b/glossary/grpc.mdx @@ -0,0 +1,44 @@ +--- +title: "gRPC" +description: "gRPC is an inter-process communication technology for building distributed apps. It allows developers to connect, invoke, operate, and debug apps as easily as making a local function call." +date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" + - text: "Protocol Buffers" + link: "/glossary/protocol-buffers" + - text: "Google: gRPC - A true internet scale RPC framework" + link: "https://cloud.google.com/blog/products/gcp/grpc-a-true-internet-scale-rpc-framework-is-now-1-and-ready-for-production-deployments" +--- + +### Introduction to gRPC + +gRPC is an inter-process communication technology for building distributed applications. It allows you to connect, invoke, operate, and debug these applications as easily as making a local function call. It can efficiently connect services in and across data centers. 
It is also applicable in the last mile of distributed computing to connect devices, mobile applications, and browsers to backend services. Supporting various languages like C++, Go, Java, and Python, and platforms like Android and the web, gRPC is a versatile framework for any environment. + +Google first [open-sourced gRPC in 2016](https://cloud.google.com/blog/products/gcp/grpc-a-true-internet-scale-rpc-framework-is-now-1-and-ready-for-production-deployments), basing it on their internal remote procedure call (RPC) framework, Stubby, designed to handle tens of billions of requests per second. Built on HTTP/2 and protocol buffers, gRPC is a popular high-performance framework for developers to built micro-services. Notable early adopters of gRPC include Square, Netflix, CockroachDB, Cisco, and Juniper Networks. + +By default, gRPC uses protocol buffers - Google's language-neutral and platform-neutral mechanism for efficiently serializing structured data - as its interface definition language and its underlying message interchange format. The recommended protocol buffer version as of writing is `proto3`, though other formats like JSON can also be used. + +### How does it work? + +gRPC operates similarly to many RPC systems. First, you specify the methods that can be called remotely on the server application, along with their parameters and return type. Then, with the appropriate code (more on this below), a gRPC client application can directly call these methods on the gRPC server application on a different machine as if it were a local object. Note that the definitions of client and server in gRPC is different to federated learning. For clarity, we will refer to client (server) applications in gRPC as gRPC client (server) applications. + +To use gRPC, follow these steps: +1. Define structure for the data you want to serialize in a proto file definition. `*.proto`. +2. 
Run the protocol buffer compiler `protoc` to generate the data access classes in the preferred language from the `*.proto` service definitions.
+date: "2024-07-12" +author: + name: "Yan Gao" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/yan-gao/" + github: "github.com/yan-gao-GY" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Inference, also known as model prediction, is the stage in the machine learning workflow where a trained model is used to make predictions based on new, unseen data. In a typical machine learning setting, model inference involves the following steps: model loading, where the trained model is loaded into the application or service where it will be used; data preparation, which preprocess the new data in the same way as the training data; and model prediction, where the prepared data is fed into the model to compute outputs based on the learned patterns during training. + +In the context of federated learning (FL), inference can be performed locally on the user's device. A global model updated from FL process is deployed and loaded on individual nodes (e.g., smartphones, hospital servers) for local inference. This allows for keeping all data on-device, enhancing privacy and reducing latency. diff --git a/glossary/iot.mdx b/glossary/iot.mdx new file mode 100644 index 000000000000..ec1932c444f3 --- /dev/null +++ b/glossary/iot.mdx @@ -0,0 +1,48 @@ +--- +title: "IoT" +description: "The Internet of Things (IoT) refers to devices with sensors, software, and tech that connect and exchange data with other systems via the internet or communication networks." 
+date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "Edge Computing" + link: "/glossary/edge-computing" + - text: "Run Flower using Docker" + link: "/docs/framework/docker/index.html" + - text: "Flower Clients in C++" + link: "/docs/examples/quickstart-cpp.html" + - text: "Federated Learning on Embedded Devices with Flower" + link: "/docs/examples/embedded-devices.html" + - text: "Cisco: Redefine Connectivity by Building a Network to Support the Internet of Things" + link: "https://www.cisco.com/c/en/us/solutions/service-provider/a-network-to-support-iot.html" +--- + +### Introduction to IoT + +The Internet of Things (IoT) describes devices with sensors, processing ability, software, and other technologies that connect and exchange data with other devices and systems over the Internet or other communications networks. IoT is often also referred to as Machine-to-Machine (M2M) connections. Examples of IoT include embedded systems, wireless sensor networks, control systems, automation (home and building). In the consumer market, IoT technology is synonymous with smart home products. The IoT architecture bears resemblance to edge computing, but more broadly encompasses edge devices, gateways, and the cloud. + +### Use cases in Federated Learning + +From the perspective of federated learning, IoT systems provide two common configurations: first as a data source for training, and second as a point for running inference/analytics. + +Cisco's Global Cloud Index estimated that nearly 850 Zettabytes (ZB) of data will be generated by all people, machines and things in 2021 ([link](https://www.cisco.com/c/en/us/solutions/service-provider/a-network-to-support-iot.html) to article). 
In IoT, the data is different because not all of the data needs to be stored and instead, the most impactful business values come from running computations on the data. This positions IoT as an ideal candidate for implementing federated learning systems, where a model trained on a datastream from a single device may not be useful, but when trained collaboratively on hundreds or thousands of devices, yields a better performing and generalisable model. The key benefit is that the generated data remains local on the device and can even be offloaded after multiple rounds of federated learning. Some examples are presented below. + +Once a model is trained (e.g. in a federated way), the model can be put into production. What this means is to deploy the model on the IoT device and compute predictions based on the newly generated/acquired data. + +Federated learning in IoT can be organized on two axes: by industry and by use cases. + +For industry applications, examples include: +1. Healthcare - e.g. vital sign, activity levels, or sleep pattern monitoring using fitness trackers. +2. Transportation - e.g. trajectory prediction, object detection, driver drowsiness detection using on-board sensors and cameras. + +For use cases, examples include: +1. Predictive maintenance - e.g. using data acquired from physical sensors (impedance, temperature, vibration, pressure, viscosity, etc ...) +2. Anomaly detection - e.g. using environmental monitoring sensors for predicting air, noise, or water pollution, using internet network traffic data for network intrusion detection, using fiber optic sensors for remote sensing and monitoring, etc ... +3. Quality assurance and quality control - e.g. using in-line optical, acoustic, or sensor data during manufacturing processes to identify faulty products, etc ... + +### Using Flower for Federated Learning with IoT + +Flower is developed with a deployment engine that allows you to easily deploy your federated learning system on IoT devices. 
As a Data Scientist/ML Engineer, you will only need to write ClientApps and deploy them to IoT devices without needing to deal with the infrastructure and networking. To further help deployment, we provide [Docker images](https://hub.docker.com/u/flwr) for the SuperLink, SuperNode, and ServerApp so that you can easily ship the requirements of your Flower applications in containers in a production environment. Lastly, Flower supports the development of both Python and C++ clients, which provides developers with flexible ways of building ClientApps for resource-constrained devices. diff --git a/glossary/medical-ai.mdx b/glossary/medical-ai.mdx new file mode 100644 index 000000000000..d557f457c189 --- /dev/null +++ b/glossary/medical-ai.mdx @@ -0,0 +1,24 @@ +--- +title: "Medical AI" +description: "Medical AI involves the application of artificial intelligence technologies to healthcare, enhancing diagnosis, treatment planning, and patient monitoring by analyzing complex medical data." +date: "2024-07-12" +author: + name: "Yan Gao" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/yan-gao/" + github: "github.com/yan-gao-GY" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Medical AI refers to the application of artificial intelligence technologies, particularly machine learning algorithms, to medical and healthcare-related fields. This includes, but is not limited to, tasks such as disease diagnosis, personalized treatment plans, drug development, medical imaging analysis, and healthcare management. The goal of Medical AI is to enhance healthcare services, improve treatment outcomes, reduce costs, and increase efficiency within healthcare systems. 
+ +Federated learning (FL) introduces a novel approach to the training of machine learning models across multiple decentralized devices or servers holding local data samples, without exchanging them. This is particularly appropriate in the medical field due to the sensitive nature of medical data and strict privacy requirements. It leverages the strength of diverse datasets without compromising patient confidentiality, making it an increasingly popular choice in Medical AI applications. + +#### Medical AI in Flower +Flower, a friendly FL framework, is developing a more versatile and privacy-enhancing solution for Medical AI through the use of FL. Please check out [Flower industry healthcare](flower.ai/industry/healthcare) website for more detailed information. diff --git a/glossary/model-training.mdx b/glossary/model-training.mdx new file mode 100644 index 000000000000..ba5923962f1b --- /dev/null +++ b/glossary/model-training.mdx @@ -0,0 +1,24 @@ +--- +title: "Model Training" +description: "Model training is the process of teaching an algorithm to learn from data to make predictions or decisions." +date: "2024-07-12" +author: + name: "Yan Gao" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/yan-gao/" + github: "github.com/yan-gao-GY" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Server" + link: "/glossary/server" + - text: "Client" + link: "/glossary/client" +--- + +Model training is a core component of developing machine learning (ML) systems, where an algorithm learns from data to make predictions or decisions. A typical model training process involves several key steps: dataset preparation, feature selection and engineering, choice of model based on the task (e.g., classification, regression), choice of training algorithm (e.g. 
optimizer), and model iteration for updating its weights and biases to minimize the loss function, which measures the difference between the predicted and actual outcomes on the training data. The traditional ML model training process typically involves considerable manual effort, whereas deep learning (DL) offers an end-to-end automated process. + +This approach assumes easy access to data and often requires substantial computational resources, depending on the size of the dataset and complexity of the model. However, large amounts of data in the real world are distributed and protected due to privacy concerns, making them inaccessible for typical (centralized) model training. Federated learning (FL) migrates the model training from data center to local user ends. After local training, each participant sends only their model's updates (not the data) to a central server for aggregation. The updated global model is sent back to the participants for further rounds of local training and updates. This way, the model training benefits from diverse, real-world data without compromising individual data privacy. + +#### Model training in Flower +Flower, a friendly FL framework, offers a wealth of model training examples and baselines tailored for federated environments. Please refer to the [examples](https://flower.ai/docs/examples/) and [baselines](https://flower.ai/docs/baselines/) documentation for more detailed information. diff --git a/glossary/platform-independence.mdx b/glossary/platform-independence.mdx new file mode 100644 index 000000000000..9582fae057ff --- /dev/null +++ b/glossary/platform-independence.mdx @@ -0,0 +1,19 @@ +--- +title: "Platform Independence" +description: "The capability to run programs across different hardware and operating systems." 
+date: "2024-07-08" +author: + name: "Heng Pan" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/pan-h/summary" + github: "github.com/panh99" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" +--- + +Platform independence in federated learning refers to the capability of machine learning systems to operate seamlessly across various hardware and operating system environments. This ensures that the federated learning process can function effectively on various devices with different operating systems such as Windows, Linux, Mac OS, iOS, and Android without requiring platform-specific modifications. By achieving platform independence, federated learning frameworks enable efficient data analysis and model training across heterogeneous edge devices, enhancing scalability and flexibility in distributed machine learning scenarios. + +### Platform Independence in Flower + +Flower is interoperable with different operating systems and hardware platforms to work well in heterogeneous edge device environments. \ No newline at end of file diff --git a/glossary/protocol-buffers.mdx b/glossary/protocol-buffers.mdx new file mode 100644 index 000000000000..7e9bf6c7bbc7 --- /dev/null +++ b/glossary/protocol-buffers.mdx @@ -0,0 +1,31 @@ +--- +title: "Protocol Buffers" +description: "Protocol Buffers, often abbreviated as Protobuf, are a language-neutral, platform-neutral, extensible mechanism for serializing structured data, similar to XML but smaller, faster, and simpler." +date: "2024-05-24" +author: + name: "Taner Topal" + position: "Co-Creator and CTO @ Flower Labs" + website: "https://www.linkedin.com/in/tanertopal/" + github: "github.com/tanertopal" +related: + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" 
+ link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +### Introduction to Protocol Buffers + +Protocol Buffers, often abbreviated as Protobuf, are a language-neutral, platform-neutral, extensible mechanism for serializing structured data, similar to XML but smaller, faster, and simpler. The method involves defining how you want your data to be structured once, then using language-specific generated source code to write and read structured data to and from a variety of data streams. + +### How Protocol Buffers Work + +Protocol Buffers require a `.proto` file where the data structure (the messages) is defined. This is essentially a schema describing the data to be serialized. Once the `.proto` file is prepared, it is compiled using the Protobuf compiler (`protoc`), which generates data access classes in supported languages like Java, C++, Python, Swift, Kotlin, and more. These classes provide simple accessors for each field (like standard getters and setters) and methods to serialize the entire structure to a binary format that can be easily transmitted over network protocols or written to a file. + +### Advantages and Use Cases + +The primary advantages of Protocol Buffers include their simplicity, efficiency, and backward compatibility. They are more efficient than XML or JSON as they serialize to a binary format, which makes them both smaller and faster. They support backward compatibility, allowing developers to modify data structures without breaking deployed programs that are communicating using the protocol. This makes Protobuf an excellent choice for data storage or RPC (Remote Procedure Call) applications where small size, low latency, and schema evolution are critical. + +### Protocol Buffers in Flower + +In the context of Flower, Protocol Buffers play a crucial role in ensuring efficient and reliable communication between the server and clients. 
Federated learning involves heterogeneous clients (e.g., servers, mobile devices, edge devices) running different environments and programming languages. This setup requires frequent exchanges of model updates and other metadata between the server and clients. Protocol Buffers, with their efficient binary serialization, enable Flower to handle these exchanges with minimal overhead, ensuring low latency and reducing the bandwidth required for communication. Moreover, the backward compatibility feature of Protobuf allows Flower to evolve and update its communication protocols without disrupting existing deployments. Best of all, Flower users typically do not have to deal directly with Protobuf, as Flower provides language-specific abstractions that simplify interaction with the underlying communication protocols. diff --git a/glossary/scalability.mdx b/glossary/scalability.mdx new file mode 100644 index 000000000000..4bfb736ff08c --- /dev/null +++ b/glossary/scalability.mdx @@ -0,0 +1,22 @@ +--- +title: "Scalability" +description: "Scalability ensures systems grow with demand. In Federated Learning, it involves efficiently managing dynamic clients and diverse devices. Flower supports large-scale FL on various devices/ resources." +date: "2024-05-23" +author: + name: "Daniel Nata Nugraha" + position: "Software Engineer" + image: "daniel_nata_nugraha.png" + website: "https://www.linkedin.com/in/daniel-nugraha/" + github: "github.com/danielnugraha" +related: + - text: "Flower Paper" + link: "https://arxiv.org/pdf/2007.14390" + - text: "Federated Learning" + link: "/glossary/federated-learning" + - text: "Tutorial: What is Federated Learning?" + link: "/docs/framework/tutorial-series-what-is-federated-learning.html" +--- + +Scalability is the ability of a system, network, or process to accommodate an increasing amount of work. This involves adding resources (like servers) or optimizing existing ones to maintain or enhance performance. 
There are two main types of scalability: horizontal scalability (adding more nodes, such as servers) and vertical scalability (adding more power to existing nodes, like increasing CPU or RAM). Ideally, a scalable system can do both, seamlessly adapting to increased demands without significant downtime. Scalability is essential for businesses to grow while ensuring services remain reliable and responsive. +Scalability in Federated Learning involves managing dynamic client participation, as clients may join or leave unpredictably. This requires algorithms that adapt to varying availability and efficiently aggregate updates from numerous models. Additionally, scalable federated learning systems must handle heterogeneous client devices with different processing powers, network conditions, and data distributions, ensuring balanced contributions to the global model. +Scalability in Flower means efficiently conducting large-scale federated learning (FL) training and evaluation. Flower enables researchers to launch FL experiments with many clients using reasonable computing resources, such as a single machine or a multi-GPU rack. Flower supports scaling workloads to millions of clients, including diverse devices like Raspberry Pis, Android and iOS mobile devices, laptops, etc. It offers complete control over connection management and includes a virtual client engine for large-scale simulations. diff --git a/glossary/server.mdx b/glossary/server.mdx new file mode 100644 index 000000000000..efc25a227791 --- /dev/null +++ b/glossary/server.mdx @@ -0,0 +1,17 @@ +--- +title: "Server" +description: "The central entity coordinating the aggregation of local model updates from multiple clients to build a comprehensive, privacy-preserving global model." 
+date: "2024-07-08" +author: + name: "Heng Pan" + position: "Research Scientist" + website: "https://discuss.flower.ai/u/pan-h/summary" + github: "github.com/panh99" +related: + - text: "Client" + link: "/glossary/client" + - text: "Federated Learning" + link: "/glossary/federated-learning" +--- + +A server in federated learning plays a pivotal role by managing the distributed training process across various clients. Each client independently trains its local model using the local data and then sends the model updates to the server. The server aggregates the received updates to create a new global model, which is subsequently sent back to the clients. This iterative process allows the global model to improve over time without the need for the clients to share their raw data, ensuring data privacy and minimizing data transfer. \ No newline at end of file diff --git a/glossary/xgboost.mdx b/glossary/xgboost.mdx new file mode 100644 index 000000000000..51b5a2912e0b --- /dev/null +++ b/glossary/xgboost.mdx @@ -0,0 +1,34 @@ +--- +title: "XGBoost" +description: "XGBoost - or eXtreme Gradient Boosting - is an open-source library providing a regularizing gradient boosting decision tree framework for many programming languages including Python, C++, and Java." +date: "2024-09-10" +author: + name: "Chong Shen Ng" + position: "Research Engineer @ Flower Labs" + website: "https://discuss.flower.ai/u/chongshenng" + github: "github.com/chongshenng" +related: + - text: "Quickstart Federated Learning with XGBoost and Flower" + link: "/docs/framework/tutorial-quickstart-xgboost.html" + - text: "Flower Example using XGBoost (Comprehensive)" + link: "/docs/examples/xgboost-comprehensive.html" +--- + +### Introduction to XGBoost + +XGBoost - or eXtreme Gradient Boosting - is an open-source library which provides a regularizing gradient boosting framework for Python, C++, Java, R, Julia, Perl, and Scala. 
It implements machine learning algorithms based on the gradient boosting concept, where a single model is created from an ensemble of weak learners (decision trees). This is commonly referred to as Gradient Boosting Decision Trees (GBDT), a decision tree ensemble learning algorithm. + +GBDTs are commonly compared with the random forest algorithm. They are similar in the sense that they build multiple decision trees. But the key differences are in how they are built and combined. Random forest first builds full decision trees in parallel from bootstrap samples of the dataset, and then generates the final prediction based on an average of all of the predictions. In contrast, GBDT iteratively trains decision trees with the objective that each subsequent tree reduces the error residuals of the previous model - this is the concept of boosting. The final prediction in a GBDT is a weighted sum of all of the tree predictions. While the bootstrap aggregation method of random forest minimizes variance and overfitting, the boosting method of GBDT minimizes bias and underfitting. + +XGBoost includes many features that optimize the implementation of GBDT, including parallelized tree training (instead of sequential) and integration with distributed processing frameworks like Apache Spark and Dask. These various performance improvements have historically made XGBoost the preferred framework of choice when training models for supervised learning tasks, and have seen widespread success in Kaggle competitions on structured data. + +### Use cases in Federated Learning + +While there is no way to know beforehand what model would perform the best in federated learning, XGBoost is appealing for several reasons: +1. To train the first model, XGBoost hyperparameters require significantly less tuning compared to neural network-based models. +2. 
XGBoost is known to produce models that perform far better than neural networks on tabular datasets, which can be encountered in real-world federated learning systems such as in healthcare or IoT applications. +3. Feature scaling is unnecessary when training XGBoost models. This not only facilitates fine-tuning on new data distributions, but also supports cross-device and cross-silo federated learning, where the data distributions from participating clients are not known a priori. + +### XGBoost in Flower + +In Flower, we have provided two strategies for performing federated learning with XGBoost: [`FedXgbBagging`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedxgb_bagging.py) and [`FedXgbCyclic`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedxgb_cyclic.py), which are inspired by the work at Nvidia NVFlare. These implementations allow Flower users to use different aggregation strategies for the XGBoost model. `FedXgbBagging` aggregates trees from all participating clients every round, whereas `FedXgbCyclic` aggregates clients' trees sequentially in a round-robin manner. With these strategies, Flower users can very quickly and easily run and compare the performance of federated learning systems on distributed tabular datasets using state-of-the-art XGBoost aggregation strategies, without needing to implement them from scratch. 
diff --git a/pyproject.toml b/pyproject.toml index 0d0138a5689b..2b789fc3d623 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "flwr" -version = "1.11.0" +version = "1.13.0" description = "Flower: A Friendly Federated Learning Framework" license = "Apache-2.0" authors = ["The Flower Authors "] @@ -30,7 +30,6 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -44,38 +43,39 @@ classifiers = [ "Topic :: Software Development :: Libraries :: Python Modules", "Typing :: Typed", ] -packages = [ - { include = "flwr", from = "src/py" }, -] -exclude = [ - "src/py/**/*_test.py", -] +packages = [{ include = "flwr", from = "src/py" }] +exclude = ["src/py/**/*_test.py"] [tool.poetry.scripts] +# `flwr` CLI flwr = "flwr.cli.app:app" -flower-superlink = "flwr.server:run_superlink" -flower-superexec = "flwr.superexec:run_superexec" -flower-supernode = "flwr.client:run_supernode" -flower-client-app = "flwr.client:run_client_app" -flower-server-app = "flwr.server:run_server_app" +# SuperExec (can run with either Deployment Engine or Simulation Engine) +flower-superexec = "flwr.superexec.app:run_superexec" # Deprecated +# Simulation Engine flower-simulation = "flwr.simulation.run_simulation:run_simulation_from_cli" +# Deployment Engine +flower-superlink = "flwr.server.app:run_superlink" +flower-supernode = "flwr.client.supernode.app:run_supernode" +flwr-serverapp = "flwr.server.serverapp:flwr_serverapp" +flower-server-app = "flwr.server.run_serverapp:run_server_app" # Deprecated flwr-clientapp = "flwr.client.clientapp:flwr_clientapp" +flower-client-app = "flwr.client.supernode:run_client_app" # Deprecated [tool.poetry.dependencies] -python = "^3.8" 
+python = "^3.9" # Mandatory dependencies numpy = "^1.21.0" -grpcio = "^1.60.0,!=1.64.2,!=1.65.1,!=1.65.2,!=1.65.4" +grpcio = "^1.60.0,!=1.64.2,!=1.65.1,!=1.65.2,!=1.65.4,!=1.65.5,!=1.66.0,!=1.66.1" protobuf = "^4.25.2" cryptography = "^42.0.4" pycryptodome = "^3.18.0" iterators = "^0.0.2" -typer = { version = "^0.9.0", extras = ["all"] } +typer = "^0.12.5" tomli = "^2.0.1" tomli-w = "^1.0.0" pathspec = "^0.12.1" # Optional dependencies (Simulation Engine) -ray = { version = "==2.10.0", optional = true, python = ">=3.8,<3.12" } +ray = { version = "==2.10.0", optional = true, python = ">=3.9,<3.12" } # Optional dependencies (REST transport layer) requests = { version = "^2.31.0", optional = true } starlette = { version = "^0.31.0", optional = true } @@ -93,10 +93,12 @@ types-setuptools = "==69.0.0.20240125" clang-format = "==17.0.6" isort = "==5.13.2" black = { version = "==24.2.0", extras = ["jupyter"] } +taplo = "==0.9.3" docformatter = "==1.7.5" mypy = "==1.8.0" -pylint = "==3.0.3" +pylint = "==3.3.1" flake8 = "==5.0.4" +parameterized = "==0.9.0" pytest = "==7.4.4" pytest-cov = "==4.1.0" pytest-watcher = "==0.4.1" @@ -105,25 +107,24 @@ mypy-protobuf = "==3.2.0" jupyterlab = "==4.0.12" rope = "==1.11.0" semver = "==3.0.2" -sphinx = "==6.2.1" +sphinx = "==7.4.7" sphinx-intl = "==2.2.0" -sphinx-click = "==5.1.0" -myst-parser = "==1.0.0" -sphinx-design = "==0.5.0" +sphinx-click = "==6.0.0" +myst-parser = "==3.0.1" +sphinx-design = "==0.6.1" sphinx-copybutton = "==0.5.2" sphinxcontrib-mermaid = "==0.9.2" sphinxcontrib-youtube = "==1.4.1" -furo = "==2023.9.10" -sphinx-reredirects = "==0.1.3" -nbsphinx = "==0.9.4" +furo = "==2024.8.6" +sphinx-reredirects = "==0.1.5" +nbsphinx = "==0.9.5" nbstripout = "==0.6.1" ruff = "==0.1.9" sphinx-argparse = "==0.4.0" pipreqs = "==0.4.13" -mdformat-gfm = "==0.3.5" +mdformat-gfm = "==0.3.6" mdformat-frontmatter = "==2.0.1" mdformat-beautysh = "==0.1.1" -mdformat-myst = "==0.1.5" twine = "==5.1.1" pyroma = "==4.2" check-wheel-contents 
= "==0.4.0" @@ -132,6 +133,17 @@ PyGithub = "==2.1.1" licensecheck = "==2024" pre-commit = "==3.5.0" sphinx-substitution-extensions = "2022.02.16" +sphinxext-opengraph = "==0.9.1" +docstrfmt = { git = "https://github.com/charlesbvll/docstrfmt.git", branch = "patch-2" } +docsig = "==0.64.0" + +[tool.docstrfmt] +extend_exclude = [ + "doc/source/conf.py", + "doc/source/tutorial-quickstart-huggingface.rst", + "doc/source/_templates/autosummary/*", + "doc/source/ref-api/*", +] [tool.isort] profile = "black" @@ -139,7 +151,7 @@ known_first_party = ["flwr", "flwr_tool"] [tool.black] line-length = 88 -target-version = ["py38", "py39", "py310", "py311"] +target-version = ["py39", "py310", "py311"] [tool.pylint."MESSAGES CONTROL"] disable = "duplicate-code,too-few-public-methods,useless-import-alias" @@ -147,10 +159,7 @@ disable = "duplicate-code,too-few-public-methods,useless-import-alias" [tool.pytest.ini_options] minversion = "6.2" addopts = "-qq" -testpaths = [ - "src/py/flwr", - "src/py/flwr_tool", -] +testpaths = ["src/py/flwr", "src/py/flwr_tool"] filterwarnings = "ignore::DeprecationWarning" [tool.pytest-watcher] @@ -163,17 +172,12 @@ patterns = ["*.py"] ignore_patterns = [] [tool.mypy] -plugins = [ - "numpy.typing.mypy_plugin", -] +plugins = ["numpy.typing.mypy_plugin"] ignore_missing_imports = true strict = true [[tool.mypy.overrides]] -module = [ - "importlib.metadata.*", - "importlib_metadata.*", -] +module = ["importlib.metadata.*", "importlib_metadata.*"] follow_imports = "skip" follow_imports_for_stubs = true disallow_untyped_calls = false @@ -188,7 +192,7 @@ wrap-summaries = 88 wrap-descriptions = 88 [tool.ruff] -target-version = "py38" +target-version = "py39" line-length = 88 select = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] fixable = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"] @@ -222,3 +226,7 @@ convention = "numpy" [tool.ruff.per-file-ignores] "src/py/flwr/server/strategy/*.py" = ["E501"] + +[tool.docsig] +ignore-no-params = true +exclude = 
'src/py/flwr/proto/.*|src/py/flwr/.*_test\.py|src/py/flwr/cli/new/templates/.*\.tpl' diff --git a/src/docker/base/README.md b/src/docker/base/README.md new file mode 100644 index 000000000000..ef290a26fec4 --- /dev/null +++ b/src/docker/base/README.md @@ -0,0 +1,54 @@ +# Flower Base + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0-py3.11-alpine3.19` +- `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1-py3.11-alpine3.19` +- `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0-py3.11-alpine3.19` +- `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0-py3.11-alpine3.19` +- `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` +- `1.9.0-py3.11-alpine3.19` +- `1.9.0-py3.11-ubuntu22.04` +- `1.9.0-py3.10-ubuntu22.04` +- `1.9.0-py3.9-ubuntu22.04` +- `1.9.0-py3.8-ubuntu22.04` +- `1.8.0-py3.11-alpine3.19` +- `1.8.0-py3.11-ubuntu22.04` +- `1.8.0-py3.10-ubuntu22.04` +- `1.8.0-py3.9-ubuntu22.04` +- `1.8.0-py3.8-ubuntu22.04` diff --git a/src/docker/base/alpine/Dockerfile b/src/docker/base/alpine/Dockerfile index 441e0fdd9b85..ee1e11b2d070 100644 --- a/src/docker/base/alpine/Dockerfile +++ b/src/docker/base/alpine/Dockerfile @@ -33,6 +33,8 @@ RUN apk add --no-cache \ # require for compiling grpcio on ARM64 g++ \ libffi-dev \ + # required for installing flwr via git + git \ # create virtual env && python -m venv /python/venv @@ -42,18 +44,27 @@ ENV PATH=/python/venv/bin:$PATH # Install specific version of pip, setuptools and flwr ARG PIP_VERSION ARG SETUPTOOLS_VERSION -ARG FLWR_VERSION -ARG FLWR_PACKAGE=flwr RUN pip install -U --no-cache-dir \ pip==${PIP_VERSION} \ - setuptools==${SETUPTOOLS_VERSION} \ - ${FLWR_PACKAGE}==${FLWR_VERSION} + setuptools==${SETUPTOOLS_VERSION} + +ARG FLWR_VERSION +ARG FLWR_VERSION_REF +ARG FLWR_PACKAGE=flwr +# hadolint ignore=DL3013 +RUN if [ -z "${FLWR_VERSION_REF}" ]; then \ + pip install -U 
--no-cache-dir ${FLWR_PACKAGE}==${FLWR_VERSION}; \ + else \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}@${FLWR_VERSION_REF}; \ + fi FROM python:${PYTHON_VERSION}-${DISTRO}${DISTRO_VERSION} AS base -# Upgrade system Python pip and setuptools -# hadolint ignore=DL3013 -RUN pip install -U --no-cache-dir pip setuptools +# Keep the version of system Python pip and setuptools in sync with those installed in the +# virtualenv. +ARG PIP_VERSION +ARG SETUPTOOLS_VERSION +RUN pip install -U --no-cache-dir pip==${PIP_VERSION} setuptools==${SETUPTOOLS_VERSION} # required by the grpc package RUN apk add --no-cache \ diff --git a/src/docker/base/ubuntu/Dockerfile b/src/docker/base/ubuntu/Dockerfile index 31cc8381b7c5..b52599a80784 100644 --- a/src/docker/base/ubuntu/Dockerfile +++ b/src/docker/base/ubuntu/Dockerfile @@ -15,7 +15,7 @@ # hadolint global ignore=DL3008 ARG DISTRO=ubuntu -ARG DISTRO_VERSION=22.04 +ARG DISTRO_VERSION=24.04 FROM $DISTRO:$DISTRO_VERSION AS python ENV DEBIAN_FRONTEND=noninteractive @@ -32,7 +32,7 @@ RUN apt-get update \ # Install PyEnv and Python ARG PYTHON_VERSION=3.11 ENV PYENV_ROOT=/root/.pyenv -ENV PATH $PYENV_ROOT/bin:$PATH +ENV PATH=$PYENV_ROOT/bin:$PATH # https://github.com/hadolint/hadolint/wiki/DL4006 SHELL ["/bin/bash", "-o", "pipefail", "-c"] RUN curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash @@ -50,22 +50,29 @@ RUN LATEST=$(pyenv latest -k ${PYTHON_VERSION}) \ ENV PATH=/usr/local/bin/python/bin:$PATH -# Upgrade system Python pip and setuptools -# hadolint ignore=DL3013 -RUN pip install -U --no-cache-dir pip setuptools \ +ARG PIP_VERSION +ARG SETUPTOOLS_VERSION +# Keep the version of system Python pip and setuptools in sync with those installed in the +# virtualenv. 
+RUN pip install -U --no-cache-dir pip==${PIP_VERSION} setuptools==${SETUPTOOLS_VERSION} \ # Use a virtual environment to ensure that Python packages are installed in the same location # regardless of whether the subsequent image build is run with the app or the root user && python -m venv /python/venv ENV PATH=/python/venv/bin:$PATH -ARG PIP_VERSION -ARG SETUPTOOLS_VERSION -ARG FLWR_VERSION -ARG FLWR_PACKAGE=flwr RUN pip install -U --no-cache-dir \ pip==${PIP_VERSION} \ - setuptools==${SETUPTOOLS_VERSION} \ - ${FLWR_PACKAGE}==${FLWR_VERSION} + setuptools==${SETUPTOOLS_VERSION} + +ARG FLWR_VERSION +ARG FLWR_VERSION_REF +ARG FLWR_PACKAGE=flwr +# hadolint ignore=DL3013 +RUN if [ -z "${FLWR_VERSION_REF}" ]; then \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}==${FLWR_VERSION}; \ + else \ + pip install -U --no-cache-dir ${FLWR_PACKAGE}@${FLWR_VERSION_REF}; \ + fi FROM $DISTRO:$DISTRO_VERSION AS base @@ -80,11 +87,10 @@ RUN apt-get update \ ca-certificates \ && rm -rf /var/lib/apt/lists/* \ # add non-root user - && adduser \ + && useradd \ --no-create-home \ - --home /app \ - --disabled-password \ - --gecos "" \ + --home-dir /app \ + -c "" \ --uid 49999 app \ && mkdir -p /app \ && chown -R app:app /app diff --git a/src/docker/clientapp/README.md b/src/docker/clientapp/README.md new file mode 100644 index 000000000000..a610de66eeae --- /dev/null +++ b/src/docker/clientapp/README.md @@ -0,0 +1,36 @@ +# Flower ClientApp + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` diff --git a/src/docker/complete/compose.yml b/src/docker/complete/compose.yml index 90261249f322..b21189d94123 100644 --- a/src/docker/complete/compose.yml +++ b/src/docker/complete/compose.yml @@ -1,17 +1,24 @@ services: # create a SuperLink service superlink: - image: flwr/superlink:${FLWR_VERSION:-1.10.0} + image: flwr/superlink:${FLWR_VERSION:-1.12.0} command: - --insecure # create a SuperExec service superexec: - user: root build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/superexec:${FLWR_VERSION:-1.10.0} + FROM flwr/superexec:${FLWR_VERSION:-1.12.0} + + # gcc is required for the fastai quickstart example + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app WORKDIR /app COPY --chown=app:app pyproject.toml . 
@@ -29,89 +36,146 @@ services: - superlink="superlink:9091" depends_on: - superlink - volumes: - - apps-volume:/app/.flwr/apps/:rw # create a two SuperNode service with different node configs supernode-1: - user: root - deploy: - resources: - limits: - cpus: "2" + image: flwr/supernode:${FLWR_VERSION:-1.12.0} command: + - --insecure - --superlink - superlink:9092 + - --supernode-address + - 0.0.0.0:9094 + - --isolation + - process + - --node-config + - "partition-id=0 num-partitions=2" + depends_on: + - superlink + + supernode-2: + image: flwr/supernode:${FLWR_VERSION:-1.12.0} + command: - --insecure + - --superlink + - superlink:9092 + - --supernode-address + - 0.0.0.0:9095 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" depends_on: - superlink - volumes: - - apps-volume:/app/.flwr/apps/:ro + + # uncomment to add another SuperNode + # + # supernode-3: + # image: flwr/supernode:${FLWR_VERSION:-1.12.0} + # command: + # - --insecure + # - --superlink + # - superlink:9092 + # - --supernode-address + # - 0.0.0.0:9096 + # - --isolation + # - process + # - --node-config + # - "partition-id=1 num-partitions=2" + # depends_on: + # - superlink + + clientapp-1: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/supernode:${FLWR_VERSION:-1.10.0} + FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + # gcc is required for the fastai quickstart example + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app WORKDIR /app COPY --chown=app:app pyproject.toml . RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ && python -m pip install -U --no-cache-dir . 
- ENTRYPOINT ["flower-supernode", "--node-config", "partition-id=0,num-partitions=2"] - - supernode-2: - user: root + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-1:9094 deploy: resources: limits: cpus: "2" - command: - - --superlink - - superlink:9092 - - --insecure + stop_signal: SIGINT depends_on: - - superlink - volumes: - - apps-volume:/app/.flwr/apps/:ro + - supernode-1 + + clientapp-2: build: context: ${PROJECT_DIR:-.} dockerfile_inline: | - FROM flwr/supernode:${FLWR_VERSION:-1.10.0} + FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + # gcc is required for the fastai quickstart example + USER root + RUN apt-get update \ + && apt-get -y --no-install-recommends install \ + build-essential \ + && rm -rf /var/lib/apt/lists/* + USER app WORKDIR /app COPY --chown=app:app pyproject.toml . RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ && python -m pip install -U --no-cache-dir . - ENTRYPOINT ["flower-supernode", "--node-config", "partition-id=1,num-partitions=2"] + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-2:9095 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-2 - # uncomment to add another supernode + # uncomment to add another ClientApp # - # supernode-3: - # user: root - # deploy: - # resources: - # limits: - # cpus: "2" - # command: - # - --superlink - # - superlink:9092 - # - --insecure - # depends_on: - # - superlink - # volumes: - # - apps-volume:/app/.flwr/apps/:ro + # clientapp-3: # build: # context: ${PROJECT_DIR:-.} # dockerfile_inline: | - # FROM flwr/supernode:${FLWR_VERSION:-1.10.0} + # FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + # # gcc is required for the fastai quickstart example + # USER root + # RUN apt-get update \ + # && apt-get -y --no-install-recommends install \ + # build-essential \ + # && rm -rf /var/lib/apt/lists/* + # USER app # WORKDIR /app # COPY --chown=app:app pyproject.toml . 
# RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ # && python -m pip install -U --no-cache-dir . - # ENTRYPOINT ["flower-supernode", "--node-config", "partition-id=0,num-partitions=2"] - -volumes: - apps-volume: + # ENTRYPOINT ["flwr-clientapp"] + # command: + # - --supernode + # - supernode-3:9096 + # deploy: + # resources: + # limits: + # cpus: "2" + # stop_signal: SIGINT + # depends_on: + # - supernode-3 diff --git a/src/docker/complete/with-tls.yml b/src/docker/complete/with-tls.yml index 1b8540e09b64..6cbeb2ba7397 100644 --- a/src/docker/complete/with-tls.yml +++ b/src/docker/complete/with-tls.yml @@ -17,7 +17,7 @@ services: - --executor - flwr.superexec.deployment:executor - --executor-config - - superlink="superlink:9091",root-certificates="certificates/superlink-ca.crt" + - superlink="superlink:9091" root-certificates="certificates/superlink-ca.crt" - --ssl-ca-certfile=certificates/ca.crt - --ssl-certfile=certificates/server.pem - --ssl-keyfile=certificates/server.key @@ -35,6 +35,12 @@ services: command: - --superlink - superlink:9092 + - --supernode-address + - 0.0.0.0:9094 + - --isolation + - process + - --node-config + - "partition-id=0 num-partitions=2" - --root-certificates - certificates/ca.crt secrets: @@ -45,18 +51,30 @@ services: command: - --superlink - superlink:9092 + - --supernode-address + - 0.0.0.0:9095 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" - --root-certificates - certificates/ca.crt secrets: - source: superlink-ca-certfile target: /app/certificates/ca.crt - # uncomment to enable TLS on another supernode + # uncomment to enable TLS on another SuperNode # # supernode-3: # command: # - --superlink # - superlink:9092 + # - --supernode-address + # - 0.0.0.0:9096 + # - --isolation + # - process + # - --node-config + # - "partition-id=1 num-partitions=2" # - --root-certificates # - certificates/ca.crt # secrets: diff --git a/src/docker/distributed/.gitignore b/src/docker/distributed/.gitignore 
new file mode 100644 index 000000000000..1a11330c6e95 --- /dev/null +++ b/src/docker/distributed/.gitignore @@ -0,0 +1,3 @@ +superexec-certificates +superlink-certificates +server/state diff --git a/src/docker/distributed/certs.yml b/src/docker/distributed/certs.yml new file mode 100644 index 000000000000..48e157582e40 --- /dev/null +++ b/src/docker/distributed/certs.yml @@ -0,0 +1,6 @@ +services: + gen-certs: + build: + args: + SUPERLINK_IP: ${SUPERLINK_IP:-127.0.0.1} + SUPEREXEC_IP: ${SUPEREXEC_IP:-127.0.0.1} diff --git a/src/docker/distributed/client/compose.yml b/src/docker/distributed/client/compose.yml new file mode 100644 index 000000000000..6bc6e6739ae4 --- /dev/null +++ b/src/docker/distributed/client/compose.yml @@ -0,0 +1,128 @@ +services: + supernode-1: + image: flwr/supernode:${FLWR_VERSION:-1.12.0} + command: + - --superlink + - ${SUPERLINK_IP:-127.0.0.1}:9092 + - --supernode-address + - 0.0.0.0:9094 + - --isolation + - process + - --node-config + - "partition-id=0 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + + supernode-2: + image: flwr/supernode:${FLWR_VERSION:-1.12.0} + command: + - --superlink + - ${SUPERLINK_IP:-127.0.0.1}:9092 + - --supernode-address + - 0.0.0.0:9095 + - --isolation + - process + - --node-config + - "partition-id=1 num-partitions=2" + - --root-certificates + - certificates/ca.crt + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + + # uncomment to add another SuperNode + # + # supernode-3: + # image: flwr/supernode:${FLWR_VERSION:-1.12.0} + # command: + # - --superlink + # - ${SUPERLINK_IP:-127.0.0.1}:9092 + # - --supernode-address + # - 0.0.0.0:9096 + # - --isolation + # - process + # - --node-config + # - "partition-id=1 num-partitions=2" + # - --root-certificates + # - certificates/ca.crt + # secrets: + # - source: superlink-ca-certfile + # target: /app/certificates/ca.crt + + clientapp-1: + 
build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-1:9094 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-1 + + clientapp-2: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . + + ENTRYPOINT ["flwr-clientapp"] + command: + - --supernode + - supernode-2:9095 + deploy: + resources: + limits: + cpus: "2" + stop_signal: SIGINT + depends_on: + - supernode-2 + + # uncomment to add another ClientApp + # + # clientapp-3: + # build: + # context: ${PROJECT_DIR:-.} + # dockerfile_inline: | + # FROM flwr/clientapp:${FLWR_VERSION:-1.12.0} + + # WORKDIR /app + # COPY --chown=app:app pyproject.toml . + # RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + # && python -m pip install -U --no-cache-dir . 
+ + # ENTRYPOINT ["flwr-clientapp"] + # command: + # - --supernode + # - supernode-3:9096 + # deploy: + # resources: + # limits: + # cpus: "2" + # stop_signal: SIGINT + # depends_on: + # - supernode-3 + +secrets: + superlink-ca-certfile: + file: ../superlink-certificates/ca.crt diff --git a/src/docker/distributed/server/compose.yml b/src/docker/distributed/server/compose.yml new file mode 100644 index 000000000000..f53b63593eb8 --- /dev/null +++ b/src/docker/distributed/server/compose.yml @@ -0,0 +1,67 @@ +services: + superlink: + image: flwr/superlink:${FLWR_VERSION:-1.12.0} + command: + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + - --database=state/state.db + volumes: + - ./state/:/app/state/:rw + secrets: + - source: superlink-ca-certfile + target: /app/certificates/ca.crt + - source: superlink-certfile + target: /app/certificates/server.pem + - source: superlink-keyfile + target: /app/certificates/server.key + ports: + - 9092:9092 + + superexec: + build: + context: ${PROJECT_DIR:-.} + dockerfile_inline: | + FROM flwr/superexec:${FLWR_VERSION:-1.12.0} + + WORKDIR /app + COPY --chown=app:app pyproject.toml . + RUN sed -i 's/.*flwr\[simulation\].*//' pyproject.toml \ + && python -m pip install -U --no-cache-dir . 
+ + ENTRYPOINT ["flower-superexec"] + command: + - --executor + - flwr.superexec.deployment:executor + - --executor-config + - superlink="superlink:9091" root-certificates="certificates/superlink-ca.crt" + - --ssl-ca-certfile=certificates/ca.crt + - --ssl-certfile=certificates/server.pem + - --ssl-keyfile=certificates/server.key + secrets: + - source: superlink-ca-certfile + target: /app/certificates/superlink-ca.crt + - source: superexec-ca-certfile + target: /app/certificates/ca.crt + - source: superexec-certfile + target: /app/certificates/server.pem + - source: superexec-keyfile + target: /app/certificates/server.key + ports: + - 9093:9093 + depends_on: + - superlink + +secrets: + superlink-ca-certfile: + file: ../superlink-certificates/ca.crt + superlink-certfile: + file: ../superlink-certificates/server.pem + superlink-keyfile: + file: ../superlink-certificates/server.key + superexec-ca-certfile: + file: ../superexec-certificates/ca.crt + superexec-certfile: + file: ../superexec-certificates/server.pem + superexec-keyfile: + file: ../superexec-certificates/server.key diff --git a/src/docker/serverapp/README.md b/src/docker/serverapp/README.md new file mode 100644 index 000000000000..110712fe3bfd --- /dev/null +++ b/src/docker/serverapp/README.md @@ -0,0 +1,48 @@ +# Flower ServerApp + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` +- `1.9.0`, `1.9.0-py3.11-ubuntu22.04` +- `1.9.0-py3.10-ubuntu22.04` +- `1.9.0-py3.9-ubuntu22.04` +- `1.9.0-py3.8-ubuntu22.04` +- `1.8.0`, `1.8.0-py3.11-ubuntu22.04` +- `1.8.0-py3.10-ubuntu22.04` +- `1.8.0-py3.9-ubuntu22.04` +- `1.8.0-py3.8-ubuntu22.04` diff --git a/src/docker/superexec/README.md b/src/docker/superexec/README.md new file mode 100644 index 000000000000..8026db18b978 --- /dev/null +++ b/src/docker/superexec/README.md @@ -0,0 +1,40 @@ +# Flower SuperExec + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` diff --git a/src/docker/superlink/README.md b/src/docker/superlink/README.md new file mode 100644 index 000000000000..af03ce1c8054 --- /dev/null +++ b/src/docker/superlink/README.md @@ -0,0 +1,37 @@ +# Flower SuperLink + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-alpine3.19` +- `1.12.0-py3.11-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-alpine3.19` +- `1.11.1-py3.11-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-alpine3.19` +- `1.11.0-py3.11-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-alpine3.19` +- `1.10.0-py3.11-ubuntu22.04` +- `1.9.0`, `1.9.0-py3.11-alpine3.19` +- `1.9.0-py3.11-ubuntu22.04` +- `1.8.0`, `1.8.0-py3.11-alpine3.19` +- `1.8.0-py3.11-ubuntu22.04` diff --git a/src/docker/supernode/README.md b/src/docker/supernode/README.md new file mode 100644 index 000000000000..493f98cc78e4 --- /dev/null +++ b/src/docker/supernode/README.md @@ -0,0 +1,47 @@ +# Flower SuperNode + +

+ + Flower Website + +

+ +## Quick reference + +- **Learn more:**
+ [Quickstart with Docker](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker.html) and [Quickstart with Docker Compose](https://flower.ai/docs/framework/docker/tutorial-quickstart-docker-compose.html) + +- **Where to get help:**
+ [Flower Discuss](https://discuss.flower.ai), [Slack](https://flower.ai/join-slack) or [GitHub](https://github.com/adap/flower) + +- **Supported architectures:**
+ `amd64`, `arm64v8` + +## Supported tags + +- `unstable` + - points to the last successful build of the `main` branch +- `nightly`, `.dev` e.g. `1.13.0.dev20241014` + - uses Python 3.11 and Ubuntu 24.04 +- `1.12.0`, `1.12.0-py3.11-alpine3.19` +- `1.12.0-py3.11-ubuntu24.04` +- `1.12.0-py3.10-ubuntu24.04` +- `1.12.0-py3.9-ubuntu24.04` +- `1.11.1`, `1.11.1-py3.11-alpine3.19` +- `1.11.1-py3.11-ubuntu22.04` +- `1.11.1-py3.10-ubuntu22.04` +- `1.11.1-py3.9-ubuntu22.04` +- `1.11.1-py3.8-ubuntu22.04` +- `1.11.0`, `1.11.0-py3.11-alpine3.19` +- `1.11.0-py3.11-ubuntu22.04` +- `1.11.0-py3.10-ubuntu22.04` +- `1.11.0-py3.9-ubuntu22.04` +- `1.11.0-py3.8-ubuntu22.04` +- `1.10.0`, `1.10.0-py3.11-ubuntu22.04` +- `1.10.0-py3.10-ubuntu22.04` +- `1.10.0-py3.9-ubuntu22.04` +- `1.10.0-py3.8-ubuntu22.04` +- `1.9.0`, `1.9.0-py3.11-ubuntu22.04` +- `1.9.0-py3.10-ubuntu22.04` +- `1.9.0-py3.9-ubuntu22.04` +- `1.9.0-py3.8-ubuntu22.04` diff --git a/src/proto/flwr/proto/clientappio.proto b/src/proto/flwr/proto/clientappio.proto index 898cb04c5b5b..19d2db50501a 100644 --- a/src/proto/flwr/proto/clientappio.proto +++ b/src/proto/flwr/proto/clientappio.proto @@ -1,3 +1,18 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== + syntax = "proto3"; package flwr.proto; @@ -30,9 +45,9 @@ message ClientAppOutputStatus { } message GetTokenRequest {} -message GetTokenResponse { sint64 token = 1; } +message GetTokenResponse { uint64 token = 1; } -message PullClientAppInputsRequest { sint64 token = 1; } +message PullClientAppInputsRequest { uint64 token = 1; } message PullClientAppInputsResponse { Message message = 1; Context context = 2; @@ -41,7 +56,7 @@ message PullClientAppInputsResponse { } message PushClientAppOutputsRequest { - sint64 token = 1; + uint64 token = 1; Message message = 2; Context context = 3; } diff --git a/src/proto/flwr/proto/control.proto b/src/proto/flwr/proto/control.proto new file mode 100644 index 000000000000..8b75c66fccaa --- /dev/null +++ b/src/proto/flwr/proto/control.proto @@ -0,0 +1,32 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== + +syntax = "proto3"; + +package flwr.proto; + +import "flwr/proto/run.proto"; + +service Control { + // Request to create a new run + rpc CreateRun(CreateRunRequest) returns (CreateRunResponse) {} + + // Get the status of a given run + rpc GetRunStatus(GetRunStatusRequest) returns (GetRunStatusResponse) {} + + // Update the status of a given run + rpc UpdateRunStatus(UpdateRunStatusRequest) + returns (UpdateRunStatusResponse) {} +} diff --git a/src/proto/flwr/proto/driver.proto b/src/proto/flwr/proto/driver.proto index 63a2f78e6f6d..e26003862a76 100644 --- a/src/proto/flwr/proto/driver.proto +++ b/src/proto/flwr/proto/driver.proto @@ -21,7 +21,6 @@ import "flwr/proto/node.proto"; import "flwr/proto/task.proto"; import "flwr/proto/run.proto"; import "flwr/proto/fab.proto"; -import "flwr/proto/transport.proto"; service Driver { // Request run_id @@ -43,17 +42,8 @@ service Driver { rpc GetFab(GetFabRequest) returns (GetFabResponse) {} } -// CreateRun -message CreateRunRequest { - string fab_id = 1; - string fab_version = 2; - map override_config = 3; - Fab fab = 4; -} -message CreateRunResponse { sint64 run_id = 1; } - // GetNodes messages -message GetNodesRequest { sint64 run_id = 1; } +message GetNodesRequest { uint64 run_id = 1; } message GetNodesResponse { repeated Node nodes = 1; } // PushTaskIns messages diff --git a/src/proto/flwr/proto/exec.proto b/src/proto/flwr/proto/exec.proto index 65faf4386ea0..ad0723c0480c 100644 --- a/src/proto/flwr/proto/exec.proto +++ b/src/proto/flwr/proto/exec.proto @@ -33,6 +33,6 @@ message StartRunRequest { map override_config = 2; map federation_config = 3; } -message StartRunResponse { sint64 run_id = 1; } -message StreamLogsRequest { sint64 run_id = 1; } +message StartRunResponse { uint64 run_id = 1; } +message StreamLogsRequest { uint64 run_id = 1; } message StreamLogsResponse { string log_output = 1; } diff --git 
a/src/proto/flwr/proto/fab.proto b/src/proto/flwr/proto/fab.proto index 3620a95ff009..367b6e5b5c13 100644 --- a/src/proto/flwr/proto/fab.proto +++ b/src/proto/flwr/proto/fab.proto @@ -1,7 +1,24 @@ +// Copyright 2024 Flower Labs GmbH. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== + syntax = "proto3"; package flwr.proto; +import "flwr/proto/node.proto"; + message Fab { // This field is the hash of the data field. It is used to identify the data. 
// The hash is calculated using the SHA-256 algorithm and is represented as a @@ -11,5 +28,8 @@ message Fab { bytes content = 2; } -message GetFabRequest { string hash_str = 1; } +message GetFabRequest { + Node node = 1; + string hash_str = 2; +} message GetFabResponse { Fab fab = 1; } diff --git a/src/proto/flwr/proto/fleet.proto b/src/proto/flwr/proto/fleet.proto index b87214ac52f3..130b30b96669 100644 --- a/src/proto/flwr/proto/fleet.proto +++ b/src/proto/flwr/proto/fleet.proto @@ -69,7 +69,10 @@ message PullTaskInsResponse { } // PushTaskRes messages -message PushTaskResRequest { repeated TaskRes task_res_list = 1; } +message PushTaskResRequest { + Node node = 1; + repeated TaskRes task_res_list = 2; +} message PushTaskResResponse { Reconnect reconnect = 1; map results = 2; diff --git a/src/proto/flwr/proto/message.proto b/src/proto/flwr/proto/message.proto index 3230ab0609a9..7066da5b7e76 100644 --- a/src/proto/flwr/proto/message.proto +++ b/src/proto/flwr/proto/message.proto @@ -28,17 +28,17 @@ message Message { } message Context { - sint64 node_id = 1; + uint64 node_id = 1; map node_config = 2; RecordSet state = 3; map run_config = 4; } message Metadata { - sint64 run_id = 1; + uint64 run_id = 1; string message_id = 2; - sint64 src_node_id = 3; - sint64 dst_node_id = 4; + uint64 src_node_id = 3; + uint64 dst_node_id = 4; string reply_to_message = 5; string group_id = 6; double ttl = 7; diff --git a/src/proto/flwr/proto/node.proto b/src/proto/flwr/proto/node.proto index e61d44f0f783..ec72b51b44ec 100644 --- a/src/proto/flwr/proto/node.proto +++ b/src/proto/flwr/proto/node.proto @@ -18,6 +18,6 @@ syntax = "proto3"; package flwr.proto; message Node { - sint64 node_id = 1; + uint64 node_id = 1; bool anonymous = 2; } diff --git a/src/proto/flwr/proto/recordset.proto b/src/proto/flwr/proto/recordset.proto index d51d0f9ce416..939e97cf46e3 100644 --- a/src/proto/flwr/proto/recordset.proto +++ b/src/proto/flwr/proto/recordset.proto @@ -18,7 +18,8 @@ syntax = 
"proto3"; package flwr.proto; message DoubleList { repeated double vals = 1; } -message Sint64List { repeated sint64 vals = 1; } +message SintList { repeated sint64 vals = 1; } +message UintList { repeated uint64 vals = 1; } message BoolList { repeated bool vals = 1; } message StringList { repeated string vals = 1; } message BytesList { repeated bytes vals = 1; } @@ -35,10 +36,12 @@ message MetricsRecordValue { // Single element double double = 1; sint64 sint64 = 2; + uint64 uint64 = 3; // List types DoubleList double_list = 21; - Sint64List sint64_list = 22; + SintList sint_list = 22; + UintList uint_list = 23; } } @@ -47,16 +50,18 @@ message ConfigsRecordValue { // Single element double double = 1; sint64 sint64 = 2; - bool bool = 3; - string string = 4; - bytes bytes = 5; + uint64 uint64 = 3; + bool bool = 4; + string string = 5; + bytes bytes = 6; // List types DoubleList double_list = 21; - Sint64List sint64_list = 22; - BoolList bool_list = 23; - StringList string_list = 24; - BytesList bytes_list = 25; + SintList sint_list = 22; + UintList uint_list = 23; + BoolList bool_list = 24; + StringList string_list = 25; + BytesList bytes_list = 26; } } diff --git a/src/proto/flwr/proto/run.proto b/src/proto/flwr/proto/run.proto index 6adca5c2437b..4312e1127cc2 100644 --- a/src/proto/flwr/proto/run.proto +++ b/src/proto/flwr/proto/run.proto @@ -17,14 +17,53 @@ syntax = "proto3"; package flwr.proto; +import "flwr/proto/fab.proto"; +import "flwr/proto/node.proto"; import "flwr/proto/transport.proto"; message Run { - sint64 run_id = 1; + uint64 run_id = 1; string fab_id = 2; string fab_version = 3; map override_config = 4; string fab_hash = 5; } -message GetRunRequest { sint64 run_id = 1; } + +message RunStatus { + // "starting", "running", "finished" + string status = 1; + // "completed", "failed", "stopped" or "" (non-finished) + string sub_status = 2; + // failure details + string details = 3; +} + +// CreateRun +message CreateRunRequest { + string fab_id = 1; + 
string fab_version = 2; + map override_config = 3; + Fab fab = 4; +} +message CreateRunResponse { uint64 run_id = 1; } + +// GetRun +message GetRunRequest { + Node node = 1; + uint64 run_id = 2; +} message GetRunResponse { Run run = 1; } + +// UpdateRunStatus +message UpdateRunStatusRequest { + uint64 run_id = 1; + RunStatus run_status = 2; +} +message UpdateRunStatusResponse {} + +// GetRunStatus +message GetRunStatusRequest { + Node node = 1; + repeated uint64 run_ids = 2; +} +message GetRunStatusResponse { map run_status_dict = 1; } diff --git a/src/proto/flwr/proto/task.proto b/src/proto/flwr/proto/task.proto index 936b8120e495..324a70a5359c 100644 --- a/src/proto/flwr/proto/task.proto +++ b/src/proto/flwr/proto/task.proto @@ -37,13 +37,13 @@ message Task { message TaskIns { string task_id = 1; string group_id = 2; - sint64 run_id = 3; + uint64 run_id = 3; Task task = 4; } message TaskRes { string task_id = 1; string group_id = 2; - sint64 run_id = 3; + uint64 run_id = 3; Task task = 4; } diff --git a/src/proto/flwr/proto/transport.proto b/src/proto/flwr/proto/transport.proto index 17a285ebe44b..6a4f45aa3c97 100644 --- a/src/proto/flwr/proto/transport.proto +++ b/src/proto/flwr/proto/transport.proto @@ -107,7 +107,7 @@ message Scalar { // int32 int32 = 3; // int64 int64 = 4; // uint32 uint32 = 5; - // uint64 uint64 = 6; + uint64 uint64 = 6; // sint32 sint32 = 7; sint64 sint64 = 8; // fixed32 fixed32 = 9; diff --git a/src/py/flwr/cli/app.py b/src/py/flwr/cli/app.py index d1b270026cd7..8baccb4638fc 100644 --- a/src/py/flwr/cli/app.py +++ b/src/py/flwr/cli/app.py @@ -18,8 +18,8 @@ from typer.main import get_command from .build import build -from .example import example from .install import install +from .log import log from .new import new from .run import run @@ -33,10 +33,10 @@ ) app.command()(new) -app.command()(example) app.command()(run) app.command()(build) app.command()(install) +app.command()(log) typer_click_object = get_command(app) diff --git 
a/src/py/flwr/cli/build.py b/src/py/flwr/cli/build.py index 676bc1723568..4c9dca4ebcf1 100644 --- a/src/py/flwr/cli/build.py +++ b/src/py/flwr/cli/build.py @@ -14,27 +14,50 @@ # ============================================================================== """Flower command line interface `build` command.""" +import hashlib import os +import shutil +import tempfile import zipfile from pathlib import Path -from typing import Optional +from typing import Annotated, Any, Optional, Union import pathspec import tomli_w import typer -from typing_extensions import Annotated + +from flwr.common.constant import FAB_ALLOWED_EXTENSIONS, FAB_DATE, FAB_HASH_TRUNCATION from .config_utils import load_and_validate -from .utils import get_sha256_hash, is_valid_project_name +from .utils import is_valid_project_name + + +def write_to_zip( + zipfile_obj: zipfile.ZipFile, filename: str, contents: Union[bytes, str] +) -> zipfile.ZipFile: + """Set a fixed date and write contents to a zip file.""" + zip_info = zipfile.ZipInfo(filename) + zip_info.date_time = FAB_DATE + zipfile_obj.writestr(zip_info, contents) + return zipfile_obj + +def get_fab_filename(conf: dict[str, Any], fab_hash: str) -> str: + """Get the FAB filename based on the given config and FAB hash.""" + publisher = conf["tool"]["flwr"]["app"]["publisher"] + name = conf["project"]["name"] + version = conf["project"]["version"].replace(".", "-") + fab_hash_truncated = fab_hash[:FAB_HASH_TRUNCATION] + return f"{publisher}.{name}.{version}.{fab_hash_truncated}.fab" -# pylint: disable=too-many-locals + +# pylint: disable=too-many-locals, too-many-statements def build( app: Annotated[ Optional[Path], typer.Option(help="Path of the Flower App to bundle into a FAB"), ] = None, -) -> str: +) -> tuple[str, str]: """Build a Flower App into a Flower App Bundle (FAB). 
You can run ``flwr build`` without any arguments to bundle the app located in the @@ -86,16 +109,8 @@ def build( # Load .gitignore rules if present ignore_spec = _load_gitignore(app) - # Set the name of the zip file - fab_filename = ( - f"{conf['tool']['flwr']['app']['publisher']}" - f".{conf['project']['name']}" - f".{conf['project']['version'].replace('.', '-')}.fab" - ) list_file_content = "" - allowed_extensions = {".py", ".toml", ".md"} - # Remove the 'federations' field from 'tool.flwr' if it exists if ( "tool" in conf @@ -106,38 +121,53 @@ def build( toml_contents = tomli_w.dumps(conf) - with zipfile.ZipFile(fab_filename, "w", zipfile.ZIP_DEFLATED) as fab_file: - fab_file.writestr("pyproject.toml", toml_contents) + with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as temp_file: + temp_filename = temp_file.name + + with zipfile.ZipFile(temp_filename, "w", zipfile.ZIP_DEFLATED) as fab_file: + write_to_zip(fab_file, "pyproject.toml", toml_contents) - # Continue with adding other files - for root, _, files in os.walk(app, topdown=True): - files = [ + # Continue with adding other files + all_files = [ f - for f in files - if not ignore_spec.match_file(Path(root) / f) - and f != fab_filename - and Path(f).suffix in allowed_extensions - and f != "pyproject.toml" # Exclude the original pyproject.toml + for f in app.rglob("*") + if not ignore_spec.match_file(f) + and f.name != temp_filename + and f.suffix in FAB_ALLOWED_EXTENSIONS + and f.name != "pyproject.toml" # Exclude the original pyproject.toml ] - for file in files: - file_path = Path(root) / file + for file_path in all_files: + # Read the file content manually + with open(file_path, "rb") as f: + file_contents = f.read() + archive_path = file_path.relative_to(app) - fab_file.write(file_path, archive_path) + write_to_zip(fab_file, str(archive_path), file_contents) # Calculate file info - sha256_hash = get_sha256_hash(file_path) + sha256_hash = hashlib.sha256(file_contents).hexdigest() 
file_size_bits = os.path.getsize(file_path) * 8 # size in bits list_file_content += f"{archive_path},{sha256_hash},{file_size_bits}\n" - # Add CONTENT and CONTENT.jwt to the zip file - fab_file.writestr(".info/CONTENT", list_file_content) + # Add CONTENT and CONTENT.jwt to the zip file + write_to_zip(fab_file, ".info/CONTENT", list_file_content) + + # Get hash of FAB file + content = Path(temp_filename).read_bytes() + fab_hash = hashlib.sha256(content).hexdigest() + + # Set the name of the zip file + fab_filename = get_fab_filename(conf, fab_hash) + + # Once the temporary zip file is created, rename it to the final filename + shutil.move(temp_filename, fab_filename) typer.secho( f"🎊 Successfully built {fab_filename}", fg=typer.colors.GREEN, bold=True ) - return fab_filename + return fab_filename, fab_hash def _load_gitignore(app: Path) -> pathspec.PathSpec: diff --git a/src/py/flwr/cli/config_utils.py b/src/py/flwr/cli/config_utils.py index 233d35a5fa17..73ce779c3b5c 100644 --- a/src/py/flwr/cli/config_utils.py +++ b/src/py/flwr/cli/config_utils.py @@ -17,7 +17,7 @@ import zipfile from io import BytesIO from pathlib import Path -from typing import IO, Any, Dict, List, Optional, Tuple, Union, get_args +from typing import IO, Any, Optional, Union, get_args import tomli @@ -25,7 +25,7 @@ from flwr.common.typing import UserConfigValue -def get_fab_config(fab_file: Union[Path, bytes]) -> Dict[str, Any]: +def get_fab_config(fab_file: Union[Path, bytes]) -> dict[str, Any]: """Extract the config from a FAB file or path. Parameters @@ -62,7 +62,7 @@ def get_fab_config(fab_file: Union[Path, bytes]) -> Dict[str, Any]: return conf -def get_fab_metadata(fab_file: Union[Path, bytes]) -> Tuple[str, str]: +def get_fab_metadata(fab_file: Union[Path, bytes]) -> tuple[str, str]: """Extract the fab_id and the fab_version from a FAB file or path. 
Parameters @@ -87,9 +87,19 @@ def get_fab_metadata(fab_file: Union[Path, bytes]) -> Tuple[str, str]: def load_and_validate( path: Optional[Path] = None, check_module: bool = True, -) -> Tuple[Optional[Dict[str, Any]], List[str], List[str]]: +) -> tuple[Optional[dict[str, Any]], list[str], list[str]]: """Load and validate pyproject.toml as dict. + Parameters + ---------- + path : Optional[Path] (default: None) + The path of the Flower App config file to load. By default it + will try to use `pyproject.toml` inside the current directory. + check_module: bool (default: True) + Whether the validity of the Python module should be checked. + This requires the project to be installed in the currently + running environment. True by default. + Returns ------- Tuple[Optional[config], List[str], List[str]] @@ -116,7 +126,7 @@ def load_and_validate( return (config, errors, warnings) -def load(toml_path: Path) -> Optional[Dict[str, Any]]: +def load(toml_path: Path) -> Optional[dict[str, Any]]: """Load pyproject.toml and return as dict.""" if not toml_path.is_file(): return None @@ -125,7 +135,7 @@ def load(toml_path: Path) -> Optional[Dict[str, Any]]: return load_from_string(toml_file.read()) -def _validate_run_config(config_dict: Dict[str, Any], errors: List[str]) -> None: +def _validate_run_config(config_dict: dict[str, Any], errors: list[str]) -> None: for key, value in config_dict.items(): if isinstance(value, dict): _validate_run_config(config_dict[key], errors) @@ -137,7 +147,7 @@ def _validate_run_config(config_dict: Dict[str, Any], errors: List[str]) -> None # pylint: disable=too-many-branches -def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]]: +def validate_fields(config: dict[str, Any]) -> tuple[bool, list[str], list[str]]: """Validate pyproject.toml fields.""" errors = [] warnings = [] @@ -183,10 +193,10 @@ def validate_fields(config: Dict[str, Any]) -> Tuple[bool, List[str], List[str]] def validate( - config: Dict[str, Any], + config: 
dict[str, Any], check_module: bool = True, project_dir: Optional[Union[str, Path]] = None, -) -> Tuple[bool, List[str], List[str]]: +) -> tuple[bool, list[str], list[str]]: """Validate pyproject.toml.""" is_valid, errors, warnings = validate_fields(config) @@ -210,7 +220,7 @@ def validate( return True, [], [] -def load_from_string(toml_content: str) -> Optional[Dict[str, Any]]: +def load_from_string(toml_content: str) -> Optional[dict[str, Any]]: """Load TOML content from a string and return as dict.""" try: data = tomli.loads(toml_content) diff --git a/src/py/flwr/cli/config_utils_test.py b/src/py/flwr/cli/config_utils_test.py index cad6714521e3..ddabc152bc0f 100644 --- a/src/py/flwr/cli/config_utils_test.py +++ b/src/py/flwr/cli/config_utils_test.py @@ -17,7 +17,7 @@ import os import textwrap from pathlib import Path -from typing import Any, Dict +from typing import Any from .config_utils import load, validate, validate_fields @@ -155,7 +155,7 @@ def test_load_pyproject_toml_from_path(tmp_path: Path) -> None: def test_validate_pyproject_toml_fields_empty() -> None: """Test that validate_pyproject_toml_fields fails correctly.""" # Prepare - config: Dict[str, Any] = {} + config: dict[str, Any] = {} # Execute is_valid, errors, warnings = validate_fields(config) diff --git a/src/py/flwr/cli/install.py b/src/py/flwr/cli/install.py index 4318ccdf9ffb..7451aa3d2326 100644 --- a/src/py/flwr/cli/install.py +++ b/src/py/flwr/cli/install.py @@ -14,19 +14,19 @@ # ============================================================================== """Flower command line interface `install` command.""" - +import hashlib import shutil import subprocess import tempfile import zipfile from io import BytesIO from pathlib import Path -from typing import IO, Optional, Union +from typing import IO, Annotated, Optional, Union import typer -from typing_extensions import Annotated -from flwr.common.config import get_flwr_dir +from flwr.common.config import get_flwr_dir, 
get_metadata_from_config +from flwr.common.constant import FAB_HASH_TRUNCATION from .config_utils import load_and_validate from .utils import get_sha256_hash @@ -92,9 +92,11 @@ def install_from_fab( fab_name: Optional[str] if isinstance(fab_file, bytes): fab_file_archive = BytesIO(fab_file) + fab_hash = hashlib.sha256(fab_file).hexdigest() fab_name = None elif isinstance(fab_file, Path): fab_file_archive = fab_file + fab_hash = hashlib.sha256(fab_file.read_bytes()).hexdigest() fab_name = fab_file.stem else: raise ValueError("fab_file must be either a Path or bytes") @@ -127,14 +129,16 @@ def install_from_fab( shutil.rmtree(info_dir) installed_path = validate_and_install( - tmpdir_path, fab_name, flwr_dir, skip_prompt + tmpdir_path, fab_hash, fab_name, flwr_dir, skip_prompt ) return installed_path +# pylint: disable=too-many-locals def validate_and_install( project_dir: Path, + fab_hash: str, fab_name: Optional[str], flwr_dir: Optional[Path], skip_prompt: bool = False, @@ -150,28 +154,17 @@ def validate_and_install( ) raise typer.Exit(code=1) - publisher = config["tool"]["flwr"]["app"]["publisher"] - project_name = config["project"]["name"] - version = config["project"]["version"] + version, fab_id = get_metadata_from_config(config) + publisher, project_name = fab_id.split("/") + config_metadata = (publisher, project_name, version, fab_hash) - if ( - fab_name - and fab_name != f"{publisher}.{project_name}.{version.replace('.', '-')}" - ): - typer.secho( - "❌ FAB file has incorrect name. 
The file name must follow the format " - "`...fab`.", - fg=typer.colors.RED, - bold=True, - ) - raise typer.Exit(code=1) + if fab_name: + _validate_fab_and_config_metadata(fab_name, config_metadata) install_dir: Path = ( (get_flwr_dir() if not flwr_dir else flwr_dir) / "apps" - / publisher - / project_name - / version + / f"{publisher}.{project_name}.{version}.{fab_hash[:FAB_HASH_TRUNCATION]}" ) if install_dir.exists(): if skip_prompt: @@ -227,3 +220,49 @@ def _verify_hashes(list_content: str, tmpdir: Path) -> bool: if not file_path.exists() or get_sha256_hash(file_path) != hash_expected: return False return True + + +def _validate_fab_and_config_metadata( + fab_name: str, config_metadata: tuple[str, str, str, str] +) -> None: + """Validate metadata from the FAB filename and config.""" + publisher, project_name, version, fab_hash = config_metadata + + fab_name = fab_name.removesuffix(".fab") + + fab_publisher, fab_project_name, fab_version, fab_shorthash = fab_name.split(".") + fab_version = fab_version.replace("-", ".") + + # Check FAB filename format + if ( + f"{fab_publisher}.{fab_project_name}.{fab_version}" + != f"{publisher}.{project_name}.{version}" + or len(fab_shorthash) != FAB_HASH_TRUNCATION # Verify hash length + ): + typer.secho( + "❌ FAB file has incorrect name. 
The file name must follow the format " + "`...<8hexchars>.fab`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + # Verify hash is a valid hexadecimal + try: + _ = int(fab_shorthash, 16) + except Exception as e: + typer.secho( + f"❌ FAB file has an invalid hexadecimal string `{fab_shorthash}`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) from e + + # Verify shorthash matches + if fab_shorthash != fab_hash[:FAB_HASH_TRUNCATION]: + typer.secho( + "❌ The hash in the FAB file name does not match the hash of the FAB.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) diff --git a/src/py/flwr/cli/log.py b/src/py/flwr/cli/log.py new file mode 100644 index 000000000000..7199cefce4f7 --- /dev/null +++ b/src/py/flwr/cli/log.py @@ -0,0 +1,234 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower command line interface `log` command.""" + +import sys +import time +from logging import DEBUG, ERROR, INFO +from pathlib import Path +from typing import Annotated, Optional + +import grpc +import typer + +from flwr.cli.config_utils import load_and_validate +from flwr.common.grpc import GRPC_MAX_MESSAGE_LENGTH, create_channel +from flwr.common.logger import log as logger +from flwr.proto.exec_pb2 import StreamLogsRequest # pylint: disable=E0611 +from flwr.proto.exec_pb2_grpc import ExecStub + +CONN_REFRESH_PERIOD = 60 # Connection refresh period for log streaming (seconds) + + +def start_stream( + run_id: int, channel: grpc.Channel, refresh_period: int = CONN_REFRESH_PERIOD +) -> None: + """Start log streaming for a given run ID.""" + try: + while True: + logger(INFO, "Starting logstream for run_id `%s`", run_id) + stream_logs(run_id, channel, refresh_period) + time.sleep(2) + logger(DEBUG, "Reconnecting to logstream") + except KeyboardInterrupt: + logger(INFO, "Exiting logstream") + except grpc.RpcError as e: + # pylint: disable=E1101 + if e.code() == grpc.StatusCode.NOT_FOUND: + logger(ERROR, "Invalid run_id `%s`, exiting", run_id) + if e.code() == grpc.StatusCode.CANCELLED: + pass + finally: + channel.close() + + +def stream_logs(run_id: int, channel: grpc.Channel, duration: int) -> None: + """Stream logs from the beginning of a run with connection refresh.""" + start_time = time.time() + stub = ExecStub(channel) + req = StreamLogsRequest(run_id=run_id) + + for res in stub.StreamLogs(req): + print(res.log_output) + if time.time() - start_time > duration: + break + + +def print_logs(run_id: int, channel: grpc.Channel, timeout: int) -> None: + """Print logs from the beginning of a run.""" + stub = ExecStub(channel) + req = StreamLogsRequest(run_id=run_id) + + try: + while True: + try: + # Enforce timeout for graceful exit + for res in stub.StreamLogs(req, timeout=timeout): 
+ print(res.log_output) + except grpc.RpcError as e: + # pylint: disable=E1101 + if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: + break + if e.code() == grpc.StatusCode.NOT_FOUND: + logger(ERROR, "Invalid run_id `%s`, exiting", run_id) + break + if e.code() == grpc.StatusCode.CANCELLED: + break + except KeyboardInterrupt: + logger(DEBUG, "Stream interrupted by user") + finally: + channel.close() + logger(DEBUG, "Channel closed") + + +def on_channel_state_change(channel_connectivity: str) -> None: + """Log channel connectivity.""" + logger(DEBUG, channel_connectivity) + + +def log( + run_id: Annotated[ + int, + typer.Argument(help="The Flower run ID to query"), + ], + app: Annotated[ + Path, + typer.Argument(help="Path of the Flower project to run"), + ] = Path("."), + federation: Annotated[ + Optional[str], + typer.Argument(help="Name of the federation to run the app on"), + ] = None, + stream: Annotated[ + bool, + typer.Option( + "--stream/--show", + help="Flag to stream or print logs from the Flower run", + ), + ] = True, +) -> None: + """Get logs from a Flower project run.""" + typer.secho("Loading project configuration... 
", fg=typer.colors.BLUE) + + pyproject_path = app / "pyproject.toml" if app else None + config, errors, warnings = load_and_validate(path=pyproject_path) + + if config is None: + typer.secho( + "Project configuration could not be loaded.\n" + "pyproject.toml is invalid:\n" + + "\n".join([f"- {line}" for line in errors]), + fg=typer.colors.RED, + bold=True, + ) + sys.exit() + + if warnings: + typer.secho( + "Project configuration is missing the following " + "recommended properties:\n" + "\n".join([f"- {line}" for line in warnings]), + fg=typer.colors.RED, + bold=True, + ) + + typer.secho("Success", fg=typer.colors.GREEN) + + federation = federation or config["tool"]["flwr"]["federations"].get("default") + + if federation is None: + typer.secho( + "❌ No federation name was provided and the project's `pyproject.toml` " + "doesn't declare a default federation (with a SuperExec address or an " + "`options.num-supernodes` value).", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + # Validate the federation exists in the configuration + federation_config = config["tool"]["flwr"]["federations"].get(federation) + if federation_config is None: + available_feds = { + fed for fed in config["tool"]["flwr"]["federations"] if fed != "default" + } + typer.secho( + f"❌ There is no `{federation}` federation declared in the " + "`pyproject.toml`.\n The following federations were found:\n\n" + + "\n".join(available_feds), + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + if "address" not in federation_config: + typer.secho( + "❌ `flwr log` currently works with `SuperExec`. 
Ensure that the correct" + "`SuperExec` address is provided in the `pyproject.toml`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + _log_with_superexec(federation_config, run_id, stream) + + +# pylint: disable-next=too-many-branches +def _log_with_superexec( + federation_config: dict[str, str], + run_id: int, + stream: bool, +) -> None: + insecure_str = federation_config.get("insecure") + if root_certificates := federation_config.get("root-certificates"): + root_certificates_bytes = Path(root_certificates).read_bytes() + if insecure := bool(insecure_str): + typer.secho( + "❌ `root_certificates` were provided but the `insecure` parameter" + "is set to `True`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + else: + root_certificates_bytes = None + if insecure_str is None: + typer.secho( + "❌ To disable TLS, set `insecure = true` in `pyproject.toml`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + if not (insecure := bool(insecure_str)): + typer.secho( + "❌ No certificate were given yet `insecure` is set to `False`.", + fg=typer.colors.RED, + bold=True, + ) + raise typer.Exit(code=1) + + channel = create_channel( + server_address=federation_config["address"], + insecure=insecure, + root_certificates=root_certificates_bytes, + max_message_length=GRPC_MAX_MESSAGE_LENGTH, + interceptors=None, + ) + channel.subscribe(on_channel_state_change) + + if stream: + start_stream(run_id, channel, CONN_REFRESH_PERIOD) + else: + logger(INFO, "Printing logstream for run_id `%s`", run_id) + print_logs(run_id, channel, timeout=5) diff --git a/src/py/flwr/cli/log_test.py b/src/py/flwr/cli/log_test.py new file mode 100644 index 000000000000..932610bea2f3 --- /dev/null +++ b/src/py/flwr/cli/log_test.py @@ -0,0 +1,78 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Test for Flower command line interface `log` command.""" + + +import unittest +from typing import NoReturn +from unittest.mock import Mock, call, patch + +from flwr.proto.exec_pb2 import StreamLogsResponse # pylint: disable=E0611 + +from .log import print_logs, stream_logs + + +class InterruptedStreamLogsResponse: + """Create a StreamLogsResponse object with KeyboardInterrupt.""" + + @property + def log_output(self) -> NoReturn: + """Raise KeyboardInterrupt to exit logstream test gracefully.""" + raise KeyboardInterrupt + + +class TestFlwrLog(unittest.TestCase): + """Unit tests for `flwr log` CLI functions.""" + + def setUp(self) -> None: + """Initialize mock ExecStub before each test.""" + self.expected_calls = [ + call("log_output_1"), + call("log_output_2"), + call("log_output_3"), + ] + mock_response_iterator = [ + iter( + [StreamLogsResponse(log_output=f"log_output_{i}") for i in range(1, 4)] + + [InterruptedStreamLogsResponse()] + ) + ] + self.mock_stub = Mock() + self.mock_stub.StreamLogs.side_effect = mock_response_iterator + self.patcher = patch("flwr.cli.log.ExecStub", return_value=self.mock_stub) + + self.patcher.start() + + # Create mock channel + self.mock_channel = Mock() + + def tearDown(self) -> None: + """Cleanup.""" + self.patcher.stop() + + def test_flwr_log_stream_method(self) -> None: + """Test stream_logs.""" + with patch("builtins.print") as mock_print: + with self.assertRaises(KeyboardInterrupt): + stream_logs(run_id=123, channel=self.mock_channel, 
duration=1) + # Assert that mock print was called with the expected arguments + mock_print.assert_has_calls(self.expected_calls) + + def test_flwr_log_print_method(self) -> None: + """Test print_logs.""" + with patch("builtins.print") as mock_print: + print_logs(run_id=123, channel=self.mock_channel, timeout=0) + # Assert that mock print was called with the expected arguments + mock_print.assert_has_calls(self.expected_calls) diff --git a/src/py/flwr/cli/new/new.py b/src/py/flwr/cli/new/new.py index 862244da9158..3cbde991ff6e 100644 --- a/src/py/flwr/cli/new/new.py +++ b/src/py/flwr/cli/new/new.py @@ -18,10 +18,9 @@ from enum import Enum from pathlib import Path from string import Template -from typing import Dict, Optional +from typing import Annotated, Optional import typer -from typing_extensions import Annotated from ..utils import ( is_valid_project_name, @@ -42,6 +41,7 @@ class MlFramework(str, Enum): MLX = "MLX" NUMPY = "NumPy" FLOWERTUNE = "FlowerTune" + BASELINE = "Flower Baseline" class LlmChallengeName(str, Enum): @@ -69,7 +69,7 @@ def load_template(name: str) -> str: return tpl_file.read() -def render_template(template: str, data: Dict[str, str]) -> str: +def render_template(template: str, data: dict[str, str]) -> str: """Render template.""" tpl_file = load_template(template) tpl = Template(tpl_file) @@ -84,7 +84,7 @@ def create_file(file_path: Path, content: str) -> None: file_path.write_text(content) -def render_and_create(file_path: Path, template: str, context: Dict[str, str]) -> None: +def render_and_create(file_path: Path, template: str, context: dict[str, str]) -> None: """Render template and write to file.""" content = render_template(template, context) create_file(file_path, content) @@ -135,34 +135,23 @@ def new( username = prompt_text("Please provide your Flower username") if framework is not None: - framework_str_upper = str(framework.value) + framework_str = str(framework.value) else: - framework_value = prompt_options( + framework_str = 
prompt_options( "Please select ML framework by typing in the number", [mlf.value for mlf in MlFramework], ) - selected_value = [ - name - for name, value in vars(MlFramework).items() - if value == framework_value - ] - framework_str_upper = selected_value[0] - - framework_str = framework_str_upper.lower() llm_challenge_str = None - if framework_str == "flowertune": + if framework_str == MlFramework.FLOWERTUNE: llm_challenge_value = prompt_options( "Please select LLM challenge by typing in the number", sorted([challenge.value for challenge in LlmChallengeName]), ) - selected_value = [ - name - for name, value in vars(LlmChallengeName).items() - if value == llm_challenge_value - ] - llm_challenge_str = selected_value[0] - llm_challenge_str = llm_challenge_str.lower() + llm_challenge_str = llm_challenge_value.lower() + + if framework_str == MlFramework.BASELINE: + framework_str = "baseline" print( typer.style( @@ -173,38 +162,36 @@ def new( ) context = { - "framework_str": framework_str_upper, + "framework_str": framework_str, "import_name": import_name.replace("-", "_"), "package_name": package_name, "project_name": app_name, "username": username, } + template_name = framework_str.lower() + # List of files to render if llm_challenge_str: files = { ".gitignore": {"template": "app/.gitignore.tpl"}, - "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"}, - "README.md": {"template": f"app/README.{framework_str}.md.tpl"}, + "pyproject.toml": {"template": f"app/pyproject.{template_name}.toml.tpl"}, + "README.md": {"template": f"app/README.{template_name}.md.tpl"}, f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"}, - f"{import_name}/server.py": { - "template": "app/code/flwr_tune/server.py.tpl" + f"{import_name}/server_app.py": { + "template": "app/code/flwr_tune/server_app.py.tpl" }, - f"{import_name}/client.py": { - "template": "app/code/flwr_tune/client.py.tpl" + f"{import_name}/client_app.py": { + "template": 
"app/code/flwr_tune/client_app.py.tpl" }, - f"{import_name}/app.py": {"template": "app/code/flwr_tune/app.py.tpl"}, f"{import_name}/models.py": { "template": "app/code/flwr_tune/models.py.tpl" }, f"{import_name}/dataset.py": { "template": "app/code/flwr_tune/dataset.py.tpl" }, - f"{import_name}/conf/config.yaml": { - "template": "app/code/flwr_tune/config.yaml.tpl" - }, - f"{import_name}/conf/static_config.yaml": { - "template": "app/code/flwr_tune/static_config.yaml.tpl" + f"{import_name}/strategy.py": { + "template": "app/code/flwr_tune/strategy.py.tpl" }, } @@ -236,29 +223,44 @@ def new( files = { ".gitignore": {"template": "app/.gitignore.tpl"}, "README.md": {"template": "app/README.md.tpl"}, - "pyproject.toml": {"template": f"app/pyproject.{framework_str}.toml.tpl"}, + "pyproject.toml": {"template": f"app/pyproject.{template_name}.toml.tpl"}, f"{import_name}/__init__.py": {"template": "app/code/__init__.py.tpl"}, f"{import_name}/server_app.py": { - "template": f"app/code/server.{framework_str}.py.tpl" + "template": f"app/code/server.{template_name}.py.tpl" }, f"{import_name}/client_app.py": { - "template": f"app/code/client.{framework_str}.py.tpl" + "template": f"app/code/client.{template_name}.py.tpl" }, } # Depending on the framework, generate task.py file frameworks_with_tasks = [ - MlFramework.PYTORCH.value.lower(), - MlFramework.JAX.value.lower(), - MlFramework.HUGGINGFACE.value.lower(), - MlFramework.MLX.value.lower(), - MlFramework.TENSORFLOW.value.lower(), + MlFramework.PYTORCH.value, + MlFramework.JAX.value, + MlFramework.HUGGINGFACE.value, + MlFramework.MLX.value, + MlFramework.TENSORFLOW.value, + MlFramework.SKLEARN.value, + MlFramework.NUMPY.value, ] if framework_str in frameworks_with_tasks: files[f"{import_name}/task.py"] = { - "template": f"app/code/task.{framework_str}.py.tpl" + "template": f"app/code/task.{template_name}.py.tpl" } + if framework_str == "baseline": + # Include additional files for baseline template + for file_name in ["model", 
"dataset", "strategy", "utils", "__init__"]: + files[f"{import_name}/{file_name}.py"] = { + "template": f"app/code/{file_name}.{template_name}.py.tpl" + } + + # Replace README.md + files["README.md"]["template"] = f"app/README.{template_name}.md.tpl" + + # Add LICENSE + files["LICENSE"] = {"template": "app/LICENSE.tpl"} + for file_path, value in files.items(): render_and_create( file_path=project_dir / file_path, @@ -275,7 +277,7 @@ def new( ) ) - _add = " huggingface-cli login\n" if framework_str == "flowertune" else "" + _add = " huggingface-cli login\n" if llm_challenge_str else "" print( typer.style( f" cd {package_name}\n" + " pip install -e .\n" + _add + " flwr run\n", diff --git a/src/py/flwr/cli/new/templates/app/LICENSE.tpl b/src/py/flwr/cli/new/templates/app/LICENSE.tpl new file mode 100644 index 000000000000..7a4a3ea2424c --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/LICENSE.tpl @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/src/py/flwr/cli/new/templates/app/README.baseline.md.tpl b/src/py/flwr/cli/new/templates/app/README.baseline.md.tpl new file mode 100644 index 000000000000..9bbbe8f22794 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/README.baseline.md.tpl @@ -0,0 +1,127 @@ +--- +title: title of the paper # TODO +url: https://arxiv.org/abs/2007.14390 # TODO: update with the link to your paper +labels: [label1, label2] # TODO: please add between 4 and 10 single-word (maybe two-words) labels (e.g. system heterogeneity, image classification, asynchronous, weight sharing, cross-silo). Do not use "". Remove this comment once you are done. +dataset: [dataset1, dataset2] # TODO: list of datasets you include in your baseline. Do not use "". Remove this comment once you are done. +--- + +> [!IMPORTANT] +> This is the template for your `README.md`. Please fill-in the information in all areas with a :warning: symbol. +> Please refer to the [Flower Baselines contribution](https://flower.ai/docs/baselines/how-to-contribute-baselines.html) and [Flower Baselines usage](https://flower.ai/docs/baselines/how-to-use-baselines.html) guides for more details. +> Please complete the metadata section at the very top of this README. This generates a table at the top of the file that will facilitate indexing baselines. 
+> Please remove this [!IMPORTANT] block once you are done with your `README.md` as well as all the `:warning:` symbols and the comments next to them. + +> [!IMPORTANT] +> To help having all baselines similarly formatted and structured, we have included two scripts in `baselines/dev` that when run will format your code and run some tests checking if it's formatted. +> These checks use standard packages such as `isort`, `black`, `pylint` and others. You as a baseline creator will need to install additional packages. These are already specified in the `pyproject.toml` of +> your baseline. Follow these steps: + +```bash +# Create a python env +pyenv virtualenv 3.10.14 $project_name + +# Activate it +pyenv activate $project_name + +# Install project including developer packages +# Note the `-e` this means you install it in editable mode +# so even if you change the code you don't need to do `pip install` +# again. However, if you add a new dependency to `pyproject.toml` you +# will need to re-run the command below +pip install -e ".[dev]" + +# Even without modifying or adding new code, you can run your baseline +# with the placeholder code generated when you did `flwr new`. If you +# want to test this to familiarise yourself with how flower apps are +# executed, execute this from the directory where you `pyproject.toml` is: +flwr run . + +# At any point during the process of creating your baseline you can +# run the formatting script. For this do: +cd .. # so you are in the `flower/baselines` directory + +# Run the formatting script (it will auto-correct issues if possible) +./dev/format-baseline.sh $project_name + +# Then, if the above is all good, run the tests. +./dev/test-baseline.sh $project_name +``` + +> [!IMPORTANT] +> When you open a PR to get the baseline merged into the main Flower repository, the `./dev/test-baseline.sh` script will run. Only if tests pass, the baseline can be merged. 
+> Some issues highlighted by the tests script are easier than others to fix. Do not hesitate in reaching out for help to us (e.g. as a comment in your PR) if you are stuck with these. +> Before opening your PR, please remove the code snippet above as well as all the [!IMPORTANT] message blocks. Yes, including this one. + +# :warning: *_Title of your baseline_* # Also copy this title to the `description` in the `[project]` section of your `pyproject.toml`. + +> [!NOTE] +> If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. + +**Paper:** :warning: *_add the URL of the paper page (not to the .pdf). For instance if you link a paper on ArXiv, add here the URL to the abstract page (e.g. [paper](https://arxiv.org/abs/1512.03385)). If your paper is from a journal or conference proceedings, please follow the same logic._* + +**Authors:** :warning: *_list authors of the paper_* + +**Abstract:** :warning: *_add here the abstract of the paper you are implementing_* + + +## About this baseline + +**What’s implemented:** :warning: *_Concisely describe what experiment(s) (e.g. Figure 1, Table 2, etc) in the publication can be replicated by running the code. Please only use a few sentences._* + +**Datasets:** :warning: *_List the datasets you used (if you used a medium to large dataset, >10GB please also include the sizes of the dataset). We highly recommend using [FlowerDatasets](https://flower.ai/docs/datasets/index.html) to download and partition your dataset. If you have other ways to download the data, you can also use `FlowerDatasets` to partition it._* + +**Hardware Setup:** :warning: *_Give some details about the hardware (e.g. a server with 8x V100 32GB and 256GB of RAM) you used to run the experiments for this baseline. Indicate how long it took to run the experiments. 
Someone out there might not have access to the same resources you have so, could you list the absolute minimum hardware needed to run the experiment in a reasonable amount of time ? (e.g. minimum is 1x 16GB GPU otherwise a client model can’t be trained with a sufficiently large batch size). Could you test this works too?_* + +**Contributors:** :warning: *_let the world know who contributed to this baseline. This could be either your name, your name and affiliation at the time, or your GitHub profile name if you prefer. If multiple contributors signed up for this baseline, please list yourself and your colleagues_* + + +## Experimental Setup + +**Task:** :warning: *_what’s the primary task that is being federated? (e.g. image classification, next-word prediction). If you have experiments for several, please list them_* + +**Model:** :warning: *_provide details about the model you used in your experiments (if more than use a list). If your model is small, describing it as a table would be :100:. Some FL methods do not use an off-the-shelve model (e.g. ResNet18) instead they create your own. If this is your case, please provide a summary here and give pointers to where in the paper (e.g. Appendix B.4) is detailed._* + +**Dataset:** :warning: *_Earlier you listed already the datasets that your baseline uses. Now you should include a breakdown of the details about each of them. Please include information about: how the dataset is partitioned (e.g. LDA with alpha 0.1 as default and all clients have the same number of training examples; or each client gets assigned a different number of samples following a power-law distribution with each client only instances of 2 classes)? if your dataset is naturally partitioned just state “naturally partitioned”; how many partitions there are (i.e. how many clients)? 
Please include this and all information relevant about the dataset and its partitioning into a table._* + +**Training Hyperparameters:** :warning: *_Include a table with all the main hyperparameters in your baseline. Please show them with their default value._* + + +## Environment Setup + +:warning: _Specify the steps to create and activate your environment and install the baseline project. Most baselines are expected to require minimal steps as shown below. These instructions should be comprehensive enough so anyone can run them (if non standard, describe them step-by-step)._ + +:warning: _The dependencies for your baseline are listed in the `pyproject.toml`, extend it with additional packages needed for your baseline._ + +:warning: _Baselines should use Python 3.10, [pyenv](https://github.com/pyenv/pyenv), and the [virtualenv](https://github.com/pyenv/pyenv-virtualenv) plugin._ + +```bash +# Create the virtual environment +pyenv virtualenv 3.10.14 + +# Activate it +pyenv activate + +# Install the baseline +pip install -e . +``` + +:warning: _If your baseline requires running some script before starting an experiment, please indicate so here_. + +## Running the Experiments + +:warning: _Make sure you have adjusted the `client-resources` in the federation in `pyproject.toml` so your simulation makes the best use of the system resources available._ + +:warning: _Your baseline implementation should replicate several of the experiments in the original paper. Please include here the exact command(s) needed to run each of those experiments followed by a figure (e.g. a line plot) or table showing the results you obtained when you ran the code. Below is an example of how you can present this. Please add command followed by results for all your experiments._ + +:warning: _You might want to add more hyperparameters and settings for your baseline. You can do so by extending `[tool.flwr.app.config]` in `pyproject.toml`. 
In addition, you can create a new `.toml` file that can be passed with the `--run-config` command (see below an example) to override several config values **already present** in `pyproject.toml`._* +```bash +# it is likely that for one experiment you need to override some arguments. +flwr run . --run-config learning-rate=0.1,coefficient=0.123 + +# or you might want to load different `.toml` configs all together: +flwr run . --run-config .toml +``` + +:warning: _It is preferable to show a single command (or multiple commands if they belong to the same experiment) and then a table/plot with the expected results, instead of showing all the commands first and then all the results/plots._ +:warning: _If you present plots or other figures, please include either a Jupyter notebook showing how to create them or include a utility function that can be called after the experiments finish running._ +:warning: If you include plots or figures, save them in `.png` format and place them in a new directory named `_static` at the same level as your `README.md`. diff --git a/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl b/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl index 2b59937e4130..2703f0a86a3e 100644 --- a/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl +++ b/src/py/flwr/cli/new/templates/app/README.flowertune.md.tpl @@ -23,10 +23,12 @@ pip install -e . ## Experimental setup -The dataset is partitioned into $num_clients shards with IID fashion serving as clients. -We randomly sample $fraction_fit clients to be available for each round, -and the federated fine-tuning lasts for `200` rounds. -All settings are defined in `$project_name/conf/static_config.yaml`, which is not allowed to be modified for fair competition if you plan to participated in the [LLM leaderboard](https://flower.ai/benchmarks/llm-leaderboard). +The dataset is divided into $num_clients partitions in an IID fashion, a partition is assigned to each ClientApp. 
+We randomly sample a fraction ($fraction_fit) of the total nodes to participate in each round, for a total of `200` rounds. +All settings are defined in `pyproject.toml`. + +> [!IMPORTANT] +> Please note that `[tool.flwr.app.config.static]` and `options.num-supernodes` under `[tool.flwr.federations.local-simulation]` are not allowed to be modified for fair competition if you plan to participate in the [LLM leaderboard](https://flower.ai/benchmarks/llm-leaderboard). ## Running the challenge @@ -39,7 +41,7 @@ huggingface-cli login ``` Run the challenge with default config values. -The configs are in `$project_name/conf/config.yaml` and `$project_name/conf/static_config.yaml`, and are loaded automatically. +The configs are defined in `[tool.flwr.app.config]` entry of `pyproject.toml`, and are loaded automatically. ```bash flwr run @@ -53,4 +55,12 @@ We use Mistral-7B model with 4-bit quantization as default. The estimated VRAM c | :--------: | :--------: | :--------: | :--------: | :--------: | | VRAM | ~25.50 GB | ~17.30 GB | ~22.80 GB | ~17.40 GB | -You can adjust the CPU/GPU resources you assign to each of the clients based on your device, which is specified with `flower.engine.simulation` in `pyproject.toml`. +You can adjust the CPU/GPU resources you assign to each of the clients based on your device, which are specified with `options.backend.client-resources.num-cpus` and `options.backend.client-resources.num-gpus` under `[tool.flwr.federations.local-simulation]` entry in `pyproject.toml`. + + +## Model saving + +The global PEFT model checkpoints are saved every 5 rounds after aggregation on the server side as default, which can be specified with `train.save-every-round` under [tool.flwr.app.config] entry in `pyproject.toml`. + +> [!NOTE] +> Please provide the last PEFT checkpoint if you plan to participate in the [LLM leaderboard](https://flower.ai/benchmarks/llm-leaderboard). 
diff --git a/src/py/flwr/cli/new/templates/app/code/__init__.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/__init__.baseline.py.tpl new file mode 100644 index 000000000000..5ad8041381d6 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/__init__.baseline.py.tpl @@ -0,0 +1 @@ +"""$project_name: A Flower Baseline.""" diff --git a/src/py/flwr/cli/new/templates/app/code/client.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.baseline.py.tpl new file mode 100644 index 000000000000..83a475f20d27 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/client.baseline.py.tpl @@ -0,0 +1,58 @@ +"""$project_name: A Flower Baseline.""" + +import torch + +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from $import_name.dataset import load_data +from $import_name.model import Net, get_weights, set_weights, test, train + + +class FlowerClient(NumPyClient): + """A class defining the client.""" + + def __init__(self, net, trainloader, valloader, local_epochs): + self.net = net + self.trainloader = trainloader + self.valloader = valloader + self.local_epochs = local_epochs + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) + + def fit(self, parameters, config): + """Train model using this client's data.""" + set_weights(self.net, parameters) + train_loss = train( + self.net, + self.trainloader, + self.local_epochs, + self.device, + ) + return ( + get_weights(self.net), + len(self.trainloader.dataset), + {"train_loss": train_loss}, + ) + + def evaluate(self, parameters, config): + """Evaluate model using this client's data.""" + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.valloader, self.device) + return loss, len(self.valloader.dataset), {"accuracy": accuracy} + + +def client_fn(context: Context): + """Construct a Client that will be run in a ClientApp.""" + # Load model and data + net = Net() + partition_id = 
int(context.node_config["partition-id"]) + num_partitions = int(context.node_config["num-partitions"]) + trainloader, valloader = load_data(partition_id, num_partitions) + local_epochs = context.run_config["local-epochs"] + + # Return Client instance + return FlowerClient(net, trainloader, valloader, local_epochs).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl index 3041a69e3aaa..840f938b4ecc 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.huggingface.py.tpl @@ -1,18 +1,11 @@ """$project_name: A Flower / $framework_str app.""" +import torch from flwr.client import ClientApp, NumPyClient from flwr.common import Context from transformers import AutoModelForSequenceClassification -from $import_name.task import ( - get_weights, - load_data, - set_weights, - train, - test, - CHECKPOINT, - DEVICE, -) +from $import_name.task import get_weights, load_data, set_weights, test, train # Flower client @@ -22,37 +15,34 @@ class FlowerClient(NumPyClient): self.trainloader = trainloader self.testloader = testloader self.local_epochs = local_epochs - - def get_parameters(self, config): - return get_weights(self.net) - - def set_parameters(self, parameters): - set_weights(self.net, parameters) + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + self.net.to(self.device) def fit(self, parameters, config): - self.set_parameters(parameters) - train( - self.net, - self.trainloader, - epochs=self.local_epochs, - ) - return self.get_parameters(config={}), len(self.trainloader), {} + set_weights(self.net, parameters) + train(self.net, self.trainloader, epochs=self.local_epochs, device=self.device) + return get_weights(self.net), len(self.trainloader), {} def evaluate(self, parameters, config): - 
self.set_parameters(parameters) - loss, accuracy = test(self.net, self.testloader) + set_weights(self.net, parameters) + loss, accuracy = test(self.net, self.testloader, self.device) return float(loss), len(self.testloader), {"accuracy": accuracy} def client_fn(context: Context): - # Load model and data - net = AutoModelForSequenceClassification.from_pretrained( - CHECKPOINT, num_labels=2 - ).to(DEVICE) + # Get this client's dataset partition partition_id = context.node_config["partition-id"] num_partitions = context.node_config["num-partitions"] - trainloader, valloader = load_data(partition_id, num_partitions) + model_name = context.run_config["model-name"] + trainloader, valloader = load_data(partition_id, num_partitions, model_name) + + # Load model + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + local_epochs = context.run_config["local-epochs"] # Return Client instance diff --git a/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl index 046de57f3cf3..ffe782d274fc 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.jax.py.tpl @@ -1,9 +1,9 @@ """$project_name: A Flower / $framework_str app.""" import jax -from flwr.client import NumPyClient, ClientApp -from flwr.common import Context +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context from $import_name.task import ( evaluation, get_params, @@ -17,37 +17,31 @@ from $import_name.task import ( # Define Flower Client and client_fn class FlowerClient(NumPyClient): - def __init__(self): + def __init__(self, input_dim): self.train_x, self.train_y, self.test_x, self.test_y = load_data() self.grad_fn = jax.grad(loss_fn) - model_shape = self.train_x.shape[1:] - - self.params = load_model(model_shape) - - def get_parameters(self, config): - return 
get_params(self.params) - - def set_parameters(self, parameters): - set_params(self.params, parameters) + self.params = load_model((input_dim,)) def fit(self, parameters, config): - self.set_parameters(parameters) + set_params(self.params, parameters) self.params, loss, num_examples = train( self.params, self.grad_fn, self.train_x, self.train_y ) - parameters = self.get_parameters(config={}) - return parameters, num_examples, {"loss": float(loss)} + return get_params(self.params), num_examples, {"loss": float(loss)} def evaluate(self, parameters, config): - self.set_parameters(parameters) + set_params(self.params, parameters) loss, num_examples = evaluation( self.params, self.grad_fn, self.test_x, self.test_y ) return float(loss), num_examples, {"loss": float(loss)} + def client_fn(context: Context): + input_dim = context.run_config["input-dim"] + # Return Client instance - return FlowerClient().to_client() + return FlowerClient(input_dim).to_client() # Flower ClientApp diff --git a/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl index f3105103842d..157300655a14 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.mlx.py.tpl @@ -3,17 +3,18 @@ import mlx.core as mx import mlx.nn as nn import mlx.optimizers as optim -from flwr.client import NumPyClient, ClientApp -from flwr.common import Context +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.common.config import UserConfig from $import_name.task import ( + MLP, batch_iterate, eval_fn, get_params, load_data, loss_fn, set_params, - MLP, ) @@ -22,37 +23,24 @@ class FlowerClient(NumPyClient): def __init__( self, data, - num_layers, - hidden_dim, + run_config: UserConfig, num_classes, - batch_size, - learning_rate, - num_epochs, ): - self.num_layers = num_layers - self.hidden_dim = hidden_dim - self.num_classes = num_classes - self.batch_size 
= batch_size - self.learning_rate = learning_rate - self.num_epochs = num_epochs + num_layers = run_config["num-layers"] + hidden_dim = run_config["hidden-dim"] + input_dim = run_config["input-dim"] + batch_size = run_config["batch-size"] + learning_rate = run_config["lr"] + self.num_epochs = run_config["local-epochs"] self.train_images, self.train_labels, self.test_images, self.test_labels = data - self.model = MLP( - num_layers, self.train_images.shape[-1], hidden_dim, num_classes - ) + self.model = MLP(num_layers, input_dim, hidden_dim, num_classes) self.optimizer = optim.SGD(learning_rate=learning_rate) self.loss_and_grad_fn = nn.value_and_grad(self.model, loss_fn) - self.num_epochs = num_epochs self.batch_size = batch_size - def get_parameters(self, config): - return get_params(self.model) - - def set_parameters(self, parameters): - set_params(self.model, parameters) - def fit(self, parameters, config): - self.set_parameters(parameters) + set_params(self.model, parameters) for _ in range(self.num_epochs): for X, y in batch_iterate( self.batch_size, self.train_images, self.train_labels @@ -60,10 +48,10 @@ class FlowerClient(NumPyClient): _, grads = self.loss_and_grad_fn(self.model, X, y) self.optimizer.update(self.model, grads) mx.eval(self.model.parameters(), self.optimizer.state) - return self.get_parameters(config={}), len(self.train_images), {} + return get_params(self.model), len(self.train_images), {} def evaluate(self, parameters, config): - self.set_parameters(parameters) + set_params(self.model, parameters) accuracy = eval_fn(self.model, self.test_images, self.test_labels) loss = loss_fn(self.model, self.test_images, self.test_labels) return loss.item(), len(self.test_images), {"accuracy": accuracy.item()} @@ -73,18 +61,10 @@ def client_fn(context: Context): partition_id = context.node_config["partition-id"] num_partitions = context.node_config["num-partitions"] data = load_data(partition_id, num_partitions) - - num_layers = 
context.run_config["num-layers"] - hidden_dim = context.run_config["hidden-dim"] num_classes = 10 - batch_size = context.run_config["batch-size"] - learning_rate = context.run_config["lr"] - num_epochs = context.run_config["local-epochs"] # Return Client instance - return FlowerClient( - data, num_layers, hidden_dim, num_classes, batch_size, learning_rate, num_epochs - ).to_client() + return FlowerClient(data, context.run_config, num_classes).to_client() # Flower ClientApp diff --git a/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl index e35c3c78f6e2..f20bb536b3c6 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.numpy.py.tpl @@ -1,16 +1,15 @@ """$project_name: A Flower / $framework_str app.""" -from flwr.client import NumPyClient, ClientApp +from flwr.client import ClientApp, NumPyClient from flwr.common import Context -import numpy as np +from $import_name.task import get_dummy_model class FlowerClient(NumPyClient): - def get_parameters(self, config): - return [np.ones((1, 1))] def fit(self, parameters, config): - return ([np.ones((1, 1))], 1, {}) + model = get_dummy_model() + return [model], 1, {} def evaluate(self, parameters, config): return float(0.0), 1, {"accuracy": float(1.0)} diff --git a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl index bcade355e22f..e141a34d38ce 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.pytorch.py.tpl @@ -1,17 +1,10 @@ """$project_name: A Flower / $framework_str app.""" import torch -from flwr.client import NumPyClient, ClientApp -from flwr.common import Context -from $import_name.task import ( - Net, - load_data, - get_weights, - set_weights, - train, - test, -) +from flwr.client import ClientApp, NumPyClient +from flwr.common 
import Context +from $import_name.task import Net, get_weights, load_data, set_weights, test, train # Define Flower Client and client_fn @@ -32,7 +25,11 @@ class FlowerClient(NumPyClient): self.local_epochs, self.device, ) - return get_weights(self.net), len(self.trainloader.dataset), {"train_loss": train_loss} + return ( + get_weights(self.net), + len(self.trainloader.dataset), + {"train_loss": train_loss}, + ) def evaluate(self, parameters, config): set_weights(self.net, parameters) diff --git a/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl index 2d3d1c7f163a..69d208ac28c9 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.sklearn.py.tpl @@ -2,40 +2,17 @@ import warnings -import numpy as np -from flwr.client import NumPyClient, ClientApp -from flwr.common import Context -from flwr_datasets import FederatedDataset -from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss - -def get_model_parameters(model): - if model.fit_intercept: - params = [ - model.coef_, - model.intercept_, - ] - else: - params = [model.coef_] - return params - - -def set_model_params(model, params): - model.coef_ = params[0] - if model.fit_intercept: - model.intercept_ = params[1] - return model - - -def set_initial_params(model): - n_classes = 10 # MNIST has 10 classes - n_features = 784 # Number of features in dataset - model.classes_ = np.array([i for i in range(10)]) - - model.coef_ = np.zeros((n_classes, n_features)) - if model.fit_intercept: - model.intercept_ = np.zeros((n_classes,)) +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from $import_name.task import ( + get_model, + get_model_params, + load_data, + set_initial_params, + set_model_params, +) class FlowerClient(NumPyClient): @@ -46,9 +23,6 @@ class FlowerClient(NumPyClient): self.y_train = y_train self.y_test = 
y_test - def get_parameters(self, config): - return get_model_parameters(self.model) - def fit(self, parameters, config): set_model_params(self.model, parameters) @@ -57,7 +31,7 @@ class FlowerClient(NumPyClient): warnings.simplefilter("ignore") self.model.fit(self.X_train, self.y_train) - return get_model_parameters(self.model), len(self.X_train), {} + return get_model_params(self.model), len(self.X_train), {} def evaluate(self, parameters, config): set_model_params(self.model, parameters) @@ -71,21 +45,13 @@ class FlowerClient(NumPyClient): def client_fn(context: Context): partition_id = context.node_config["partition-id"] num_partitions = context.node_config["num-partitions"] - fds = FederatedDataset(dataset="mnist", partitioners={"train": num_partitions}) - dataset = fds.load_partition(partition_id, "train").with_format("numpy") - - X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] - # Split the on edge data: 80% train, 20% test - X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] - y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] + X_train, X_test, y_train, y_test = load_data(partition_id, num_partitions) # Create LogisticRegression Model - model = LogisticRegression( - penalty="l2", - max_iter=1, # local epoch - warm_start=True, # prevent refreshing weights when fitting - ) + penalty = context.run_config["penalty"] + local_epochs = context.run_config["local-epochs"] + model = get_model(penalty, local_epochs) # Setting initial parameters, akin to model.compile for keras models set_initial_params(model) diff --git a/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl b/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl index 48ee3b4f5356..f8c148691561 100644 --- a/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/client.tensorflow.py.tpl @@ -17,9 +17,6 @@ class FlowerClient(NumPyClient): self.batch_size = batch_size self.verbose 
= verbose - def get_parameters(self, config): - return self.model.get_weights() - def fit(self, parameters, config): self.model.set_weights(parameters) self.model.fit( diff --git a/src/py/flwr/cli/new/templates/app/code/dataset.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/dataset.baseline.py.tpl new file mode 100644 index 000000000000..46f1f64418c0 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/dataset.baseline.py.tpl @@ -0,0 +1,36 @@ +"""$project_name: A Flower Baseline.""" + +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor + +FDS = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + """Load partition CIFAR10 data.""" + # Only initialize `FederatedDataset` once + global FDS # pylint: disable=global-statement + if FDS is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + FDS = FederatedDataset( + dataset="uoft-cs/cifar10", + partitioners={"train": partitioner}, + ) + partition = FDS.load_partition(partition_id) + # Divide data on each node: 80% train, 20% test + partition_train_test = partition.train_test_split(test_size=0.2, seed=42) + pytorch_transforms = Compose( + [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] + ) + + def apply_transforms(batch): + """Apply transforms to the partition from FederatedDataset.""" + batch["img"] = [pytorch_transforms(img) for img in batch["img"]] + return batch + + partition_train_test = partition_train_test.with_transform(apply_transforms) + trainloader = DataLoader(partition_train_test["train"], batch_size=32, shuffle=True) + testloader = DataLoader(partition_train_test["test"], batch_size=32) + return trainloader, testloader diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/app.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/app.py.tpl deleted file mode 100644 
index 637658c5b23c..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/app.py.tpl +++ /dev/null @@ -1,89 +0,0 @@ -"""$project_name: A Flower / FlowerTune app.""" - -import os -import warnings -from datetime import datetime - -from flwr_datasets import FederatedDataset -from hydra import compose, initialize -from hydra.utils import instantiate - -from flwr.client import ClientApp -from flwr.common import Context, ndarrays_to_parameters -from flwr.server import ServerApp, ServerAppComponents, ServerConfig - -from $import_name.client_app import gen_client_fn, get_parameters -from $import_name.dataset import get_tokenizer_and_data_collator_and_propt_formatting -from $import_name.models import get_model -from $import_name.server_app import fit_weighted_average, get_evaluate_fn, get_on_fit_config - -# Avoid warnings -warnings.filterwarnings("ignore", category=UserWarning) -os.environ["TOKENIZERS_PARALLELISM"] = "true" -os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1" - -# Initialise regular config -with initialize(config_path="conf", version_base="1.1"): - cfg = compose(config_name="config") - -# Initialise static config -with initialize(config_path="conf", version_base="1.1"): - cfg_static = compose(config_name="static_config") - -cfg.train.num_rounds = cfg_static.num_rounds - -# Create output directory given current timestamp -current_time = datetime.now() -folder_name = current_time.strftime("%Y-%m-%d_%H-%M-%S") -save_path = os.path.join(os.getcwd(), f"results/{folder_name}") -os.makedirs(save_path, exist_ok=True) - -# Partition dataset and get dataloaders -partitioner = instantiate(cfg_static.partitioner) -fds = FederatedDataset( - dataset=cfg_static.dataset.name, partitioners={"train": partitioner} -) -( - tokenizer, - data_collator, - formatting_prompts_func, -) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) - -# ClientApp for Flower Next -client = ClientApp( - client_fn=gen_client_fn( - fds, - tokenizer, - 
formatting_prompts_func, - data_collator, - cfg.model, - cfg.train, - save_path, - ), -) - -# Get initial model weights -init_model = get_model(cfg.model) -init_model_parameters = get_parameters(init_model) -init_model_parameters = ndarrays_to_parameters(init_model_parameters) - -def server_fn(context: Context): - # Instantiate strategy according to config. Here we pass other arguments - # that are only defined at runtime. - strategy = instantiate( - cfg.strategy, - on_fit_config_fn=get_on_fit_config(), - fit_metrics_aggregation_fn=fit_weighted_average, - initial_parameters=init_model_parameters, - evaluate_fn=get_evaluate_fn( - cfg.model, cfg.train.save_every_round, cfg_static.num_rounds, save_path - ), - ) - - config = ServerConfig(num_rounds=cfg_static.num_rounds) - - return ServerAppComponents(strategy=strategy, config=config) - - -# ServerApp for Flower Next -server = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/client.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl similarity index 52% rename from src/py/flwr/cli/new/templates/app/code/flwr_tune/client.py.tpl rename to src/py/flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl index 2472e23ece44..415898ba117b 100644 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/client.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/client_app.py.tpl @@ -1,19 +1,35 @@ """$project_name: A Flower / FlowerTune app.""" -from collections import OrderedDict -from typing import Callable, Dict, Tuple +import os +import warnings +from typing import Dict, Tuple import torch +from flwr.client import ClientApp, NumPyClient +from flwr.common import Context +from flwr.common.config import unflatten_dict +from flwr.common.typing import NDArrays, Scalar from omegaconf import DictConfig -from peft import get_peft_model_state_dict, set_peft_model_state_dict + from transformers import TrainingArguments from trl import SFTTrainer -from 
flwr.client import NumPyClient -from flwr.common import Context -from flwr.common.typing import NDArrays, Scalar -from $import_name.dataset import reformat -from $import_name.models import cosine_annealing, get_model +from $import_name.dataset import ( + get_tokenizer_and_data_collator_and_propt_formatting, + load_data, + replace_keys, +) +from $import_name.models import ( + cosine_annealing, + get_model, + set_parameters, + get_parameters, +) + +# Avoid warnings +os.environ["TOKENIZERS_PARALLELISM"] = "true" +os.environ["RAY_DISABLE_DOCKER_CPU_WARNING"] = "1" +warnings.filterwarnings("ignore", category=UserWarning) # pylint: disable=too-many-arguments @@ -29,7 +45,7 @@ class FlowerClient(NumPyClient): tokenizer, formatting_prompts_func, data_collator, - save_path, + num_rounds, ): # pylint: disable=too-many-arguments self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.train_cfg = train_cfg @@ -37,13 +53,12 @@ class FlowerClient(NumPyClient): self.tokenizer = tokenizer self.formatting_prompts_func = formatting_prompts_func self.data_collator = data_collator - self.save_path = save_path + self.num_rounds = num_rounds + self.trainset = trainset # instantiate model self.model = get_model(model_cfg) - self.trainset = trainset - def fit( self, parameters: NDArrays, config: Dict[str, Scalar] ) -> Tuple[NDArrays, int, Dict]: @@ -52,13 +67,13 @@ class FlowerClient(NumPyClient): new_lr = cosine_annealing( int(config["current_round"]), - self.train_cfg.num_rounds, + self.num_rounds, self.train_cfg.learning_rate_max, self.train_cfg.learning_rate_min, ) self.training_argumnets.learning_rate = new_lr - self.training_argumnets.output_dir = self.save_path + self.training_argumnets.output_dir = config["save_path"] # Construct trainer trainer = SFTTrainer( @@ -81,46 +96,31 @@ class FlowerClient(NumPyClient): ) -def set_parameters(model, parameters: NDArrays) -> None: - """Change the parameters of the model using the given ones.""" - 
peft_state_dict_keys = get_peft_model_state_dict(model).keys() - params_dict = zip(peft_state_dict_keys, parameters) - state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) - set_peft_model_state_dict(model, state_dict) - - -def get_parameters(model) -> NDArrays: - """Return the parameters of the current net.""" - state_dict = get_peft_model_state_dict(model) - return [val.cpu().numpy() for _, val in state_dict.items()] - - -def gen_client_fn( - fds, - tokenizer, - formatting_prompts_func, - data_collator, - model_cfg: DictConfig, - train_cfg: DictConfig, - save_path: str, -) -> Callable[[Context], FlowerClient]: # pylint: disable=too-many-arguments - """Generate the client function that creates the Flower Clients.""" - - def client_fn(context: Context) -> FlowerClient: - """Create a Flower client representing a single organization.""" - # Let's get the partition corresponding to the i-th client - partition_id = context.node_config["partition-id"] - client_trainset = fds.load_partition(partition_id, "train") - client_trainset = reformat(client_trainset, llm_task="$llm_challenge_str") - - return FlowerClient( - model_cfg, - train_cfg, - client_trainset, - tokenizer, - formatting_prompts_func, - data_collator, - save_path, - ).to_client() - - return client_fn +def client_fn(context: Context) -> FlowerClient: + """Create a Flower client representing a single organization.""" + partition_id = context.node_config["partition-id"] + num_partitions = context.node_config["num-partitions"] + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Let's get the client partition + client_trainset = load_data(partition_id, num_partitions, cfg.static.dataset.name) + ( + tokenizer, + data_collator, + formatting_prompts_func, + ) = get_tokenizer_and_data_collator_and_propt_formatting(cfg.model.name) + + return FlowerClient( + cfg.model, + cfg.train, + client_trainset, + tokenizer, + 
formatting_prompts_func, + data_collator, + num_rounds, + ).to_client() + + +# Flower ClientApp +app = ClientApp(client_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/config.yaml.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/config.yaml.tpl deleted file mode 100644 index 9f700dd5b8da..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/config.yaml.tpl +++ /dev/null @@ -1,34 +0,0 @@ -# Federated Instruction Tuning ---- -model: - name: "mistralai/Mistral-7B-v0.3" - quantization: 4 # 8 or 4 if you want to do quantization with BitsAndBytes - gradient_checkpointing: True - lora: - peft_lora_r: 32 - peft_lora_alpha: 64 - -train: - num_rounds: null - save_every_round: 5 - learning_rate_max: 5e-5 - learning_rate_min: 1e-6 - seq_length: 512 - training_arguments: - output_dir: null # to be set by hydra - learning_rate: null # to be set by the client - per_device_train_batch_size: 16 - gradient_accumulation_steps: 1 - logging_steps: 10 - num_train_epochs: 3 - max_steps: 10 - report_to: null - save_steps: 1000 - save_total_limit: 10 - gradient_checkpointing: True - lr_scheduler_type: "constant" - -strategy: - _target_: flwr.server.strategy.FedAvg - fraction_fit: $fraction_fit - fraction_evaluate: 0.0 # no client evaluation diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl index 1b3691d7cf3c..41381ef7c7a3 100644 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/dataset.py.tpl @@ -1,8 +1,12 @@ """$project_name: A Flower / FlowerTune app.""" +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from transformers import AutoTokenizer from trl import DataCollatorForCompletionOnlyLM +FDS = None # Cache FederatedDataset + def formatting_prompts_func(example): """Construct prompts.""" @@ -24,7 +28,6 @@ def 
formatting_prompts_func(example): def get_tokenizer_and_data_collator_and_propt_formatting(model_name: str): """Get tokenizer, data_collator and prompt formatting.""" - # From: https://huggingface.co/docs/trl/en/sft_trainer tokenizer = AutoTokenizer.from_pretrained( model_name, use_fast=True, padding_side="right" ) @@ -49,9 +52,36 @@ def formatting(dataset): def reformat(dataset, llm_task): """Reformat datasets.""" dataset = dataset.rename_column("output", "response") - if llm_task == "finance" or llm_task == "code": + if llm_task in ["finance", "code"]: dataset = dataset.map(formatting, remove_columns=["input"]) if llm_task == "medical": dataset = dataset.remove_columns(["instruction"]) dataset = dataset.rename_column("input", "instruction") return dataset + + +def load_data(partition_id: int, num_partitions: int, dataset_name: str): + """Load partition data.""" + # Only initialize `FederatedDataset` once + global FDS + if FDS is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + FDS = FederatedDataset( + dataset=dataset_name, + partitioners={"train": partitioner}, + ) + client_trainset = FDS.load_partition(partition_id, "train") + client_trainset = reformat(client_trainset, llm_task="generalnlp") + return client_trainset + + +def replace_keys(input_dict, match="-", target="_"): + """Recursively replace match string with target string in dictionary keys.""" + new_dict = {} + for key, value in input_dict.items(): + new_key = key.replace(match, target) + if isinstance(value, dict): + new_dict[new_key] = replace_keys(value, match, target) + else: + new_dict[new_key] = value + return new_dict diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl index a2794f35518c..3f3f95c8b8eb 100644 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/models.py.tpl @@ -4,10 +4,18 @@ import math import torch 
from omegaconf import DictConfig -from peft import LoraConfig, get_peft_model +from collections import OrderedDict +from peft import ( + LoraConfig, + get_peft_model, + get_peft_model_state_dict, + set_peft_model_state_dict, +) from peft.utils import prepare_model_for_kbit_training from transformers import AutoModelForCausalLM, BitsAndBytesConfig +from flwr.common.typing import NDArrays + def cosine_annealing( current_round: int, @@ -22,9 +30,6 @@ def cosine_annealing( def get_model(model_cfg: DictConfig): """Load model with appropriate quantization config and other optimizations. - - Please refer to this example for `peft + BitsAndBytes`: - https://github.com/huggingface/peft/blob/main/examples/fp4_finetuning/finetune_fp4_opt_bnb_peft.py """ if model_cfg.quantization == 4: quantization_config = BitsAndBytesConfig(load_in_4bit=True) @@ -57,3 +62,17 @@ def get_model(model_cfg: DictConfig): model.config.use_cache = False return get_peft_model(model, peft_config) + + +def set_parameters(model, parameters: NDArrays) -> None: + """Change the parameters of the model using the given ones.""" + peft_state_dict_keys = get_peft_model_state_dict(model).keys() + params_dict = zip(peft_state_dict_keys, parameters) + state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict}) + set_peft_model_state_dict(model, state_dict) + + +def get_parameters(model) -> NDArrays: + """Return the parameters of the current net.""" + state_dict = get_peft_model_state_dict(model) + return [val.cpu().numpy() for _, val in state_dict.items()] diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/server.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/server.py.tpl deleted file mode 100644 index 5dd4d881f2f1..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/server.py.tpl +++ /dev/null @@ -1,48 +0,0 @@ -"""$project_name: A Flower / FlowerTune app.""" - -from $import_name.client_app import set_parameters -from $import_name.models import get_model - - -# 
Get function that will be executed by the strategy's evaluate() method -# Here we use it to save global model checkpoints -def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): - """Return an evaluation function for saving global model.""" - - def evaluate(server_round: int, parameters, config): - # Save model - if server_round != 0 and ( - server_round == total_round or server_round % save_every_round == 0 - ): - # Init model - model = get_model(model_cfg) - set_parameters(model, parameters) - - model.save_pretrained(f"{save_path}/peft_{server_round}") - - return 0.0, {} - - return evaluate - - -def get_on_fit_config(): - """ - Return a function that will be used to construct the config - that the client's fit() method will receive. - """ - - def fit_config_fn(server_round: int): - fit_config = {"current_round": server_round} - return fit_config - - return fit_config_fn - - -def fit_weighted_average(metrics): - """Aggregate (federated) evaluation metrics.""" - # Multiply accuracy of each client by number of examples used - losses = [num_examples * m["train_loss"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - return {"train_loss": sum(losses) / sum(examples)} diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl new file mode 100644 index 000000000000..7d4de0f73dbf --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/server_app.py.tpl @@ -0,0 +1,94 @@ +"""$project_name: A Flower / FlowerTune app.""" + +import os +from datetime import datetime + +from flwr.common import Context, ndarrays_to_parameters +from flwr.common.config import unflatten_dict +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from omegaconf import DictConfig + +from $import_name.models import get_model, get_parameters, set_parameters +from 
$import_name.dataset import replace_keys +from $import_name.strategy import FlowerTuneLlm + + +# Get function that will be executed by the strategy's evaluate() method +# Here we use it to save global model checkpoints +def get_evaluate_fn(model_cfg, save_every_round, total_round, save_path): + """Return an evaluation function for saving global model.""" + + def evaluate(server_round: int, parameters, config): + # Save model + if server_round != 0 and ( + server_round == total_round or server_round % save_every_round == 0 + ): + # Init model + model = get_model(model_cfg) + set_parameters(model, parameters) + + model.save_pretrained(f"{save_path}/peft_{server_round}") + + return 0.0, {} + + return evaluate + + +def get_on_fit_config(save_path): + """Return a function that will be used to construct the config that the + client's fit() method will receive.""" + + def fit_config_fn(server_round: int): + fit_config = {} + fit_config["current_round"] = server_round + fit_config["save_path"] = save_path + return fit_config + + return fit_config_fn + + +def fit_weighted_average(metrics): + """Aggregate (federated) evaluation metrics.""" + # Multiply accuracy of each client by number of examples used + losses = [num_examples * m["train_loss"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"train_loss": sum(losses) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + # Create output directory given current timestamp + current_time = datetime.now() + folder_name = current_time.strftime("%Y-%m-%d_%H-%M-%S") + save_path = os.path.join(os.getcwd(), f"results/{folder_name}") + os.makedirs(save_path, exist_ok=True) + + # Read from config + num_rounds = context.run_config["num-server-rounds"] + cfg = DictConfig(replace_keys(unflatten_dict(context.run_config))) + + # Get initial model weights + 
init_model = get_model(cfg.model) + init_model_parameters = get_parameters(init_model) + init_model_parameters = ndarrays_to_parameters(init_model_parameters) + + # Define strategy + strategy = FlowerTuneLlm( + fraction_fit=cfg.strategy.fraction_fit, + fraction_evaluate=cfg.strategy.fraction_evaluate, + on_fit_config_fn=get_on_fit_config(save_path), + fit_metrics_aggregation_fn=fit_weighted_average, + initial_parameters=init_model_parameters, + evaluate_fn=get_evaluate_fn( + cfg.model, cfg.train.save_every_round, num_rounds, save_path + ), + ) + config = ServerConfig(num_rounds=num_rounds) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Flower ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/static_config.yaml.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/static_config.yaml.tpl deleted file mode 100644 index a8a4039fc831..000000000000 --- a/src/py/flwr/cli/new/templates/app/code/flwr_tune/static_config.yaml.tpl +++ /dev/null @@ -1,11 +0,0 @@ -# Federated Instruction Tuning (static) ---- -dataset: - name: $dataset_name - -# FL experimental settings -num_clients: $num_clients # total number of clients -num_rounds: 200 -partitioner: - _target_: flwr_datasets.partitioner.IidPartitioner - num_partitions: $num_clients diff --git a/src/py/flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl b/src/py/flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl new file mode 100644 index 000000000000..8accd70c4e76 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/flwr_tune/strategy.py.tpl @@ -0,0 +1,83 @@ +"""$project_name: A Flower / FlowerTune app.""" + +from io import BytesIO +from logging import INFO, WARN +from typing import List, Tuple, Union + +from flwr.common import FitIns, FitRes, Parameters, log, parameters_to_ndarrays +from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy +from flwr.server.strategy import 
FedAvg + + +class FlowerTuneLlm(FedAvg): + """Customised FedAvg strategy implementation. + + This class behaves just like FedAvg but also tracks the communication + costs associated with `fit` over FL rounds. + """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.comm_tracker = CommunicationTracker() + + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ): + """Configure the next round of training.""" + return_clients = super().configure_fit(server_round, parameters, client_manager) + + # Test communication costs + fit_ins_list = [fit_ins for _, fit_ins in return_clients] + self.comm_tracker.track(fit_ins_list) + + return return_clients + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ): + """Aggregate fit results using weighted average.""" + # Test communication costs + fit_res_list = [fit_res for _, fit_res in results] + self.comm_tracker.track(fit_res_list) + + parameters_aggregated, metrics_aggregated = super().aggregate_fit( + server_round, results, failures + ) + + return parameters_aggregated, metrics_aggregated + + +class CommunicationTracker: + """Communication costs tracker over FL rounds.""" + def __init__(self): + self.curr_comm_cost = 0.0 + + @staticmethod + def _compute_bytes(parameters): + return sum([BytesIO(t).getbuffer().nbytes for t in parameters.tensors]) + + def track(self, fit_list: List[Union[FitIns, FitRes]]): + size_bytes_list = [ + self._compute_bytes(fit_ele.parameters) + for fit_ele in fit_list + ] + comm_cost = sum(size_bytes_list) / 1024**2 + + self.curr_comm_cost += comm_cost + log( + INFO, + "Communication budget: used %.2f MB (+%.2f MB this round) / 200,000 MB", + self.curr_comm_cost, + comm_cost, + ) + + if self.curr_comm_cost > 2e5: + log( + WARN, + "The accumulated communication cost has exceeded 200,000 MB. 
" + "Please consider reducing it if you plan to participate " + "FlowerTune LLM Leaderboard.", + ) diff --git a/src/py/flwr/cli/new/templates/app/code/model.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/model.baseline.py.tpl new file mode 100644 index 000000000000..8a914fcf60d1 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/model.baseline.py.tpl @@ -0,0 +1,80 @@ +"""$project_name: A Flower Baseline.""" + +from collections import OrderedDict + +import torch +import torch.nn.functional as F +from torch import nn + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz').""" + + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + """Do forward.""" + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def train(net, trainloader, epochs, device): + """Train the model on the training set.""" + net.to(device) # move model to GPU if available + criterion = torch.nn.CrossEntropyLoss() + criterion.to(device) + optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9) + net.train() + running_loss = 0.0 + for _ in range(epochs): + for batch in trainloader: + images = batch["img"] + labels = batch["label"] + optimizer.zero_grad() + loss = criterion(net(images.to(device)), labels.to(device)) + loss.backward() + optimizer.step() + running_loss += loss.item() + + avg_trainloss = running_loss / len(trainloader) + return avg_trainloss + + +def test(net, testloader, device): + """Validate the model on the test set.""" + net.to(device) + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for batch in testloader: + images = 
batch["img"].to(device) + labels = batch["label"].to(device) + outputs = net(images) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + loss = loss / len(testloader) + return loss, accuracy + + +def get_weights(net): + """Extract model parameters as numpy arrays from state_dict.""" + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + +def set_weights(net, parameters): + """Apply parameters to an existing model.""" + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) diff --git a/src/py/flwr/cli/new/templates/app/code/server.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.baseline.py.tpl new file mode 100644 index 000000000000..ea536e3efffb --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/server.baseline.py.tpl @@ -0,0 +1,46 @@ +"""$project_name: A Flower Baseline.""" + +from typing import List, Tuple + +from flwr.common import Context, Metrics, ndarrays_to_parameters +from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from $import_name.model import Net, get_weights + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + """Do weighted average of accuracy metric.""" + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * float(m["accuracy"]) for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +def server_fn(context: Context): + """Construct components that set the ServerApp behaviour.""" + # Read from config + num_rounds = context.run_config["num-server-rounds"] + fraction_fit = 
context.run_config["fraction-fit"] + + # Initialize model parameters + ndarrays = get_weights(Net()) + parameters = ndarrays_to_parameters(ndarrays) + + # Define strategy + strategy = FedAvg( + fraction_fit=float(fraction_fit), + fraction_evaluate=1.0, + min_available_clients=2, + initial_parameters=parameters, + evaluate_metrics_aggregation_fn=weighted_average, + ) + config = ServerConfig(num_rounds=int(num_rounds)) + + return ServerAppComponents(strategy=strategy, config=config) + + +# Create ServerApp +app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl index 5491f6616160..16f94f0a64e9 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.huggingface.py.tpl @@ -1,18 +1,33 @@ """$project_name: A Flower / $framework_str app.""" -from flwr.common import Context -from flwr.server.strategy import FedAvg +from flwr.common import Context, ndarrays_to_parameters from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from transformers import AutoModelForSequenceClassification + +from $import_name.task import get_weights def server_fn(context: Context): # Read from config num_rounds = context.run_config["num-server-rounds"] + fraction_fit = context.run_config["fraction-fit"] + + # Initialize global model + model_name = context.run_config["model-name"] + num_labels = context.run_config["num-labels"] + net = AutoModelForSequenceClassification.from_pretrained( + model_name, num_labels=num_labels + ) + + weights = get_weights(net) + initial_parameters = ndarrays_to_parameters(weights) # Define strategy strategy = FedAvg( - fraction_fit=1.0, + fraction_fit=fraction_fit, fraction_evaluate=1.0, + initial_parameters=initial_parameters, ) config = ServerConfig(num_rounds=num_rounds) diff --git 
a/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl index 514185fde970..60bbcaf3c175 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.jax.py.tpl @@ -1,16 +1,22 @@ """$project_name: A Flower / $framework_str app.""" -from flwr.common import Context -from flwr.server.strategy import FedAvg +from flwr.common import Context, ndarrays_to_parameters from flwr.server import ServerApp, ServerAppComponents, ServerConfig +from flwr.server.strategy import FedAvg +from $import_name.task import get_params, load_model def server_fn(context: Context): # Read from config num_rounds = context.run_config["num-server-rounds"] + input_dim = context.run_config["input-dim"] + + # Initialize global model + params = get_params(load_model((input_dim,))) + initial_parameters = ndarrays_to_parameters(params) # Define strategy - strategy = FedAvg() + strategy = FedAvg(initial_parameters=initial_parameters) config = ServerConfig(num_rounds=num_rounds) return ServerAppComponents(strategy=strategy, config=config) diff --git a/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl index c99c72574813..6d00e84fe383 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.mlx.py.tpl @@ -1,16 +1,27 @@ """$project_name: A Flower / $framework_str app.""" -from flwr.common import Context +from flwr.common import Context, ndarrays_to_parameters from flwr.server import ServerApp, ServerAppComponents, ServerConfig from flwr.server.strategy import FedAvg +from $import_name.task import MLP, get_params def server_fn(context: Context): # Read from config num_rounds = context.run_config["num-server-rounds"] + num_classes = 10 + num_layers = context.run_config["num-layers"] + input_dim = context.run_config["input-dim"] + hidden_dim = 
context.run_config["hidden-dim"] + + # Initialize global model + model = MLP(num_layers, input_dim, hidden_dim, num_classes) + params = get_params(model) + initial_parameters = ndarrays_to_parameters(params) + # Define strategy - strategy = FedAvg() + strategy = FedAvg(initial_parameters=initial_parameters) config = ServerConfig(num_rounds=num_rounds) return ServerAppComponents(strategy=strategy, config=config) diff --git a/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl index c99c72574813..ec1ff52811af 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.numpy.py.tpl @@ -1,16 +1,21 @@ """$project_name: A Flower / $framework_str app.""" -from flwr.common import Context +from flwr.common import Context, ndarrays_to_parameters from flwr.server import ServerApp, ServerAppComponents, ServerConfig from flwr.server.strategy import FedAvg +from $import_name.task import get_dummy_model def server_fn(context: Context): # Read from config num_rounds = context.run_config["num-server-rounds"] + # Initial model + model = get_dummy_model() + dummy_parameters = ndarrays_to_parameters([model]) + # Define strategy - strategy = FedAvg() + strategy = FedAvg(initial_parameters=dummy_parameters) config = ServerConfig(num_rounds=num_rounds) return ServerAppComponents(strategy=strategy, config=config) diff --git a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl index 39185965b3a5..9fe5f0fedc28 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.pytorch.py.tpl @@ -3,7 +3,6 @@ from flwr.common import Context, ndarrays_to_parameters from flwr.server import ServerApp, ServerAppComponents, ServerConfig from flwr.server.strategy import FedAvg - from $import_name.task import Net, get_weights @@ -27,5 +26,6 
@@ def server_fn(context: Context): return ServerAppComponents(strategy=strategy, config=config) + # Create ServerApp app = ServerApp(server_fn=server_fn) diff --git a/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl index 678ba9326229..b1487b01d2d3 100644 --- a/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/server.sklearn.py.tpl @@ -1,19 +1,31 @@ """$project_name: A Flower / $framework_str app.""" -from flwr.common import Context +from flwr.common import Context, ndarrays_to_parameters from flwr.server import ServerApp, ServerAppComponents, ServerConfig from flwr.server.strategy import FedAvg +from $import_name.task import get_model, get_model_params, set_initial_params def server_fn(context: Context): # Read from config num_rounds = context.run_config["num-server-rounds"] + # Create LogisticRegression Model + penalty = context.run_config["penalty"] + local_epochs = context.run_config["local-epochs"] + model = get_model(penalty, local_epochs) + + # Setting initial parameters, akin to model.compile for keras models + set_initial_params(model) + + initial_parameters = ndarrays_to_parameters(get_model_params(model)) + # Define strategy strategy = FedAvg( fraction_fit=1.0, fraction_evaluate=1.0, min_available_clients=2, + initial_parameters=initial_parameters, ) config = ServerConfig(num_rounds=num_rounds) diff --git a/src/py/flwr/cli/new/templates/app/code/strategy.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/strategy.baseline.py.tpl new file mode 100644 index 000000000000..5ad8041381d6 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/strategy.baseline.py.tpl @@ -0,0 +1 @@ +"""$project_name: A Flower Baseline.""" diff --git a/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl index ad52e2c3fe21..1c50e85d7103 100644 --- 
a/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.huggingface.py.tpl @@ -4,24 +4,25 @@ import warnings from collections import OrderedDict import torch +import transformers +from datasets.utils.logging import disable_progress_bar from evaluate import load as load_metric +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoTokenizer, DataCollatorWithPadding -from flwr_datasets import FederatedDataset -from flwr_datasets.partitioner import IidPartitioner - - warnings.filterwarnings("ignore", category=UserWarning) -DEVICE = torch.device("cpu") -CHECKPOINT = "distilbert-base-uncased" # transformer model checkpoint +warnings.filterwarnings("ignore", category=FutureWarning) +disable_progress_bar() +transformers.logging.set_verbosity_error() fds = None # Cache FederatedDataset -def load_data(partition_id: int, num_partitions: int): +def load_data(partition_id: int, num_partitions: int, model_name: str): """Load IMDB data (training and eval)""" # Only initialize `FederatedDataset` once global fds @@ -35,10 +36,12 @@ def load_data(partition_id: int, num_partitions: int): # Divide data: 80% train, 20% test partition_train_test = partition.train_test_split(test_size=0.2, seed=42) - tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT) + tokenizer = AutoTokenizer.from_pretrained(model_name) def tokenize_function(examples): - return tokenizer(examples["text"], truncation=True) + return tokenizer( + examples["text"], truncation=True, add_special_tokens=True, max_length=512 + ) partition_train_test = partition_train_test.map(tokenize_function, batched=True) partition_train_test = partition_train_test.remove_columns("text") @@ -59,12 +62,12 @@ def load_data(partition_id: int, num_partitions: int): return trainloader, testloader -def train(net, trainloader, epochs): +def 
train(net, trainloader, epochs, device): optimizer = AdamW(net.parameters(), lr=5e-5) net.train() for _ in range(epochs): for batch in trainloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} outputs = net(**batch) loss = outputs.loss loss.backward() @@ -72,12 +75,12 @@ def train(net, trainloader, epochs): optimizer.zero_grad() -def test(net, testloader): +def test(net, testloader, device): metric = load_metric("accuracy") loss = 0 net.eval() for batch in testloader: - batch = {k: v.to(DEVICE) for k, v in batch.items()} + batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = net(**batch) logits = outputs.logits diff --git a/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl index fc6ef9dee3dd..428f752845c1 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.jax.py.tpl @@ -2,9 +2,9 @@ import jax import jax.numpy as jnp +import numpy as np from sklearn.datasets import make_regression from sklearn.model_selection import train_test_split -import numpy as np key = jax.random.PRNGKey(0) @@ -33,7 +33,7 @@ def train(params, grad_fn, X, y): num_examples = X.shape[0] for epochs in range(50): grads = grad_fn(params, X, y) - params = jax.tree_map(lambda p, g: p - 0.05 * g, params, grads) + params = jax.tree.map(lambda p, g: p - 0.05 * g, params, grads) loss = loss_fn(params, X, y) return params, loss, num_examples diff --git a/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl index f959cd1d64e3..63db6c28f034 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.mlx.py.tpl @@ -3,10 +3,10 @@ import mlx.core as mx import mlx.nn as nn import numpy as np -from datasets.utils.logging import disable_progress_bar from flwr_datasets import 
FederatedDataset from flwr_datasets.partitioner import IidPartitioner +from datasets.utils.logging import disable_progress_bar disable_progress_bar() diff --git a/src/py/flwr/cli/new/templates/app/code/task.numpy.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.numpy.py.tpl new file mode 100644 index 000000000000..9b76fc055caf --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/task.numpy.py.tpl @@ -0,0 +1,7 @@ +"""$project_name: A Flower / $framework_str app.""" + +import numpy as np + + +def get_dummy_model(): + return np.ones((1, 1)) diff --git a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl index 5562371ad460..a3c015bfee88 100644 --- a/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl +++ b/src/py/flwr/cli/new/templates/app/code/task.pytorch.py.tpl @@ -5,10 +5,10 @@ from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F -from torch.utils.data import DataLoader -from torchvision.transforms import Compose, Normalize, ToTensor from flwr_datasets import FederatedDataset from flwr_datasets.partitioner import IidPartitioner +from torch.utils.data import DataLoader +from torchvision.transforms import Compose, Normalize, ToTensor class Net(nn.Module): @@ -67,7 +67,7 @@ def train(net, trainloader, epochs, device): """Train the model on the training set.""" net.to(device) # move model to GPU if available criterion = torch.nn.CrossEntropyLoss().to(device) - optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9) + optimizer = torch.optim.Adam(net.parameters(), lr=0.01) net.train() running_loss = 0.0 for _ in range(epochs): diff --git a/src/py/flwr/cli/new/templates/app/code/task.sklearn.py.tpl b/src/py/flwr/cli/new/templates/app/code/task.sklearn.py.tpl new file mode 100644 index 000000000000..52c13edc032c --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/task.sklearn.py.tpl @@ -0,0 +1,67 @@ +"""$project_name: A 
Flower / $framework_str app.""" + +import numpy as np +from flwr_datasets import FederatedDataset +from flwr_datasets.partitioner import IidPartitioner +from sklearn.linear_model import LogisticRegression + +fds = None # Cache FederatedDataset + + +def load_data(partition_id: int, num_partitions: int): + """Load partition MNIST data.""" + # Only initialize `FederatedDataset` once + global fds + if fds is None: + partitioner = IidPartitioner(num_partitions=num_partitions) + fds = FederatedDataset( + dataset="mnist", + partitioners={"train": partitioner}, + ) + + dataset = fds.load_partition(partition_id, "train").with_format("numpy") + + X, y = dataset["image"].reshape((len(dataset), -1)), dataset["label"] + + # Split the on edge data: 80% train, 20% test + X_train, X_test = X[: int(0.8 * len(X))], X[int(0.8 * len(X)) :] + y_train, y_test = y[: int(0.8 * len(y))], y[int(0.8 * len(y)) :] + + return X_train, X_test, y_train, y_test + + +def get_model(penalty: str, local_epochs: int): + + return LogisticRegression( + penalty=penalty, + max_iter=local_epochs, + warm_start=True, + ) + + +def get_model_params(model): + if model.fit_intercept: + params = [ + model.coef_, + model.intercept_, + ] + else: + params = [model.coef_] + return params + + +def set_model_params(model, params): + model.coef_ = params[0] + if model.fit_intercept: + model.intercept_ = params[1] + return model + + +def set_initial_params(model): + n_classes = 10 # MNIST has 10 classes + n_features = 784 # Number of features in dataset + model.classes_ = np.array([i for i in range(10)]) + + model.coef_ = np.zeros((n_classes, n_features)) + if model.fit_intercept: + model.intercept_ = np.zeros((n_classes,)) diff --git a/src/py/flwr/cli/new/templates/app/code/utils.baseline.py.tpl b/src/py/flwr/cli/new/templates/app/code/utils.baseline.py.tpl new file mode 100644 index 000000000000..5ad8041381d6 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/code/utils.baseline.py.tpl @@ -0,0 +1 @@ 
+"""$project_name: A Flower Baseline.""" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl new file mode 100644 index 000000000000..c70580009392 --- /dev/null +++ b/src/py/flwr/cli/new/templates/app/pyproject.baseline.toml.tpl @@ -0,0 +1,138 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "$package_name" +version = "1.0.0" +description = "" +license = "Apache-2.0" +dependencies = [ + "flwr[simulation]>=1.12.0", + "flwr-datasets[vision]>=0.3.0", + "torch==2.2.1", + "torchvision==0.17.1", +] + +[tool.hatch.metadata] +allow-direct-references = true + +[project.optional-dependencies] +dev = [ + "isort==5.13.2", + "black==24.2.0", + "docformatter==1.7.5", + "mypy==1.8.0", + "pylint==3.2.6", + "flake8==5.0.4", + "pytest==6.2.4", + "pytest-watch==4.2.0", + "ruff==0.1.9", + "types-requests==2.31.0.20240125", +] + +[tool.isort] +profile = "black" +known_first_party = ["flwr"] + +[tool.black] +line-length = 88 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.pytest.ini_options] +minversion = "6.2" +addopts = "-qq" +testpaths = [ + "flwr_baselines", +] + +[tool.mypy] +ignore_missing_imports = true +strict = false +plugins = "numpy.typing.mypy_plugin" + +[tool.pylint."MESSAGES CONTROL"] +disable = "duplicate-code,too-few-public-methods,useless-import-alias" +good-names = "i,j,k,_,x,y,X,Y,K,N" +max-args = 10 +max-attributes = 15 +max-locals = 36 +max-branches = 20 +max-statements = 55 + +[tool.pylint.typecheck] +generated-members = "numpy.*, torch.*, tensorflow.*" + +[[tool.mypy.overrides]] +module = [ + "importlib.metadata.*", + "importlib_metadata.*", +] +follow_imports = "skip" +follow_imports_for_stubs = true +disallow_untyped_calls = false + +[[tool.mypy.overrides]] +module = "torch.*" +follow_imports = "skip" +follow_imports_for_stubs = true + +[tool.docformatter] +wrap-summaries = 88 +wrap-descriptions = 88 + 
+[tool.ruff] +target-version = "py38" +line-length = 88 +select = ["D", "E", "F", "W", "B", "ISC", "C4"] +fixable = ["D", "E", "F", "W", "B", "ISC", "C4"] +ignore = ["B024", "B027"] +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".hg", + ".mypy_cache", + ".nox", + ".pants.d", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "venv", + "proto", +] + +[tool.ruff.pydocstyle] +convention = "numpy" + +[tool.hatch.build.targets.wheel] +packages = ["."] + +[tool.flwr.app] +publisher = "$username" + +[tool.flwr.app.components] +serverapp = "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" + +[tool.flwr.app.config] +num-server-rounds = 3 +fraction-fit = 0.5 +local-epochs = 1 + +[tool.flwr.federations] +default = "local-simulation" + +[tool.flwr.federations.local-simulation] +options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 2 +options.backend.client-resources.num-gpus = 0.0 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl index b564a66090d2..d34985d50433 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.flowertune.toml.tpl @@ -8,15 +8,16 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.9.0,<2.0", - "flwr-datasets>=0.1.0,<1.0.0", - "hydra-core==1.3.2", + "flwr[simulation]>=1.12.0", + "flwr-datasets>=0.3.0", "trl==0.8.1", "bitsandbytes==0.43.0", "scipy==1.13.0", "peft==0.6.2", - "transformers==4.39.3", + "transformers==4.43.1", "sentencepiece==0.2.0", + "omegaconf==2.3.0", + "hf_transfer==0.1.8", ] [tool.hatch.build.targets.wheel] @@ -26,14 +27,41 @@ packages = ["."] publisher = "$username" [tool.flwr.app.components] -serverapp = "$import_name.app:server" -clientapp = "$import_name.app:client" +serverapp 
= "$import_name.server_app:app" +clientapp = "$import_name.client_app:app" [tool.flwr.app.config] -num-server-rounds = 3 +model.name = "mistralai/Mistral-7B-v0.3" +model.quantization = 4 +model.gradient-checkpointing = true +model.lora.peft-lora-r = 32 +model.lora.peft-lora-alpha = 64 +train.save-every-round = 5 +train.learning-rate-max = 5e-5 +train.learning-rate-min = 1e-6 +train.seq-length = 512 +train.training-arguments.output-dir = "" +train.training-arguments.learning-rate = "" +train.training-arguments.per-device-train-batch-size = 16 +train.training-arguments.gradient-accumulation-steps = 1 +train.training-arguments.logging-steps = 10 +train.training-arguments.num-train-epochs = 3 +train.training-arguments.max-steps = 10 +train.training-arguments.save-steps = 1000 +train.training-arguments.save-total-limit = 10 +train.training-arguments.gradient-checkpointing = true +train.training-arguments.lr-scheduler-type = "constant" +strategy.fraction-fit = $fraction_fit +strategy.fraction-evaluate = 0.0 +num-server-rounds = 200 + +[tool.flwr.app.config.static] +dataset.name = "$dataset_name" [tool.flwr.federations] default = "local-simulation" [tool.flwr.federations.local-simulation] -options.num-supernodes = 10 +options.num-supernodes = $num_clients +options.backend.client-resources.num-cpus = 6 +options.backend.client-resources.num-gpus = 1.0 diff --git a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl index 15dc2af87a3f..3515cbd69d17 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.huggingface.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets>=0.3.0", "torch==2.2.1", "transformers>=4.30.0,<5.0", @@ -29,10 +29,18 @@ clientapp = "$import_name.client_app:app" [tool.flwr.app.config] 
num-server-rounds = 3 +fraction-fit = 0.5 local-epochs = 1 +model-name = "prajjwal1/bert-tiny" # Set a larger model if you have access to more GPU resources +num-labels = 2 [tool.flwr.federations] default = "localhost" [tool.flwr.federations.localhost] options.num-supernodes = 10 + +[tool.flwr.federations.localhost-gpu] +options.num-supernodes = 10 +options.backend.client-resources.num-cpus = 4 # each ClientApp assumes to use 4CPUs +options.backend.client-resources.num-gpus = 0.25 # at most 4 ClientApps will run in a given GPU diff --git a/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl index 31fff1c2a4c8..7c55d3654a08 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.jax.toml.tpl @@ -8,9 +8,9 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", - "jax==0.4.13", - "jaxlib==0.4.13", + "flwr[simulation]>=1.12.0", + "jax==0.4.30", + "jaxlib==0.4.30", "scikit-learn==1.3.2", ] @@ -26,6 +26,7 @@ clientapp = "$import_name.client_app:app" [tool.flwr.app.config] num-server-rounds = 3 +input-dim = 3 [tool.flwr.federations] default = "local-simulation" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl index c1bfe804c709..9ea11ff3fc0c 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.mlx.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "mlx==0.16.1", "numpy==1.24.4", @@ -28,6 +28,7 @@ clientapp = "$import_name.client_app:app" num-server-rounds = 3 local-epochs = 1 num-layers = 2 +input-dim = 784 # 28*28 hidden-dim = 32 batch-size = 256 lr = 0.1 diff --git 
a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl index 953e556ad012..9f8f3aaab554 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.numpy.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "numpy>=1.21.0", ] diff --git a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl index ccaf88c19e42..fe5ac7735d66 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.pytorch.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "torch==2.2.1", "torchvision==0.17.1", diff --git a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl index 2b5778fec9a7..d5fec5f2f93f 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl +++ b/src/py/flwr/cli/new/templates/app/pyproject.sklearn.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "scikit-learn>=1.1.1", ] @@ -25,6 +25,8 @@ clientapp = "$import_name.client_app:app" [tool.flwr.app.config] num-server-rounds = 3 +penalty = "l2" +local-epochs = 1 [tool.flwr.federations] default = "local-simulation" diff --git a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl index 11f7d1083abc..81a839b30998 100644 --- a/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl +++ 
b/src/py/flwr/cli/new/templates/app/pyproject.tensorflow.toml.tpl @@ -8,7 +8,7 @@ version = "1.0.0" description = "" license = "Apache-2.0" dependencies = [ - "flwr[simulation]>=1.10.0", + "flwr[simulation]>=1.12.0", "flwr-datasets[vision]>=0.3.0", "tensorflow>=2.11.1", ] diff --git a/src/py/flwr/cli/run/run.py b/src/py/flwr/cli/run/run.py index b2c4dc4151cd..4722effee53d 100644 --- a/src/py/flwr/cli/run/run.py +++ b/src/py/flwr/cli/run/run.py @@ -14,16 +14,14 @@ # ============================================================================== """Flower command line interface `run` command.""" -import hashlib import json import subprocess import sys from logging import DEBUG from pathlib import Path -from typing import Any, Dict, List, Optional +from typing import Annotated, Any, Optional import typer -from typing_extensions import Annotated from flwr.cli.build import build from flwr.cli.config_utils import load_and_validate @@ -35,6 +33,10 @@ from flwr.proto.exec_pb2 import StartRunRequest # pylint: disable=E0611 from flwr.proto.exec_pb2_grpc import ExecStub +from ..log import start_stream + +CONN_REFRESH_PERIOD = 60 # Connection refresh period for log streaming (seconds) + def on_channel_state_change(channel_connectivity: str) -> None: """Log channel connectivity.""" @@ -52,7 +54,7 @@ def run( typer.Argument(help="Name of the federation to run the app on."), ] = None, config_overrides: Annotated[ - Optional[List[str]], + Optional[list[str]], typer.Option( "--run-config", "-c", @@ -63,6 +65,14 @@ def run( "inside the `pyproject.toml` in order to be properly overriden.", ), ] = None, + stream: Annotated[ + bool, + typer.Option( + "--stream", + help="Use `--stream` with `flwr run` to display logs;\n " + "logs are not streamed by default.", + ), + ] = False, ) -> None: """Run Flower App.""" typer.secho("Loading project configuration... 
", fg=typer.colors.BLUE) @@ -118,20 +128,22 @@ def run( raise typer.Exit(code=1) if "address" in federation_config: - _run_with_superexec(app, federation_config, config_overrides) + _run_with_superexec(app, federation_config, config_overrides, stream) else: _run_without_superexec(app, federation_config, config_overrides, federation) +# pylint: disable=too-many-locals def _run_with_superexec( - app: Optional[Path], - federation_config: Dict[str, Any], - config_overrides: Optional[List[str]], + app: Path, + federation_config: dict[str, Any], + config_overrides: Optional[list[str]], + stream: bool, ) -> None: insecure_str = federation_config.get("insecure") if root_certificates := federation_config.get("root-certificates"): - root_certificates_bytes = Path(root_certificates).read_bytes() + root_certificates_bytes = (app / root_certificates).read_bytes() if insecure := bool(insecure_str): typer.secho( "❌ `root_certificates` were provided but the `insecure` parameter" @@ -167,9 +179,9 @@ def _run_with_superexec( channel.subscribe(on_channel_state_change) stub = ExecStub(channel) - fab_path = Path(build(app)) - content = fab_path.read_bytes() - fab = Fab(hashlib.sha256(content).hexdigest(), content) + fab_path, fab_hash = build(app) + content = Path(fab_path).read_bytes() + fab = Fab(fab_hash, content) req = StartRunRequest( fab=fab_to_proto(fab), @@ -181,14 +193,17 @@ def _run_with_superexec( res = stub.StartRun(req) # Delete FAB file once it has been sent to the SuperExec - fab_path.unlink() + Path(fab_path).unlink() typer.secho(f"🎊 Successfully started run {res.run_id}", fg=typer.colors.GREEN) + if stream: + start_stream(res.run_id, channel, CONN_REFRESH_PERIOD) + def _run_without_superexec( app: Optional[Path], - federation_config: Dict[str, Any], - config_overrides: Optional[List[str]], + federation_config: dict[str, Any], + config_overrides: Optional[list[str]], federation: str, ) -> None: try: diff --git a/src/py/flwr/cli/utils.py b/src/py/flwr/cli/utils.py index 
2f5a8831fa7c..e725fdd3f951 100644 --- a/src/py/flwr/cli/utils.py +++ b/src/py/flwr/cli/utils.py @@ -17,7 +17,7 @@ import hashlib import re from pathlib import Path -from typing import Callable, List, Optional, cast +from typing import Callable, Optional, cast import typer @@ -40,7 +40,7 @@ def prompt_text( return cast(str, result) -def prompt_options(text: str, options: List[str]) -> str: +def prompt_options(text: str, options: list[str]) -> str: """Ask user to select one of the given options and return the selected item.""" # Turn options into a list with index as in " [ 0] quickstart-pytorch" options_formatted = [ diff --git a/src/py/flwr/client/__init__.py b/src/py/flwr/client/__init__.py index 218f2fe20d62..dce3be9036bb 100644 --- a/src/py/flwr/client/__init__.py +++ b/src/py/flwr/client/__init__.py @@ -20,8 +20,6 @@ from .client import Client as Client from .client_app import ClientApp as ClientApp from .numpy_client import NumPyClient as NumPyClient -from .supernode import run_client_app as run_client_app -from .supernode import run_supernode as run_supernode from .typing import ClientFn as ClientFn from .typing import ClientFnExt as ClientFnExt @@ -32,8 +30,6 @@ "ClientFnExt", "NumPyClient", "mod", - "run_client_app", - "run_supernode", "start_client", "start_numpy_client", ] diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index fb4855a09817..e803eaf88864 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -18,10 +18,11 @@ import subprocess import sys import time +from contextlib import AbstractContextManager from dataclasses import dataclass from logging import ERROR, INFO, WARN from pathlib import Path -from typing import Callable, ContextManager, Dict, Optional, Tuple, Type, Union, cast +from typing import Callable, Optional, Union, cast import grpc from cryptography.hazmat.primitives.asymmetric import ec @@ -35,6 +36,9 @@ from flwr.common import GRPC_MAX_MESSAGE_LENGTH, Context, EventType, Message, event from 
flwr.common.address import parse_address from flwr.common.constant import ( + CLIENTAPPIO_API_DEFAULT_ADDRESS, + ISOLATION_MODE_PROCESS, + ISOLATION_MODE_SUBPROCESS, MISSING_EXTRA_REST, RUN_ID_NUM_BYTES, TRANSPORT_TYPE_GRPC_ADAPTER, @@ -50,20 +54,15 @@ from flwr.common.typing import Fab, Run, UserConfig from flwr.proto.clientappio_pb2_grpc import add_ClientAppIoServicer_to_server from flwr.server.superlink.fleet.grpc_bidi.grpc_server import generic_create_grpc_server -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from .clientapp.clientappio_servicer import ClientAppInputs, ClientAppIoServicer from .grpc_adapter_client.connection import grpc_adapter from .grpc_client.connection import grpc_connection from .grpc_rere_client.connection import grpc_request_response from .message_handler.message_handler import handle_control_message -from .node_state import NodeState from .numpy_client import NumPyClient - -ADDRESS_CLIENTAPPIO_API_GRPC_RERE = "0.0.0.0:9094" - -ISOLATION_MODE_SUBPROCESS = "subprocess" -ISOLATION_MODE_PROCESS = "process" +from .run_info_store import DeprecatedRunInfoStore def _check_actionable_client( @@ -95,7 +94,7 @@ def start_client( insecure: Optional[bool] = None, transport: Optional[str] = None, authentication_keys: Optional[ - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, @@ -132,6 +131,11 @@ class `flwr.client.Client` (default: None) - 'grpc-bidi': gRPC, bidirectional streaming - 'grpc-rere': gRPC, request-response (experimental) - 'rest': HTTP (experimental) + authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None) + Tuple containing the elliptic curve private key and public key for + authentication from the cryptography library. 
+ Source: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ + Used to establish an authenticated connection with the server. max_retries: Optional[int] (default: None) The maximum number of times the client will try to connect to the server before giving up in case of a connection error. If set to None, @@ -197,7 +201,7 @@ def start_client_internal( *, server_address: str, node_config: UserConfig, - load_client_app_fn: Optional[Callable[[str, str], ClientApp]] = None, + load_client_app_fn: Optional[Callable[[str, str, str], ClientApp]] = None, client_fn: Optional[ClientFnExt] = None, client: Optional[Client] = None, grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, @@ -205,13 +209,13 @@ def start_client_internal( insecure: Optional[bool] = None, transport: Optional[str] = None, authentication_keys: Optional[ - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, max_retries: Optional[int] = None, max_wait_time: Optional[float] = None, flwr_path: Optional[Path] = None, isolation: Optional[str] = None, - supernode_address: Optional[str] = ADDRESS_CLIENTAPPIO_API_GRPC_RERE, + supernode_address: Optional[str] = CLIENTAPPIO_API_DEFAULT_ADDRESS, ) -> None: """Start a Flower client node which connects to a Flower server. @@ -249,6 +253,11 @@ class `flwr.client.Client` (default: None) - 'grpc-bidi': gRPC, bidirectional streaming - 'grpc-rere': gRPC, request-response (experimental) - 'rest': HTTP (experimental) + authentication_keys : Optional[Tuple[PrivateKey, PublicKey]] (default: None) + Tuple containing the elliptic curve private key and public key for + authentication from the cryptography library. + Source: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ + Used to establish an authenticated connection with the server. 
max_retries: Optional[int] (default: None) The maximum number of times the client will try to connect to the server before giving up in case of a connection error. If set to None, @@ -266,7 +275,7 @@ class `flwr.client.Client` (default: None) by the SueprNode and communicates using gRPC at the address `supernode_address`. If `process`, the `ClientApp` runs in a separate isolated process and communicates using gRPC at the address `supernode_address`. - supernode_address : Optional[str] (default: `ADDRESS_CLIENTAPPIO_API_GRPC_RERE`) + supernode_address : Optional[str] (default: `CLIENTAPPIO_API_DEFAULT_ADDRESS`) The SuperNode gRPC server address. """ if insecure is None: @@ -288,7 +297,7 @@ def single_client_factory( client_fn = single_client_factory - def _load_client_app(_1: str, _2: str) -> ClientApp: + def _load_client_app(_1: str, _2: str, _3: str) -> ClientApp: return ClientApp(client_fn=client_fn) load_client_app_fn = _load_client_app @@ -354,10 +363,10 @@ def _on_backoff(retry_state: RetryState) -> None: on_backoff=_on_backoff, ) - # NodeState gets initialized when the first connection is established - node_state: Optional[NodeState] = None + # DeprecatedRunInfoStore gets initialized when the first connection is established + run_info_store: Optional[DeprecatedRunInfoStore] = None - runs: Dict[int, Run] = {} + runs: dict[int, Run] = {} while not app_state_tracker.interrupt: sleep_duration: int = 0 @@ -372,7 +381,7 @@ def _on_backoff(retry_state: RetryState) -> None: receive, send, create_node, delete_node, get_run, get_fab = conn # Register node when connecting the first time - if node_state is None: + if run_info_store is None: if create_node is None: if transport not in ["grpc-bidi", None]: raise NotImplementedError( @@ -381,7 +390,7 @@ def _on_backoff(retry_state: RetryState) -> None: ) # gRPC-bidi doesn't have the concept of node_id, # so we set it to -1 - node_state = NodeState( + run_info_store = DeprecatedRunInfoStore( node_id=-1, node_config={}, ) @@ 
-392,7 +401,7 @@ def _on_backoff(retry_state: RetryState) -> None: ) # pylint: disable=not-callable if node_id is None: raise ValueError("Node registration failed") - node_state = NodeState( + run_info_store = DeprecatedRunInfoStore( node_id=node_id, node_config=node_config, ) @@ -451,7 +460,7 @@ def _on_backoff(retry_state: RetryState) -> None: run.fab_id, run.fab_version = fab_id, fab_version # Register context for this run - node_state.register_context( + run_info_store.register_context( run_id=run_id, run=run, flwr_path=flwr_path, @@ -459,7 +468,7 @@ def _on_backoff(retry_state: RetryState) -> None: ) # Retrieve context for this run - context = node_state.retrieve_context(run_id=run_id) + context = run_info_store.retrieve_context(run_id=run_id) # Create an error reply message that will never be used to prevent # the used-before-assignment linting error reply_message = message.create_error_reply( @@ -519,7 +528,7 @@ def _on_backoff(retry_state: RetryState) -> None: else: # Load ClientApp instance client_app: ClientApp = load_client_app_fn( - fab_id, fab_version + fab_id, fab_version, run.fab_hash ) # Execute ClientApp @@ -532,7 +541,7 @@ def _on_backoff(retry_state: RetryState) -> None: # Raise exception, crash process raise ex - # Don't update/change NodeState + # Don't update/change DeprecatedRunInfoStore e_code = ErrorCode.CLIENT_APP_RAISED_EXCEPTION # Ex fmt: ":<'division by zero'>" @@ -557,7 +566,7 @@ def _on_backoff(retry_state: RetryState) -> None: ) else: # No exception, update node state - node_state.update_context( + run_info_store.update_context( run_id=run_id, context=context, ) @@ -690,7 +699,7 @@ def start_numpy_client( ) -def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ +def _init_connection(transport: Optional[str], server_address: str) -> tuple[ Callable[ [ str, @@ -698,10 +707,10 @@ def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ RetryInvoker, int, Union[bytes, str, None], - 
Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]], + Optional[tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]], ], - ContextManager[ - Tuple[ + AbstractContextManager[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], @@ -712,7 +721,7 @@ def _init_connection(transport: Optional[str], server_address: str) -> Tuple[ ], ], str, - Type[Exception], + type[Exception], ]: # Parse IP address parsed_address = parse_address(server_address) @@ -770,7 +779,7 @@ def signal_handler(sig, frame): # type: ignore signal.signal(signal.SIGTERM, signal_handler) -def run_clientappio_api_grpc(address: str) -> Tuple[grpc.Server, ClientAppIoServicer]: +def run_clientappio_api_grpc(address: str) -> tuple[grpc.Server, ClientAppIoServicer]: """Run ClientAppIo API gRPC server.""" clientappio_servicer: grpc.Server = ClientAppIoServicer() clientappio_add_servicer_to_server_fn = add_ClientAppIoServicer_to_server diff --git a/src/py/flwr/client/app_test.py b/src/py/flwr/client/app_test.py index 74ade03f973a..723a066ea0bc 100644 --- a/src/py/flwr/client/app_test.py +++ b/src/py/flwr/client/app_test.py @@ -15,8 +15,6 @@ """Flower Client app tests.""" -from typing import Dict, Tuple - from flwr.common import ( Config, EvaluateIns, @@ -59,7 +57,7 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: class NeedsWrappingClient(NumPyClient): """Client implementation extending the high-level NumPyClient.""" - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Raise an Exception because this method is not expected to be called.""" raise NotImplementedError() @@ -69,13 +67,13 @@ def get_parameters(self, config: Config) -> NDArrays: def fit( self, parameters: NDArrays, config: Config - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Raise an Exception because this method is not expected to 
be called.""" raise NotImplementedError() def evaluate( self, parameters: NDArrays, config: Config - ) -> Tuple[float, int, Dict[str, Scalar]]: + ) -> tuple[float, int, dict[str, Scalar]]: """Raise an Exception because this method is not expected to be called.""" raise NotImplementedError() diff --git a/src/py/flwr/client/client_app.py b/src/py/flwr/client/client_app.py index 2a913b3a248d..234d84f27782 100644 --- a/src/py/flwr/client/client_app.py +++ b/src/py/flwr/client/client_app.py @@ -16,7 +16,7 @@ import inspect -from typing import Callable, List, Optional +from typing import Callable, Optional from flwr.client.client import Client from flwr.client.message_handler.message_handler import ( @@ -41,11 +41,11 @@ def _alert_erroneous_client_fn() -> None: def _inspect_maybe_adapt_client_fn_signature(client_fn: ClientFnExt) -> ClientFnExt: client_fn_args = inspect.signature(client_fn).parameters - first_arg = list(client_fn_args.keys())[0] if len(client_fn_args) != 1: _alert_erroneous_client_fn() + first_arg = list(client_fn_args.keys())[0] first_arg_type = client_fn_args[first_arg].annotation if first_arg_type is str or first_arg == "cid": @@ -109,9 +109,9 @@ class ClientApp: def __init__( self, client_fn: Optional[ClientFnExt] = None, # Only for backward compatibility - mods: Optional[List[Mod]] = None, + mods: Optional[list[Mod]] = None, ) -> None: - self._mods: List[Mod] = mods if mods is not None else [] + self._mods: list[Mod] = mods if mods is not None else [] # Create wrapper function for `handle` self._call: Optional[ClientAppCallable] = None @@ -263,7 +263,7 @@ def _registration_error(fn_name: str) -> ValueError: >>> class FlowerClient(NumPyClient): >>> # ... 
>>> - >>> def client_fn(cid) -> Client: + >>> def client_fn(context: Context): >>> return FlowerClient().to_client() >>> >>> app = ClientApp( diff --git a/src/py/flwr/client/clientapp/app.py b/src/py/flwr/client/clientapp/app.py index 69d334fead14..52be2a4b6dc1 100644 --- a/src/py/flwr/client/clientapp/app.py +++ b/src/py/flwr/client/clientapp/app.py @@ -17,7 +17,7 @@ import argparse import time from logging import DEBUG, ERROR, INFO -from typing import Optional, Tuple +from typing import Optional import grpc @@ -133,7 +133,9 @@ def run_clientapp( # pylint: disable=R0914 try: # Load ClientApp - client_app: ClientApp = load_client_app_fn(run.fab_id, run.fab_version) + client_app: ClientApp = load_client_app_fn( + run.fab_id, run.fab_version, fab.hash_str if fab else "" + ) # Execute ClientApp reply_message = client_app(message=message, context=context) @@ -196,7 +198,7 @@ def get_token(stub: grpc.Channel) -> Optional[int]: def pull_message( stub: grpc.Channel, token: int -) -> Tuple[Message, Context, Run, Optional[Fab]]: +) -> tuple[Message, Context, Run, Optional[Fab]]: """Pull message from SuperNode to ClientApp.""" log(INFO, "Pulling ClientAppInputs for token %s", token) try: diff --git a/src/py/flwr/client/clientapp/clientappio_servicer_test.py b/src/py/flwr/client/clientapp/clientappio_servicer_test.py index a03400c12a86..82c9f16e8201 100644 --- a/src/py/flwr/client/clientapp/clientappio_servicer_test.py +++ b/src/py/flwr/client/clientapp/clientappio_servicer_test.py @@ -36,7 +36,7 @@ ) from flwr.proto.message_pb2 import Context as ProtoContext from flwr.proto.run_pb2 import Run as ProtoRun -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from .clientappio_servicer import ClientAppInputs, ClientAppIoServicer, ClientAppOutputs diff --git a/src/py/flwr/client/clientapp/utils.py b/src/py/flwr/client/clientapp/utils.py index d2386dd707c3..f7261c015b14 100644 
--- a/src/py/flwr/client/clientapp/utils.py +++ b/src/py/flwr/client/clientapp/utils.py @@ -34,7 +34,7 @@ def get_load_client_app_fn( app_path: Optional[str], multi_app: bool, flwr_dir: Optional[str] = None, -) -> Callable[[str, str], ClientApp]: +) -> Callable[[str, str, str], ClientApp]: """Get the load_client_app_fn function. If `multi_app` is True, this function loads the specified ClientApp @@ -55,13 +55,14 @@ def get_load_client_app_fn( if not valid and error_msg: raise LoadClientAppError(error_msg) from None - def _load(fab_id: str, fab_version: str) -> ClientApp: + def _load(fab_id: str, fab_version: str, fab_hash: str) -> ClientApp: runtime_app_dir = Path(app_path if app_path else "").absolute() # If multi-app feature is disabled if not multi_app: # Set app reference client_app_ref = default_app_ref - # If multi-app feature is enabled but app directory is provided + # If multi-app feature is enabled but app directory is provided. + # `fab_hash` is not required since the app is loaded from `runtime_app_dir`. elif app_path is not None: config = get_project_config(runtime_app_dir) this_fab_version, this_fab_id = get_metadata_from_config(config) @@ -81,11 +82,16 @@ def _load(fab_id: str, fab_version: str) -> ClientApp: else: try: runtime_app_dir = get_project_dir( - fab_id, fab_version, get_flwr_dir(flwr_dir) + fab_id, fab_version, fab_hash, get_flwr_dir(flwr_dir) ) config = get_project_config(runtime_app_dir) except Exception as e: - raise LoadClientAppError("Failed to load ClientApp") from e + raise LoadClientAppError( + "Failed to load ClientApp." + "Possible reasons for error include mismatched " + "`fab_id`, `fab_version`, or `fab_hash` in " + f"{str(get_flwr_dir(flwr_dir).resolve())}." 
+ ) from e # Set app reference client_app_ref = config["tool"]["flwr"]["app"]["components"]["clientapp"] diff --git a/src/py/flwr/client/dpfedavg_numpy_client.py b/src/py/flwr/client/dpfedavg_numpy_client.py index c592d10936d5..bade811b48ce 100644 --- a/src/py/flwr/client/dpfedavg_numpy_client.py +++ b/src/py/flwr/client/dpfedavg_numpy_client.py @@ -16,7 +16,6 @@ import copy -from typing import Dict, Tuple import numpy as np @@ -39,7 +38,7 @@ def __init__(self, client: NumPyClient) -> None: super().__init__() self.client = client - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Get client properties using the given Numpy client. Parameters @@ -58,7 +57,7 @@ def get_properties(self, config: Config) -> Dict[str, Scalar]: """ return self.client.get_properties(config) - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + def get_parameters(self, config: dict[str, Scalar]) -> NDArrays: """Return the current local model parameters. Parameters @@ -76,8 +75,8 @@ def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: return self.client.get_parameters(config) def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Train the provided parameters using the locally held dataset. This method first updates the local model using the original parameters @@ -153,8 +152,8 @@ def fit( return updated_params, num_examples, metrics def evaluate( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[float, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[float, int, dict[str, Scalar]]: """Evaluate the provided parameters using the locally held dataset. 
Parameters diff --git a/src/py/flwr/client/grpc_adapter_client/connection.py b/src/py/flwr/client/grpc_adapter_client/connection.py index f9f7b1043524..ab823112bbe1 100644 --- a/src/py/flwr/client/grpc_adapter_client/connection.py +++ b/src/py/flwr/client/grpc_adapter_client/connection.py @@ -15,9 +15,10 @@ """Contextmanager for a GrpcAdapter channel to the Flower server.""" +from collections.abc import Iterator from contextlib import contextmanager from logging import ERROR -from typing import Callable, Iterator, Optional, Tuple, Union +from typing import Callable, Optional, Union from cryptography.hazmat.primitives.asymmetric import ec @@ -31,17 +32,17 @@ @contextmanager -def grpc_adapter( # pylint: disable=R0913 +def grpc_adapter( # pylint: disable=R0913,too-many-positional-arguments server_address: str, insecure: bool, retry_invoker: RetryInvoker, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, # pylint: disable=W0613 root_certificates: Optional[Union[bytes, str]] = None, authentication_keys: Optional[ # pylint: disable=unused-argument - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], diff --git a/src/py/flwr/client/grpc_client/connection.py b/src/py/flwr/client/grpc_client/connection.py index 489891f55436..75d2ebe15025 100644 --- a/src/py/flwr/client/grpc_client/connection.py +++ b/src/py/flwr/client/grpc_client/connection.py @@ -16,11 +16,12 @@ import uuid +from collections.abc import Iterator from contextlib import contextmanager from logging import DEBUG, ERROR from pathlib import Path from queue import Queue -from typing import Callable, Iterator, Optional, Tuple, Union, cast +from typing import Callable, Optional, Union, cast from cryptography.hazmat.primitives.asymmetric import ec @@ -59,17 +60,17 @@ def 
on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_connection( # pylint: disable=R0913, R0915 +def grpc_connection( # pylint: disable=R0913,R0915,too-many-positional-arguments server_address: str, insecure: bool, retry_invoker: RetryInvoker, # pylint: disable=unused-argument max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, root_certificates: Optional[Union[bytes, str]] = None, authentication_keys: Optional[ # pylint: disable=unused-argument - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], diff --git a/src/py/flwr/client/grpc_client/connection_test.py b/src/py/flwr/client/grpc_client/connection_test.py index bd377ef3470a..13bd2c6af8e7 100644 --- a/src/py/flwr/client/grpc_client/connection_test.py +++ b/src/py/flwr/client/grpc_client/connection_test.py @@ -17,8 +17,9 @@ import concurrent.futures import socket +from collections.abc import Iterator from contextlib import closing -from typing import Iterator, cast +from typing import cast from unittest.mock import patch import grpc diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor.py b/src/py/flwr/client/grpc_rere_client/client_interceptor.py index d2dded8a73d9..041860957db7 100644 --- a/src/py/flwr/client/grpc_rere_client/client_interceptor.py +++ b/src/py/flwr/client/grpc_rere_client/client_interceptor.py @@ -17,17 +17,21 @@ import base64 import collections -from typing import Any, Callable, Optional, Sequence, Tuple, Union +from collections.abc import Sequence +from logging import WARNING +from typing import Any, Callable, Optional, Union import grpc from cryptography.hazmat.primitives.asymmetric import ec +from flwr.common.logger import log from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( bytes_to_public_key, compute_hmac, 
generate_shared_key, public_key_to_bytes, ) +from flwr.proto.fab_pb2 import GetFabRequest # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, DeleteNodeRequest, @@ -47,11 +51,12 @@ PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ] def _get_value_from_tuples( - key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] + key_string: str, tuples: Sequence[tuple[str, Union[str, bytes]]] ) -> bytes: value = next((value for key, value in tuples if key == key_string), "") if isinstance(value, str): @@ -123,18 +128,18 @@ def intercept_unary_unary( PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ), ): if self.shared_secret is None: raise RuntimeError("Failure to compute hmac") + message_bytes = request.SerializeToString(deterministic=True) metadata.append( ( _AUTH_TOKEN_HEADER, base64.urlsafe_b64encode( - compute_hmac( - self.shared_secret, request.SerializeToString(True) - ) + compute_hmac(self.shared_secret, message_bytes) ), ) ) @@ -151,8 +156,15 @@ def intercept_unary_unary( server_public_key_bytes = base64.urlsafe_b64decode( _get_value_from_tuples(_PUBLIC_KEY_HEADER, response.initial_metadata()) ) - self.server_public_key = bytes_to_public_key(server_public_key_bytes) - self.shared_secret = generate_shared_key( - self.private_key, self.server_public_key - ) + + if server_public_key_bytes != b"": + self.server_public_key = bytes_to_public_key(server_public_key_bytes) + else: + log(WARNING, "Can't get server public key, SuperLink may be offline") + + if self.server_public_key is not None: + self.shared_secret = generate_shared_key( + self.private_key, self.server_public_key + ) + return response diff --git a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py index 79416a8eb31b..a029b926423f 100644 --- a/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py +++ 
b/src/py/flwr/client/grpc_rere_client/client_interceptor_test.py @@ -16,11 +16,13 @@ import base64 +import inspect import threading import unittest +from collections.abc import Sequence from concurrent import futures from logging import DEBUG, INFO, WARN -from typing import Optional, Sequence, Tuple, Union +from typing import Optional, Union, get_args import grpc @@ -46,6 +48,7 @@ PushTaskResRequest, PushTaskResResponse, ) +from flwr.proto.fleet_pb2_grpc import FleetServicer from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns # pylint: disable=E0611 @@ -60,7 +63,7 @@ def __init__(self) -> None: """Initialize mock servicer.""" self._lock = threading.Lock() self._received_client_metadata: Optional[ - Sequence[Tuple[str, Union[str, bytes]]] + Sequence[tuple[str, Union[str, bytes]]] ] = None self.server_private_key, self.server_public_key = generate_key_pairs() self._received_message_bytes: bytes = b"" @@ -73,7 +76,7 @@ def unary_unary( """Handle unary call.""" with self._lock: self._received_client_metadata = context.invocation_metadata() - self._received_message_bytes = request.SerializeToString(True) + self._received_message_bytes = request.SerializeToString(deterministic=True) if isinstance(request, CreateNodeRequest): context.send_initial_metadata( @@ -105,7 +108,7 @@ def unary_unary( def received_client_metadata( self, - ) -> Optional[Sequence[Tuple[str, Union[str, bytes]]]]: + ) -> Optional[Sequence[tuple[str, Union[str, bytes]]]]: """Return received client metadata.""" with self._lock: return self._received_client_metadata @@ -151,7 +154,7 @@ def _add_generic_handler(servicer: _MockServicer, server: grpc.Server) -> None: def _get_value_from_tuples( - key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] + key_string: str, tuples: Sequence[tuple[str, Union[str, bytes]]] ) -> bytes: value = next((value for key, 
value in tuples if key == key_string), "") if isinstance(value, str): @@ -164,7 +167,7 @@ def _init_retry_invoker() -> RetryInvoker: return RetryInvoker( wait_gen_factory=exponential, recoverable_exceptions=grpc.RpcError, - max_tries=None, + max_tries=1, max_time=None, on_giveup=lambda retry_state: ( log( @@ -415,6 +418,41 @@ def test_client_auth_get_run(self) -> None: assert actual_public_key == expected_public_key assert actual_hmac == expected_hmac + def test_without_servicer(self) -> None: + """Test client authentication without servicer.""" + # Prepare + self._server.stop(grace=None) + retry_invoker = _init_retry_invoker() + + # Execute and Assert + with self._connection( + self._address, + True, + retry_invoker, + GRPC_MAX_MESSAGE_LENGTH, + None, + (self._client_private_key, self._client_public_key), + ) as conn: + _, _, create_node, _, _, _ = conn + assert create_node is not None + create_node() + + assert self._servicer.received_client_metadata() is None + + def test_fleet_requests_included(self) -> None: + """Test if all Fleet requests are included in the authentication mode.""" + # Prepare + requests = get_args(Request) + rpc_names = {req.__qualname__.removesuffix("Request") for req in requests} + expected_rpc_names = { + name + for name, ref in inspect.getmembers(FleetServicer) + if inspect.isfunction(ref) + } + + # Assert + assert expected_rpc_names == rpc_names + if __name__ == "__main__": unittest.main(verbosity=2) diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index 8bae253c819a..bfc20eee896a 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -17,11 +17,12 @@ import random import threading +from collections.abc import Iterator, Sequence from contextlib import contextmanager from copy import copy from logging import DEBUG, ERROR from pathlib import Path -from typing import Callable, Iterator, Optional, Sequence, 
Tuple, Type, Union, cast +from typing import Callable, Optional, Union, cast import grpc from cryptography.hazmat.primitives.asymmetric import ec @@ -70,18 +71,18 @@ def on_channel_state_change(channel_connectivity: str) -> None: @contextmanager -def grpc_request_response( # pylint: disable=R0913, R0914, R0915 +def grpc_request_response( # pylint: disable=R0913,R0914,R0915,R0917 server_address: str, insecure: bool, retry_invoker: RetryInvoker, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, # pylint: disable=W0613 root_certificates: Optional[Union[bytes, str]] = None, authentication_keys: Optional[ - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, - adapter_cls: Optional[Union[Type[FleetStub], Type[GrpcAdapter]]] = None, + adapter_cls: Optional[Union[type[FleetStub], type[GrpcAdapter]]] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], @@ -119,6 +120,9 @@ def grpc_request_response( # pylint: disable=R0913, R0914, R0915 authentication from the cryptography library. Source: https://cryptography.io/en/latest/hazmat/primitives/asymmetric/ec/ Used to establish an authenticated connection with the server. + adapter_cls: Optional[Union[type[FleetStub], type[GrpcAdapter]]] (default: None) + A GrpcStub Class that can be used to send messages. By default the FleetStub + will be used. 
Returns ------- @@ -268,7 +272,7 @@ def send(message: Message) -> None: task_res = message_to_taskres(message) # Serialize ProtoBuf to bytes - request = PushTaskResRequest(task_res_list=[task_res]) + request = PushTaskResRequest(node=node, task_res_list=[task_res]) _ = retry_invoker.invoke(stub.PushTaskRes, request) # Cleanup @@ -276,7 +280,7 @@ def send(message: Message) -> None: def get_run(run_id: int) -> Run: # Call FleetAPI - get_run_request = GetRunRequest(run_id=run_id) + get_run_request = GetRunRequest(node=node, run_id=run_id) get_run_response: GetRunResponse = retry_invoker.invoke( stub.GetRun, request=get_run_request, @@ -293,7 +297,7 @@ def get_run(run_id: int) -> Run: def get_fab(fab_hash: str) -> Fab: # Call FleetAPI - get_fab_request = GetFabRequest(hash_str=fab_hash) + get_fab_request = GetFabRequest(node=node, hash_str=fab_hash) get_fab_response: GetFabResponse = retry_invoker.invoke( stub.GetFab, request=get_fab_request, diff --git a/src/py/flwr/client/grpc_rere_client/grpc_adapter.py b/src/py/flwr/client/grpc_rere_client/grpc_adapter.py index fde03943a852..69ea29d5b7b3 100644 --- a/src/py/flwr/client/grpc_rere_client/grpc_adapter.py +++ b/src/py/flwr/client/grpc_rere_client/grpc_adapter.py @@ -17,17 +17,21 @@ import sys from logging import DEBUG -from typing import Any, Type, TypeVar, cast +from typing import Any, TypeVar, cast import grpc from google.protobuf.message import Message as GrpcMessage from flwr.common import log from flwr.common.constant import ( + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_NAME_KEY, + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_VERSION_KEY, GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY, + GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY, + GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY, GRPC_ADAPTER_METADATA_SHOULD_EXIT_KEY, ) -from flwr.common.version import package_version +from flwr.common.version import package_name, package_version from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 
import ( # pylint: disable=E0611 CreateNodeRequest, @@ -59,12 +63,19 @@ def __init__(self, channel: grpc.Channel) -> None: self.stub = GrpcAdapterStub(channel) def _send_and_receive( - self, request: GrpcMessage, response_type: Type[T], **kwargs: Any + self, request: GrpcMessage, response_type: type[T], **kwargs: Any ) -> T: # Serialize request + req_cls = request.__class__ container_req = MessageContainer( - metadata={GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY: package_version}, - grpc_message_name=request.__class__.__qualname__, + metadata={ + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_NAME_KEY: package_name, + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_VERSION_KEY: package_version, + GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY: package_version, + GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY: req_cls.__module__, + GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY: req_cls.__qualname__, + }, + grpc_message_name=req_cls.__qualname__, grpc_message_content=request.SerializeToString(), ) diff --git a/src/py/flwr/client/message_handler/message_handler.py b/src/py/flwr/client/message_handler/message_handler.py index 1ab84eb01468..765c6a6b2e91 100644 --- a/src/py/flwr/client/message_handler/message_handler.py +++ b/src/py/flwr/client/message_handler/message_handler.py @@ -15,7 +15,7 @@ """Client-side message handler.""" from logging import WARN -from typing import Optional, Tuple, cast +from typing import Optional, cast from flwr.client.client import ( maybe_call_evaluate, @@ -52,7 +52,7 @@ class UnknownServerMessage(Exception): """Exception indicating that the received message is unknown.""" -def handle_control_message(message: Message) -> Tuple[Optional[Message], int]: +def handle_control_message(message: Message) -> tuple[Optional[Message], int]: """Handle control part of the incoming message. 
Parameters @@ -147,7 +147,7 @@ def handle_legacy_message_from_msgtype( def _reconnect( reconnect_msg: ServerMessage.ReconnectIns, -) -> Tuple[ClientMessage, int]: +) -> tuple[ClientMessage, int]: # Determine the reason for sending DisconnectRes message reason = Reason.ACK sleep_duration = None diff --git a/src/py/flwr/client/message_handler/message_handler_test.py b/src/py/flwr/client/message_handler/message_handler_test.py index 557d61ffb32a..311f8c37e1b1 100644 --- a/src/py/flwr/client/message_handler/message_handler_test.py +++ b/src/py/flwr/client/message_handler/message_handler_test.py @@ -19,7 +19,6 @@ import unittest import uuid from copy import copy -from typing import List from flwr.client import Client from flwr.client.typing import ClientFnExt @@ -294,7 +293,7 @@ def test_invalid_message_run_id(self) -> None: msg = Message(metadata=self.valid_out_metadata, content=RecordSet()) # Execute - invalid_metadata_list: List[Metadata] = [] + invalid_metadata_list: list[Metadata] = [] attrs = list(vars(self.valid_out_metadata).keys()) for attr in attrs: if attr == "_partition_id": diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py index 5b196ad84321..f9d3c433157d 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod.py @@ -18,7 +18,7 @@ import os from dataclasses import dataclass, field from logging import DEBUG, WARNING -from typing import Any, Dict, List, Tuple, cast +from typing import Any, cast from flwr.client.typing import ClientAppCallable from flwr.common import ( @@ -91,11 +91,11 @@ class SecAggPlusState: # Random seed for generating the private mask rd_seed: bytes = b"" - rd_seed_share_dict: Dict[int, bytes] = field(default_factory=dict) - sk1_share_dict: Dict[int, bytes] = field(default_factory=dict) + rd_seed_share_dict: dict[int, bytes] = field(default_factory=dict) + sk1_share_dict: 
dict[int, bytes] = field(default_factory=dict) # The dict of the shared secrets from sk2 - ss2_dict: Dict[int, bytes] = field(default_factory=dict) - public_keys_dict: Dict[int, Tuple[bytes, bytes]] = field(default_factory=dict) + ss2_dict: dict[int, bytes] = field(default_factory=dict) + public_keys_dict: dict[int, tuple[bytes, bytes]] = field(default_factory=dict) def __init__(self, **kwargs: ConfigsRecordValues) -> None: for k, v in kwargs.items(): @@ -104,8 +104,8 @@ def __init__(self, **kwargs: ConfigsRecordValues) -> None: new_v: Any = v if k.endswith(":K"): k = k[:-2] - keys = cast(List[int], v) - values = cast(List[bytes], kwargs[f"{k}:V"]) + keys = cast(list[int], v) + values = cast(list[bytes], kwargs[f"{k}:V"]) if len(values) > len(keys): updated_values = [ tuple(values[i : i + 2]) for i in range(0, len(values), 2) @@ -115,17 +115,17 @@ def __init__(self, **kwargs: ConfigsRecordValues) -> None: new_v = dict(zip(keys, values)) self.__setattr__(k, new_v) - def to_dict(self) -> Dict[str, ConfigsRecordValues]: + def to_dict(self) -> dict[str, ConfigsRecordValues]: """Convert the state to a dictionary.""" ret = vars(self) for k in list(ret.keys()): if isinstance(ret[k], dict): # Replace dict with two lists - v = cast(Dict[str, Any], ret.pop(k)) + v = cast(dict[str, Any], ret.pop(k)) ret[f"{k}:K"] = list(v.keys()) if k == "public_keys_dict": - v_list: List[bytes] = [] - for b1_b2 in cast(List[Tuple[bytes, bytes]], v.values()): + v_list: list[bytes] = [] + for b1_b2 in cast(list[tuple[bytes, bytes]], v.values()): v_list.extend(b1_b2) ret[f"{k}:V"] = v_list else: @@ -276,7 +276,7 @@ def check_configs(stage: str, configs: ConfigsRecord) -> None: ) if not isinstance(configs[key], list) or any( elm - for elm in cast(List[Any], configs[key]) + for elm in cast(list[Any], configs[key]) # pylint: disable-next=unidiomatic-typecheck if type(elm) is not expected_type ): @@ -299,7 +299,7 @@ def check_configs(stage: str, configs: ConfigsRecord) -> None: ) if not 
isinstance(configs[key], list) or any( elm - for elm in cast(List[Any], configs[key]) + for elm in cast(list[Any], configs[key]) # pylint: disable-next=unidiomatic-typecheck if type(elm) is not expected_type ): @@ -314,7 +314,7 @@ def check_configs(stage: str, configs: ConfigsRecord) -> None: def _setup( state: SecAggPlusState, configs: ConfigsRecord -) -> Dict[str, ConfigsRecordValues]: +) -> dict[str, ConfigsRecordValues]: # Assigning parameter values to object fields sec_agg_param_dict = configs state.sample_num = cast(int, sec_agg_param_dict[Key.SAMPLE_NUMBER]) @@ -350,8 +350,8 @@ def _setup( # pylint: disable-next=too-many-locals def _share_keys( state: SecAggPlusState, configs: ConfigsRecord -) -> Dict[str, ConfigsRecordValues]: - named_bytes_tuples = cast(Dict[str, Tuple[bytes, bytes]], configs) +) -> dict[str, ConfigsRecordValues]: + named_bytes_tuples = cast(dict[str, tuple[bytes, bytes]], configs) key_dict = {int(sid): (pk1, pk2) for sid, (pk1, pk2) in named_bytes_tuples.items()} log(DEBUG, "Node %d: starting stage 1...", state.nid) state.public_keys_dict = key_dict @@ -361,7 +361,7 @@ def _share_keys( raise ValueError("Available neighbours number smaller than threshold") # Check if all public keys are unique - pk_list: List[bytes] = [] + pk_list: list[bytes] = [] for pk1, pk2 in state.public_keys_dict.values(): pk_list.append(pk1) pk_list.append(pk2) @@ -415,11 +415,11 @@ def _collect_masked_vectors( configs: ConfigsRecord, num_examples: int, updated_parameters: Parameters, -) -> Dict[str, ConfigsRecordValues]: +) -> dict[str, ConfigsRecordValues]: log(DEBUG, "Node %d: starting stage 2...", state.nid) - available_clients: List[int] = [] - ciphertexts = cast(List[bytes], configs[Key.CIPHERTEXT_LIST]) - srcs = cast(List[int], configs[Key.SOURCE_LIST]) + available_clients: list[int] = [] + ciphertexts = cast(list[bytes], configs[Key.CIPHERTEXT_LIST]) + srcs = cast(list[int], configs[Key.SOURCE_LIST]) if len(ciphertexts) + 1 < state.threshold: raise 
ValueError("Not enough available neighbour clients.") @@ -467,7 +467,7 @@ def _collect_masked_vectors( quantized_parameters = factor_combine(q_ratio, quantized_parameters) - dimensions_list: List[Tuple[int, ...]] = [a.shape for a in quantized_parameters] + dimensions_list: list[tuple[int, ...]] = [a.shape for a in quantized_parameters] # Add private mask private_mask = pseudo_rand_gen(state.rd_seed, state.mod_range, dimensions_list) @@ -499,11 +499,11 @@ def _collect_masked_vectors( def _unmask( state: SecAggPlusState, configs: ConfigsRecord -) -> Dict[str, ConfigsRecordValues]: +) -> dict[str, ConfigsRecordValues]: log(DEBUG, "Node %d: starting stage 3...", state.nid) - active_nids = cast(List[int], configs[Key.ACTIVE_NODE_ID_LIST]) - dead_nids = cast(List[int], configs[Key.DEAD_NODE_ID_LIST]) + active_nids = cast(list[int], configs[Key.ACTIVE_NODE_ID_LIST]) + dead_nids = cast(list[int], configs[Key.DEAD_NODE_ID_LIST]) # Send private mask seed share for every avaliable client (including itself) # Send first private key share for building pairwise mask for every dropped client if len(active_nids) < state.threshold: diff --git a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py index 2832576fb4fc..e68bf5177797 100644 --- a/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py +++ b/src/py/flwr/client/mod/secure_aggregation/secaggplus_mod_test.py @@ -16,7 +16,7 @@ import unittest from itertools import product -from typing import Callable, Dict, List +from typing import Callable from flwr.client.mod import make_ffn from flwr.common import ( @@ -41,7 +41,7 @@ def get_test_handler( ctxt: Context, -) -> Callable[[Dict[str, ConfigsRecordValues]], ConfigsRecord]: +) -> Callable[[dict[str, ConfigsRecordValues]], ConfigsRecord]: """.""" def empty_ffn(_msg: Message, _2: Context) -> Message: @@ -49,7 +49,7 @@ def empty_ffn(_msg: Message, _2: Context) -> Message: app = 
make_ffn(empty_ffn, [secaggplus_mod]) - def func(configs: Dict[str, ConfigsRecordValues]) -> ConfigsRecord: + def func(configs: dict[str, ConfigsRecordValues]) -> ConfigsRecord: in_msg = Message( metadata=Metadata( run_id=0, @@ -158,7 +158,7 @@ def test_stage_setup_check(self) -> None: (Key.MOD_RANGE, int), ] - type_to_test_value: Dict[type, ConfigsRecordValues] = { + type_to_test_value: dict[type, ConfigsRecordValues] = { int: 10, bool: True, float: 1.0, @@ -166,7 +166,7 @@ def test_stage_setup_check(self) -> None: bytes: b"test", } - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { key: type_to_test_value[value_type] for key, value_type in valid_key_type_pairs } @@ -208,7 +208,7 @@ def test_stage_share_keys_check(self) -> None: handler = get_test_handler(ctxt) set_stage = _make_set_state_fn(ctxt) - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { "1": [b"public key 1", b"public key 2"], "2": [b"public key 1", b"public key 2"], "3": [b"public key 1", b"public key 2"], @@ -225,7 +225,7 @@ def test_stage_share_keys_check(self) -> None: valid_configs[Key.STAGE] = Stage.SHARE_KEYS # Test invalid configs - invalid_values: List[ConfigsRecordValues] = [ + invalid_values: list[ConfigsRecordValues] = [ b"public key 1", [b"public key 1"], [b"public key 1", b"public key 2", b"public key 3"], @@ -245,7 +245,7 @@ def test_stage_collect_masked_vectors_check(self) -> None: handler = get_test_handler(ctxt) set_stage = _make_set_state_fn(ctxt) - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, ConfigsRecordValues] = { Key.CIPHERTEXT_LIST: [b"ctxt!", b"ctxt@", b"ctxt#", b"ctxt?"], Key.SOURCE_LIST: [32, 51324, 32324123, -3], } @@ -289,7 +289,7 @@ def test_stage_unmask_check(self) -> None: handler = get_test_handler(ctxt) set_stage = _make_set_state_fn(ctxt) - valid_configs: Dict[str, ConfigsRecordValues] = { + valid_configs: dict[str, 
ConfigsRecordValues] = { Key.ACTIVE_NODE_ID_LIST: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], Key.DEAD_NODE_ID_LIST: [32, 51324, 32324123, -3], } diff --git a/src/py/flwr/client/mod/utils.py b/src/py/flwr/client/mod/utils.py index c8fb21379783..c76902cf263f 100644 --- a/src/py/flwr/client/mod/utils.py +++ b/src/py/flwr/client/mod/utils.py @@ -15,13 +15,11 @@ """Utility functions for mods.""" -from typing import List - from flwr.client.typing import ClientAppCallable, Mod from flwr.common import Context, Message -def make_ffn(ffn: ClientAppCallable, mods: List[Mod]) -> ClientAppCallable: +def make_ffn(ffn: ClientAppCallable, mods: list[Mod]) -> ClientAppCallable: """.""" def wrap_ffn(_ffn: ClientAppCallable, _mod: Mod) -> ClientAppCallable: diff --git a/src/py/flwr/client/mod/utils_test.py b/src/py/flwr/client/mod/utils_test.py index a5bbd0a0bb4d..e75fb5530b2c 100644 --- a/src/py/flwr/client/mod/utils_test.py +++ b/src/py/flwr/client/mod/utils_test.py @@ -16,7 +16,7 @@ import unittest -from typing import List, cast +from typing import cast from flwr.client.typing import ClientAppCallable, Mod from flwr.common import ( @@ -43,7 +43,7 @@ def _increment_context_counter(context: Context) -> None: context.state.metrics_records[METRIC] = MetricsRecord({COUNTER: current_counter}) -def make_mock_mod(name: str, footprint: List[str]) -> Mod: +def make_mock_mod(name: str, footprint: list[str]) -> Mod: """Make a mock mod.""" def mod(message: Message, context: Context, app: ClientAppCallable) -> Message: @@ -61,7 +61,7 @@ def mod(message: Message, context: Context, app: ClientAppCallable) -> Message: return mod -def make_mock_app(name: str, footprint: List[str]) -> ClientAppCallable: +def make_mock_app(name: str, footprint: list[str]) -> ClientAppCallable: """Make a mock app.""" def app(message: Message, context: Context) -> Message: @@ -97,7 +97,7 @@ class TestMakeApp(unittest.TestCase): def test_multiple_mods(self) -> None: """Test if multiple mods are called in the correct order.""" # 
Prepare - footprint: List[str] = [] + footprint: list[str] = [] mock_app = make_mock_app("app", footprint) mock_mod_names = [f"mod{i}" for i in range(1, 15)] mock_mods = [make_mock_mod(name, footprint) for name in mock_mod_names] @@ -127,7 +127,7 @@ def test_multiple_mods(self) -> None: def test_filter(self) -> None: """Test if a mod can filter incoming TaskIns.""" # Prepare - footprint: List[str] = [] + footprint: list[str] = [] mock_app = make_mock_app("app", footprint) context = Context(node_id=0, node_config={}, state=RecordSet(), run_config={}) message = _get_dummy_flower_message() diff --git a/src/py/flwr/client/node_state_tests.py b/src/py/flwr/client/node_state_tests.py index 26ac4fea6855..06ceb80a94ad 100644 --- a/src/py/flwr/client/node_state_tests.py +++ b/src/py/flwr/client/node_state_tests.py @@ -17,7 +17,7 @@ from typing import cast -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common import ConfigsRecord, Context from flwr.proto.task_pb2 import TaskIns # pylint: disable=E0611 @@ -34,32 +34,31 @@ def _run_dummy_task(context: Context) -> Context: def test_multirun_in_node_state() -> None: - """Test basic NodeState logic.""" + """Test basic DeprecatedRunInfoStore logic.""" # Tasks to perform tasks = [TaskIns(run_id=run_id) for run_id in [0, 1, 1, 2, 3, 2, 1, 5]] # the "tasks" is to count how many times each run is executed expected_values = {0: "1", 1: "1" * 3, 2: "1" * 2, 3: "1", 5: "1"} - # NodeState - node_state = NodeState(node_id=0, node_config={}) + node_info_store = DeprecatedRunInfoStore(node_id=0, node_config={}) for task in tasks: run_id = task.run_id # Register - node_state.register_context(run_id=run_id) + node_info_store.register_context(run_id=run_id) # Get run state - context = node_state.retrieve_context(run_id=run_id) + context = node_info_store.retrieve_context(run_id=run_id) # Run "task" updated_state = _run_dummy_task(context) # Update run state - 
node_state.update_context(run_id=run_id, context=updated_state) + node_info_store.update_context(run_id=run_id, context=updated_state) # Verify values - for run_id, run_info in node_state.run_infos.items(): + for run_id, run_info in node_info_store.run_infos.items(): assert ( run_info.context.state.configs_records["counter"]["count"] == expected_values[run_id] diff --git a/src/py/flwr/client/numpy_client.py b/src/py/flwr/client/numpy_client.py index b21a51b38e9b..6a656cb661d2 100644 --- a/src/py/flwr/client/numpy_client.py +++ b/src/py/flwr/client/numpy_client.py @@ -16,7 +16,7 @@ from abc import ABC -from typing import Callable, Dict, Tuple +from typing import Callable from flwr.client.client import Client from flwr.common import ( @@ -73,7 +73,7 @@ class NumPyClient(ABC): _context: Context - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return a client's set of properties. Parameters @@ -93,7 +93,7 @@ def get_properties(self, config: Config) -> Dict[str, Scalar]: _ = (self, config) return {} - def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + def get_parameters(self, config: dict[str, Scalar]) -> NDArrays: """Return the current local model parameters. Parameters @@ -112,8 +112,8 @@ def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: return [] def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Train the provided parameters using the locally held dataset. 
Parameters @@ -141,8 +141,8 @@ def fit( return [], 0, {} def evaluate( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[float, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[float, int, dict[str, Scalar]]: """Evaluate the provided parameters using the locally held dataset. Parameters @@ -310,7 +310,7 @@ def _set_context(self: Client, context: Context) -> None: def _wrap_numpy_client(client: NumPyClient) -> Client: - member_dict: Dict[str, Callable] = { # type: ignore + member_dict: dict[str, Callable] = { # type: ignore "__init__": _constructor, "get_context": _get_context, "set_context": _set_context, diff --git a/src/py/flwr/client/numpy_client_test.py b/src/py/flwr/client/numpy_client_test.py index 06a0deafe2c9..c5d520a73ce1 100644 --- a/src/py/flwr/client/numpy_client_test.py +++ b/src/py/flwr/client/numpy_client_test.py @@ -15,8 +15,6 @@ """Flower NumPyClient tests.""" -from typing import Dict, Tuple - from flwr.common import Config, NDArrays, Properties, Scalar from .numpy_client import ( @@ -40,14 +38,14 @@ def get_parameters(self, config: Config) -> NDArrays: return [] def fit( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[NDArrays, int, dict[str, Scalar]]: """Simulate training by returning empty weights, 0 samples, empty metrics.""" return [], 0, {} def evaluate( - self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[float, int, Dict[str, Scalar]]: + self, parameters: NDArrays, config: dict[str, Scalar] + ) -> tuple[float, int, dict[str, Scalar]]: """Simulate evaluate by returning 0.0 loss, 0 samples, empty metrics.""" return 0.0, 0, {} diff --git a/src/py/flwr/client/rest_client/connection.py b/src/py/flwr/client/rest_client/connection.py index d5f005fbaf77..f933ae44ad06 100644 --- a/src/py/flwr/client/rest_client/connection.py +++ 
b/src/py/flwr/client/rest_client/connection.py @@ -18,10 +18,11 @@ import random import sys import threading +from collections.abc import Iterator from contextlib import contextmanager from copy import copy from logging import ERROR, INFO, WARN -from typing import Callable, Iterator, Optional, Tuple, Type, TypeVar, Union +from typing import Callable, Optional, TypeVar, Union from cryptography.hazmat.primitives.asymmetric import ec from google.protobuf.message import Message as GrpcMessage @@ -81,7 +82,7 @@ @contextmanager -def http_request_response( # pylint: disable=,R0913, R0914, R0915 +def http_request_response( # pylint: disable=R0913,R0914,R0915,R0917 server_address: str, insecure: bool, # pylint: disable=unused-argument retry_invoker: RetryInvoker, @@ -90,10 +91,10 @@ def http_request_response( # pylint: disable=,R0913, R0914, R0915 Union[bytes, str] ] = None, # pylint: disable=unused-argument authentication_keys: Optional[ # pylint: disable=unused-argument - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ] = None, ) -> Iterator[ - Tuple[ + tuple[ Callable[[], Optional[Message]], Callable[[Message], None], Optional[Callable[[], Optional[int]]], @@ -173,7 +174,7 @@ def http_request_response( # pylint: disable=,R0913, R0914, R0915 ########################################################################### def _request( - req: GrpcMessage, res_type: Type[T], api_path: str, retry: bool = True + req: GrpcMessage, res_type: type[T], api_path: str, retry: bool = True ) -> Optional[T]: # Serialize the request req_bytes = req.SerializeToString() @@ -339,7 +340,7 @@ def send(message: Message) -> None: task_res = message_to_taskres(message) # Serialize ProtoBuf to bytes - req = PushTaskResRequest(task_res_list=[task_res]) + req = PushTaskResRequest(node=node, task_res_list=[task_res]) # Send the request res = _request(req, PushTaskResResponse, PATH_PUSH_TASK_RES) @@ -355,7 +356,7 @@ def 
send(message: Message) -> None: def get_run(run_id: int) -> Run: # Construct the request - req = GetRunRequest(run_id=run_id) + req = GetRunRequest(node=node, run_id=run_id) # Send the request res = _request(req, GetRunResponse, PATH_GET_RUN) @@ -372,7 +373,7 @@ def get_run(run_id: int) -> Run: def get_fab(fab_hash: str) -> Fab: # Construct the request - req = GetFabRequest(hash_str=fab_hash) + req = GetFabRequest(node=node, hash_str=fab_hash) # Send the request res = _request(req, GetFabResponse, PATH_GET_FAB) diff --git a/src/py/flwr/client/node_state.py b/src/py/flwr/client/run_info_store.py similarity index 93% rename from src/py/flwr/client/node_state.py rename to src/py/flwr/client/run_info_store.py index e16d7e34715d..6b0c3bd3a493 100644 --- a/src/py/flwr/client/node_state.py +++ b/src/py/flwr/client/run_info_store.py @@ -1,4 +1,4 @@ -# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Node state.""" +"""Deprecated Run Info Store.""" from dataclasses import dataclass from pathlib import Path -from typing import Dict, Optional +from typing import Optional from flwr.common import Context, RecordSet from flwr.common.config import ( @@ -36,7 +36,7 @@ class RunInfo: initial_run_config: UserConfig -class NodeState: +class DeprecatedRunInfoStore: """State of a node where client nodes execute runs.""" def __init__( @@ -46,9 +46,9 @@ def __init__( ) -> None: self.node_id = node_id self.node_config = node_config - self.run_infos: Dict[int, RunInfo] = {} + self.run_infos: dict[int, RunInfo] = {} - # pylint: disable=too-many-arguments + # pylint: disable=too-many-arguments,too-many-positional-arguments def register_context( self, run_id: int, diff --git a/src/py/flwr/client/supernode/app.py b/src/py/flwr/client/supernode/app.py index 8d28e69dea6e..92a3c9077f46 100644 --- a/src/py/flwr/client/supernode/app.py +++ b/src/py/flwr/client/supernode/app.py @@ -18,7 +18,7 @@ import sys from logging import DEBUG, ERROR, INFO, WARN from pathlib import Path -from typing import Optional, Tuple +from typing import Optional from cryptography.exceptions import UnsupportedAlgorithm from cryptography.hazmat.primitives.asymmetric import ec @@ -30,6 +30,9 @@ from flwr.common import EventType, event from flwr.common.config import parse_config_args from flwr.common.constant import ( + FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, + ISOLATION_MODE_PROCESS, + ISOLATION_MODE_SUBPROCESS, TRANSPORT_TYPE_GRPC_ADAPTER, TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_REST, @@ -37,25 +40,26 @@ from flwr.common.exit_handlers import register_exit_handlers from flwr.common.logger import log, warn_deprecated_feature -from ..app import ( - ISOLATION_MODE_PROCESS, - ISOLATION_MODE_SUBPROCESS, - start_client_internal, -) +from ..app import start_client_internal from ..clientapp.utils import get_load_client_app_fn 
-ADDRESS_FLEET_API_GRPC_RERE = "0.0.0.0:9092" - def run_supernode() -> None: """Run Flower SuperNode.""" + args = _parse_args_run_supernode().parse_args() + _warn_deprecated_server_arg(args) + log(INFO, "Starting Flower SuperNode") event(EventType.RUN_SUPERNODE_ENTER) - args = _parse_args_run_supernode().parse_args() - - _warn_deprecated_server_arg(args) + # Check if both `--flwr-dir` and `--isolation` were set + if args.flwr_dir is not None and args.isolation is not None: + log( + WARN, + "Both `--flwr-dir` and `--isolation` were specified. " + "Ignoring `--flwr-dir`.", + ) root_certificates = _get_certificates(args) load_fn = get_load_client_app_fn( @@ -77,7 +81,10 @@ def run_supernode() -> None: authentication_keys=authentication_keys, max_retries=args.max_retries, max_wait_time=args.max_wait_time, - node_config=parse_config_args([args.node_config]), + node_config=parse_config_args( + [args.node_config] if args.node_config else args.node_config + ), + flwr_path=args.flwr_dir, isolation=args.isolation, supernode_address=args.supernode_address, ) @@ -101,11 +108,11 @@ def run_client_app() -> None: def _warn_deprecated_server_arg(args: argparse.Namespace) -> None: """Warn about the deprecated argument `--server`.""" - if args.server != ADDRESS_FLEET_API_GRPC_RERE: + if args.server != FLEET_API_GRPC_RERE_DEFAULT_ADDRESS: warn = "Passing flag --server is deprecated. Use --superlink instead." warn_deprecated_feature(warn) - if args.superlink != ADDRESS_FLEET_API_GRPC_RERE: + if args.superlink != FLEET_API_GRPC_RERE_DEFAULT_ADDRESS: # if `--superlink` also passed, then # warn user that this argument overrides what was passed with `--server` log( @@ -176,12 +183,12 @@ def _parse_args_run_supernode() -> argparse.ArgumentParser: "--flwr-dir", default=None, help="""The path containing installed Flower Apps. 
- By default, this value is equal to: + The default directory is: - `$FLWR_HOME/` if `$FLWR_HOME` is defined - `$XDG_DATA_HOME/.flwr/` if `$XDG_DATA_HOME` is defined - `$HOME/.flwr/` in all other cases - """, + """, ) parser.add_argument( "--isolation", @@ -191,10 +198,10 @@ def _parse_args_run_supernode() -> argparse.ArgumentParser: ISOLATION_MODE_SUBPROCESS, ISOLATION_MODE_PROCESS, ], - help="Isolation mode when running `ClientApp` (optional, possible values: " - "`subprocess`, `process`). By default, `ClientApp` runs in the same process " + help="Isolation mode when running a `ClientApp` (optional, possible values: " + "`subprocess`, `process`). By default, a `ClientApp` runs in the same process " "that executes the SuperNode. Use `subprocess` to configure SuperNode to run " - "`ClientApp` in a subprocess. Use `process` to indicate that a separate " + "a `ClientApp` in a subprocess. Use `process` to indicate that a separate " "independent process gets created outside of SuperNode.", ) parser.add_argument( @@ -245,12 +252,12 @@ def _parse_args_common(parser: argparse.ArgumentParser) -> None: ) parser.add_argument( "--server", - default=ADDRESS_FLEET_API_GRPC_RERE, + default=FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, help="Server address", ) parser.add_argument( "--superlink", - default=ADDRESS_FLEET_API_GRPC_RERE, + default=FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, help="SuperLink Fleet API (gRPC-rere) address (IPv4, IPv6, or a domain name)", ) parser.add_argument( @@ -290,7 +297,7 @@ def _parse_args_common(parser: argparse.ArgumentParser) -> None: def _try_setup_client_authentication( args: argparse.Namespace, -) -> Optional[Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: +) -> Optional[tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: if not args.auth_supernode_private_key and not args.auth_supernode_public_key: return None diff --git a/src/py/flwr/common/address.py b/src/py/flwr/common/address.py index 7a70925c0fc9..2b10097ccb71 100644 --- 
a/src/py/flwr/common/address.py +++ b/src/py/flwr/common/address.py @@ -16,12 +16,12 @@ import socket from ipaddress import ip_address -from typing import Optional, Tuple +from typing import Optional IPV6: int = 6 -def parse_address(address: str) -> Optional[Tuple[str, int, Optional[bool]]]: +def parse_address(address: str) -> Optional[tuple[str, int, Optional[bool]]]: """Parse an IP address into host, port, and version. Parameters diff --git a/src/py/flwr/common/config.py b/src/py/flwr/common/config.py index eec7cfb726b7..24ccada7509a 100644 --- a/src/py/flwr/common/config.py +++ b/src/py/flwr/common/config.py @@ -17,12 +17,17 @@ import os import re from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union, cast, get_args +from typing import Any, Optional, Union, cast, get_args import tomli from flwr.cli.config_utils import get_fab_config, validate_fields -from flwr.common.constant import APP_DIR, FAB_CONFIG_FILE, FLWR_HOME +from flwr.common.constant import ( + APP_DIR, + FAB_CONFIG_FILE, + FAB_HASH_TRUNCATION, + FLWR_HOME, +) from flwr.common.typing import Run, UserConfig, UserConfigValue @@ -39,7 +44,10 @@ def get_flwr_dir(provided_path: Optional[str] = None) -> Path: def get_project_dir( - fab_id: str, fab_version: str, flwr_dir: Optional[Union[str, Path]] = None + fab_id: str, + fab_version: str, + fab_hash: str, + flwr_dir: Optional[Union[str, Path]] = None, ) -> Path: """Return the project directory based on the given fab_id and fab_version.""" # Check the fab_id @@ -50,10 +58,14 @@ def get_project_dir( publisher, project_name = fab_id.split("/") if flwr_dir is None: flwr_dir = get_flwr_dir() - return Path(flwr_dir) / APP_DIR / publisher / project_name / fab_version + return ( + Path(flwr_dir) + / APP_DIR + / f"{publisher}.{project_name}.{fab_version}.{fab_hash[:FAB_HASH_TRUNCATION]}" + ) -def get_project_config(project_dir: Union[str, Path]) -> Dict[str, Any]: +def get_project_config(project_dir: Union[str, Path]) -> dict[str, 
Any]: """Return pyproject.toml in the given project directory.""" # Load pyproject.toml file toml_path = Path(project_dir) / FAB_CONFIG_FILE @@ -127,7 +139,7 @@ def get_fused_config(run: Run, flwr_dir: Optional[Path]) -> UserConfig: if not run.fab_id or not run.fab_version: return {} - project_dir = get_project_dir(run.fab_id, run.fab_version, flwr_dir) + project_dir = get_project_dir(run.fab_id, run.fab_version, run.fab_hash, flwr_dir) # Return empty dict if project directory does not exist if not project_dir.is_dir(): @@ -137,13 +149,13 @@ def get_fused_config(run: Run, flwr_dir: Optional[Path]) -> UserConfig: def flatten_dict( - raw_dict: Optional[Dict[str, Any]], parent_key: str = "" + raw_dict: Optional[dict[str, Any]], parent_key: str = "" ) -> UserConfig: """Flatten dict by joining nested keys with a given separator.""" if raw_dict is None: return {} - items: List[Tuple[str, UserConfigValue]] = [] + items: list[tuple[str, UserConfigValue]] = [] separator: str = "." for k, v in raw_dict.items(): new_key = f"{parent_key}{separator}{k}" if parent_key else k @@ -159,9 +171,9 @@ def flatten_dict( return dict(items) -def unflatten_dict(flat_dict: Dict[str, Any]) -> Dict[str, Any]: +def unflatten_dict(flat_dict: dict[str, Any]) -> dict[str, Any]: """Unflatten a dict with keys containing separators into a nested dict.""" - unflattened_dict: Dict[str, Any] = {} + unflattened_dict: dict[str, Any] = {} separator: str = "." 
for key, value in flat_dict.items(): @@ -177,7 +189,7 @@ def unflatten_dict(flat_dict: Dict[str, Any]) -> Dict[str, Any]: def parse_config_args( - config: Optional[List[str]], + config: Optional[list[str]], ) -> UserConfig: """Parse separator separated list of key-value pairs separated by '='.""" overrides: UserConfig = {} @@ -185,28 +197,33 @@ def parse_config_args( if config is None: return overrides + # Handle if .toml file is passed + if len(config) == 1 and config[0].endswith(".toml"): + with Path(config[0]).open("rb") as config_file: + overrides = flatten_dict(tomli.load(config_file)) + return overrides + # Regular expression to capture key-value pairs with possible quoted values pattern = re.compile(r"(\S+?)=(\'[^\']*\'|\"[^\"]*\"|\S+)") + flat_overrides = {} for config_line in config: if config_line: - matches = pattern.findall(config_line) + # .toml files aren't allowed alongside other configs + if config_line.endswith(".toml"): + raise ValueError( + "TOML files cannot be passed alongside key-value pairs." 
+ ) - if ( - len(matches) == 1 - and "=" not in matches[0][0] - and matches[0][0].endswith(".toml") - ): - with Path(matches[0][0]).open("rb") as config_file: - overrides = flatten_dict(tomli.load(config_file)) - else: - toml_str = "\n".join(f"{k} = {v}" for k, v in matches) - overrides.update(tomli.loads(toml_str)) + matches = pattern.findall(config_line) + toml_str = "\n".join(f"{k} = {v}" for k, v in matches) + overrides.update(tomli.loads(toml_str)) + flat_overrides = flatten_dict(overrides) - return overrides + return flat_overrides -def get_metadata_from_config(config: Dict[str, Any]) -> Tuple[str, str]: +def get_metadata_from_config(config: dict[str, Any]) -> tuple[str, str]: """Extract `fab_version` and `fab_id` from a project config.""" return ( config["project"]["version"], diff --git a/src/py/flwr/common/config_test.py b/src/py/flwr/common/config_test.py index 712e07264d3f..b2edd319e382 100644 --- a/src/py/flwr/common/config_test.py +++ b/src/py/flwr/common/config_test.py @@ -15,6 +15,7 @@ """Test util functions handling Flower config.""" import os +import tempfile import textwrap from pathlib import Path from unittest.mock import patch @@ -64,13 +65,22 @@ def test_get_flwr_dir_with_xdg_data_home() -> None: def test_get_project_dir_invalid_fab_id() -> None: """Test get_project_dir with an invalid fab_id.""" with pytest.raises(ValueError): - get_project_dir("invalid_fab_id", "1.0.0") + get_project_dir( + "invalid_fab_id", + "1.0.0", + "03840e932bf61247c1231f0aec9e8ec5f041ed5516fb23638f24d25f3a007acd", + ) def test_get_project_dir_valid() -> None: """Test get_project_dir with an valid fab_id and version.""" - app_path = get_project_dir("app_name/user", "1.0.0", flwr_dir=".") - assert app_path == Path("apps") / "app_name" / "user" / "1.0.0" + app_path = get_project_dir( + "app_name/user", + "1.0.0", + "03840e932bf61247c1231f0aec9e8ec5f041ed5516fb23638f24d25f3a007acd", + flwr_dir=".", + ) + assert app_path == Path("apps") / "app_name.user.1.0.0.03840e93" def 
test_get_project_config_file_not_found() -> None: @@ -254,3 +264,50 @@ def test_parse_config_args_overrides() -> None: "key5": True, "key6": "value6", } + + +def test_parse_config_args_from_toml_file() -> None: + """Test if a toml passed to --run-config it is loaded and fused correctly.""" + # Will be saved as a temp .toml file + toml_config = """ + num-server-rounds = 10 + momentum = 0.1 + verbose = true + """ + # This is the UserConfig that would be extracted from pyproject.toml + initial_run_config: UserConfig = { + "num-server-rounds": 5, + "momentum": 0.2, + "dataset": "my-fancy-dataset", + "verbose": False, + } + expected_config = { + "num-server-rounds": 10, + "momentum": 0.1, + "dataset": "my-fancy-dataset", + "verbose": True, + } + + # Create a temporary directory using a context manager + with tempfile.TemporaryDirectory() as temp_dir: + # Create a temporary TOML file within that directory + toml_config_file = os.path.join(temp_dir, "extra_config.toml") + + # Write the data to the TOML file + with open(toml_config_file, "w", encoding="utf-8") as toml_file: + toml_file.write(textwrap.dedent(toml_config)) + + # Parse config (this mimics what `--run-config path/to/config.toml` does) + config_from_toml = parse_config_args([toml_config_file]) + # Fuse + config = fuse_dicts(initial_run_config, config_from_toml) + + # Assert + assert config == expected_config + + +def test_parse_config_args_passing_toml_and_key_value() -> None: + """Test that passing a toml and key-value configs aren't allowed.""" + config = ["my-other-config.toml", "lr=0.1", "epochs=99"] + with pytest.raises(ValueError): + parse_config_args(config) diff --git a/src/py/flwr/common/constant.py b/src/py/flwr/common/constant.py index 72256a62add7..081fa49b2153 100644 --- a/src/py/flwr/common/constant.py +++ b/src/py/flwr/common/constant.py @@ -37,7 +37,17 @@ TRANSPORT_TYPE_VCE, ] -SUPEREXEC_DEFAULT_ADDRESS = "0.0.0.0:9093" +# Addresses +# SuperNode +CLIENTAPPIO_API_DEFAULT_ADDRESS = "0.0.0.0:9094" 
+# SuperLink +DRIVER_API_DEFAULT_ADDRESS = "0.0.0.0:9091" +FLEET_API_GRPC_RERE_DEFAULT_ADDRESS = "0.0.0.0:9092" +FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS = ( + "[::]:8080" # IPv6 to keep start_server compatible +) +FLEET_API_REST_DEFAULT_ADDRESS = "0.0.0.0:9095" +EXEC_API_DEFAULT_ADDRESS = "0.0.0.0:9093" # Constants for ping PING_DEFAULT_INTERVAL = 30 @@ -49,20 +59,33 @@ # IDs RUN_ID_NUM_BYTES = 8 NODE_ID_NUM_BYTES = 8 -GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY = "flower-version" -GRPC_ADAPTER_METADATA_SHOULD_EXIT_KEY = "should-exit" # Constants for FAB APP_DIR = "apps" +FAB_ALLOWED_EXTENSIONS = {".py", ".toml", ".md"} FAB_CONFIG_FILE = "pyproject.toml" +FAB_DATE = (2024, 10, 1, 0, 0, 0) +FAB_HASH_TRUNCATION = 8 FLWR_HOME = "FLWR_HOME" # Constants entries in Node config for Simulation PARTITION_ID_KEY = "partition-id" NUM_PARTITIONS_KEY = "num-partitions" -GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY = "flower-version" +# Constants for keys in `metadata` of `MessageContainer` in `grpc-adapter` +GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_NAME_KEY = "flower-package-name" +GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_VERSION_KEY = "flower-package-version" +GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY = "flower-version" # Deprecated GRPC_ADAPTER_METADATA_SHOULD_EXIT_KEY = "should-exit" +GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY = "grpc-message-module" +GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY = "grpc-message-qualname" + +# Message TTL +MESSAGE_TTL_TOLERANCE = 1e-1 + +# Isolation modes +ISOLATION_MODE_SUBPROCESS = "subprocess" +ISOLATION_MODE_PROCESS = "process" class MessageType: diff --git a/src/py/flwr/common/differential_privacy.py b/src/py/flwr/common/differential_privacy.py index 85dc198ef8a0..56da98a3c805 100644 --- a/src/py/flwr/common/differential_privacy.py +++ b/src/py/flwr/common/differential_privacy.py @@ -16,7 +16,7 @@ from logging import WARNING -from typing import Optional, Tuple +from typing import Optional import numpy as np @@ -125,7 +125,7 @@ def compute_adaptive_noise_params( 
noise_multiplier: float, num_sampled_clients: float, clipped_count_stddev: Optional[float], -) -> Tuple[float, float]: +) -> tuple[float, float]: """Compute noising parameters for the adaptive clipping. Paper: https://arxiv.org/abs/1905.03871 diff --git a/src/py/flwr/common/dp.py b/src/py/flwr/common/dp.py index 527805c8ef42..13ae94461ef9 100644 --- a/src/py/flwr/common/dp.py +++ b/src/py/flwr/common/dp.py @@ -15,8 +15,6 @@ """Building block functions for DP algorithms.""" -from typing import Tuple - import numpy as np from flwr.common.logger import warn_deprecated_feature @@ -41,7 +39,7 @@ def add_gaussian_noise(update: NDArrays, std_dev: float) -> NDArrays: return update_noised -def clip_by_l2(update: NDArrays, threshold: float) -> Tuple[NDArrays, bool]: +def clip_by_l2(update: NDArrays, threshold: float) -> tuple[NDArrays, bool]: """Scales the update so thats its L2 norm is upper-bound to threshold.""" warn_deprecated_feature("`clip_by_l2` method") update_norm = _get_update_norm(update) diff --git a/src/py/flwr/common/exit_handlers.py b/src/py/flwr/common/exit_handlers.py index 30750c28a450..e5898b46a537 100644 --- a/src/py/flwr/common/exit_handlers.py +++ b/src/py/flwr/common/exit_handlers.py @@ -19,7 +19,7 @@ from signal import SIGINT, SIGTERM, signal from threading import Thread from types import FrameType -from typing import List, Optional +from typing import Optional from grpc import Server @@ -28,8 +28,8 @@ def register_exit_handlers( event_type: EventType, - grpc_servers: Optional[List[Server]] = None, - bckg_threads: Optional[List[Thread]] = None, + grpc_servers: Optional[list[Server]] = None, + bckg_threads: Optional[list[Thread]] = None, ) -> None: """Register exit handlers for `SIGINT` and `SIGTERM` signals. 
diff --git a/src/py/flwr/common/grpc.py b/src/py/flwr/common/grpc.py index ec8fe823a7eb..5a29c595119c 100644 --- a/src/py/flwr/common/grpc.py +++ b/src/py/flwr/common/grpc.py @@ -15,8 +15,9 @@ """Utility functions for gRPC.""" +from collections.abc import Sequence from logging import DEBUG -from typing import Optional, Sequence +from typing import Optional import grpc diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py index 2077f9beaca0..3a058abac9c6 100644 --- a/src/py/flwr/common/logger.py +++ b/src/py/flwr/common/logger.py @@ -18,7 +18,7 @@ import logging from logging import WARN, LogRecord from logging.handlers import HTTPHandler -from typing import TYPE_CHECKING, Any, Dict, Optional, TextIO, Tuple +from typing import TYPE_CHECKING, Any, Optional, TextIO # Create logger LOGGER_NAME = "flwr" @@ -111,7 +111,7 @@ def update_console_handler( class CustomHTTPHandler(HTTPHandler): """Custom HTTPHandler which overrides the mapLogRecords method.""" - # pylint: disable=too-many-arguments,bad-option-value,R1725 + # pylint: disable=too-many-arguments,bad-option-value,R1725,R0917 def __init__( self, identifier: str, @@ -119,12 +119,12 @@ def __init__( url: str, method: str = "GET", secure: bool = False, - credentials: Optional[Tuple[str, str]] = None, + credentials: Optional[tuple[str, str]] = None, ) -> None: super().__init__(host, url, method, secure, credentials) self.identifier = identifier - def mapLogRecord(self, record: LogRecord) -> Dict[str, Any]: + def mapLogRecord(self, record: LogRecord) -> dict[str, Any]: """Filter for the properties to be send to the logserver.""" record_dict = record.__dict__ return { diff --git a/src/py/flwr/common/message.py b/src/py/flwr/common/message.py index 4138fc95a591..3bb07ff3961a 100644 --- a/src/py/flwr/common/message.py +++ b/src/py/flwr/common/message.py @@ -17,9 +17,11 @@ from __future__ import annotations import time -import warnings +from logging import WARNING from typing import Optional, cast +from 
.constant import MESSAGE_TTL_TOLERANCE +from .logger import log from .record import RecordSet DEFAULT_TTL = 3600 @@ -50,7 +52,7 @@ class Metadata: # pylint: disable=too-many-instance-attributes the receiving end. """ - def __init__( # pylint: disable=too-many-arguments + def __init__( # pylint: disable=too-many-arguments,too-many-positional-arguments self, run_id: int, message_id: str, @@ -288,14 +290,12 @@ def create_error_reply(self, error: Error, ttl: float | None = None) -> Message: follows the equation: ttl = msg.meta.ttl - (reply.meta.created_at - msg.meta.created_at) + + Returns + ------- + message : Message + A Message containing only the relevant error and metadata. """ - if ttl: - warnings.warn( - "A custom TTL was set, but note that the SuperLink does not enforce " - "the TTL yet. The SuperLink will start enforcing the TTL in a future " - "version of Flower.", - stacklevel=2, - ) # If no TTL passed, use default for message creation (will update after # message creation) ttl_ = DEFAULT_TTL if ttl is None else ttl @@ -309,6 +309,8 @@ def create_error_reply(self, error: Error, ttl: float | None = None) -> Message: ) message.metadata.ttl = ttl + self._limit_task_res_ttl(message) + return message def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: @@ -334,13 +336,6 @@ def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: Message A new `Message` instance representing the reply. """ - if ttl: - warnings.warn( - "A custom TTL was set, but note that the SuperLink does not enforce " - "the TTL yet. 
The SuperLink will start enforcing the TTL in a future " - "version of Flower.", - stacklevel=2, - ) # If no TTL passed, use default for message creation (will update after # message creation) ttl_ = DEFAULT_TTL if ttl is None else ttl @@ -357,6 +352,8 @@ def create_reply(self, content: RecordSet, ttl: float | None = None) -> Message: ) message.metadata.ttl = ttl + self._limit_task_res_ttl(message) + return message def __repr__(self) -> str: @@ -370,6 +367,31 @@ def __repr__(self) -> str: ) return f"{self.__class__.__qualname__}({view})" + def _limit_task_res_ttl(self, message: Message) -> None: + """Limit the TaskRes TTL to not exceed the expiration time of the TaskIns it + replies to. + + Parameters + ---------- + message : Message + The message to which the TaskRes is replying. + """ + # Calculate the maximum allowed TTL + max_allowed_ttl = ( + self.metadata.created_at + self.metadata.ttl - message.metadata.created_at + ) + + if message.metadata.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE: + log( + WARNING, + "The reply TTL of %.2f seconds exceeded the " + "allowed maximum of %.2f seconds. " + "The TTL has been updated to the allowed maximum.", + message.metadata.ttl, + max_allowed_ttl, + ) + message.metadata.ttl = max_allowed_ttl + def _create_reply_metadata(msg: Message, ttl: float) -> Metadata: """Construct metadata for a reply message.""" diff --git a/src/py/flwr/common/message_test.py b/src/py/flwr/common/message_test.py index c6142cb18256..d418f9fa8036 100644 --- a/src/py/flwr/common/message_test.py +++ b/src/py/flwr/common/message_test.py @@ -17,12 +17,13 @@ import time from collections import namedtuple from contextlib import ExitStack -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, Optional import pytest # pylint: enable=E0611 from . 
import RecordSet +from .constant import MESSAGE_TTL_TOLERANCE from .message import Error, Message, Metadata from .serde_test import RecordMaker @@ -193,7 +194,7 @@ def test_create_reply( ), ], ) -def test_repr(cls: type, kwargs: Dict[str, Any]) -> None: +def test_repr(cls: type, kwargs: dict[str, Any]) -> None: """Test string representations of Metadata/Message/Error.""" # Prepare anon_cls = namedtuple(cls.__qualname__, kwargs.keys()) # type: ignore @@ -202,3 +203,35 @@ def test_repr(cls: type, kwargs: Dict[str, Any]) -> None: # Assert assert str(actual) == str(expected) + + +@pytest.mark.parametrize( + "message_creation_fn,initial_ttl,reply_ttl,expected_reply_ttl", + [ + # Case where the reply_ttl is larger than the allowed TTL + (create_message_with_content, 20, 30, 20), + (create_message_with_error, 20, 30, 20), + # Case where the reply_ttl is within the allowed range + (create_message_with_content, 20, 10, 10), + (create_message_with_error, 20, 10, 10), + ], +) +def test_reply_ttl_limitation( + message_creation_fn: Callable[[float], Message], + initial_ttl: float, + reply_ttl: float, + expected_reply_ttl: float, +) -> None: + """Test that the reply TTL does not exceed the allowed TTL.""" + message = message_creation_fn(initial_ttl) + + if message.has_error(): + dummy_error = Error(code=0, reason="test error") + reply_message = message.create_error_reply(dummy_error, ttl=reply_ttl) + else: + reply_message = message.create_reply(content=RecordSet(), ttl=reply_ttl) + + assert reply_message.metadata.ttl - expected_reply_ttl <= MESSAGE_TTL_TOLERANCE, ( + f"Expected TTL to be <= {expected_reply_ttl}, " + f"but got {reply_message.metadata.ttl}" + ) diff --git a/src/py/flwr/common/object_ref.py b/src/py/flwr/common/object_ref.py index 9723c14037a0..6259b5ab557d 100644 --- a/src/py/flwr/common/object_ref.py +++ b/src/py/flwr/common/object_ref.py @@ -21,7 +21,7 @@ from importlib.util import find_spec from logging import WARN from pathlib import Path -from typing import 
Any, Optional, Tuple, Type, Union +from typing import Any, Optional, Union from .logger import log @@ -40,7 +40,7 @@ def validate( module_attribute_str: str, check_module: bool = True, project_dir: Optional[Union[str, Path]] = None, -) -> Tuple[bool, Optional[str]]: +) -> tuple[bool, Optional[str]]: """Validate object reference. Parameters @@ -106,7 +106,7 @@ def validate( def load_app( # pylint: disable= too-many-branches module_attribute_str: str, - error_type: Type[Exception], + error_type: type[Exception], project_dir: Optional[Union[str, Path]] = None, ) -> Any: """Return the object specified in a module attribute string. diff --git a/src/py/flwr/common/record/configsrecord.py b/src/py/flwr/common/record/configsrecord.py index aeb311089bcd..e83bca816fc6 100644 --- a/src/py/flwr/common/record/configsrecord.py +++ b/src/py/flwr/common/record/configsrecord.py @@ -15,7 +15,7 @@ """ConfigsRecord.""" -from typing import Dict, List, Optional, get_args +from typing import Optional, get_args from flwr.common.typing import ConfigsRecordValues, ConfigsScalar @@ -109,7 +109,7 @@ class ConfigsRecord(TypedDict[str, ConfigsRecordValues]): def __init__( self, - configs_dict: Optional[Dict[str, ConfigsRecordValues]] = None, + configs_dict: Optional[dict[str, ConfigsRecordValues]] = None, keep_input: bool = True, ) -> None: @@ -128,6 +128,7 @@ def count_bytes(self) -> int: def get_var_bytes(value: ConfigsScalar) -> int: """Return Bytes of value passed.""" + var_bytes = 0 if isinstance(value, bool): var_bytes = 1 elif isinstance(value, (int, float)): @@ -136,12 +137,17 @@ def get_var_bytes(value: ConfigsScalar) -> int: ) if isinstance(value, (str, bytes)): var_bytes = len(value) + if var_bytes == 0: + raise ValueError( + "Config values must be either `bool`, `int`, `float`, " + "`str`, or `bytes`" + ) return var_bytes num_bytes = 0 for k, v in self.items(): - if isinstance(v, List): + if isinstance(v, list): if isinstance(v[0], (bytes, str)): # not all str are of equal length 
necessarily # for both the footprint of each element is 1 Byte diff --git a/src/py/flwr/common/record/metricsrecord.py b/src/py/flwr/common/record/metricsrecord.py index 868ed82e79ca..d0a6123c807f 100644 --- a/src/py/flwr/common/record/metricsrecord.py +++ b/src/py/flwr/common/record/metricsrecord.py @@ -15,7 +15,7 @@ """MetricsRecord.""" -from typing import Dict, List, Optional, get_args +from typing import Optional, get_args from flwr.common.typing import MetricsRecordValues, MetricsScalar @@ -115,7 +115,7 @@ class MetricsRecord(TypedDict[str, MetricsRecordValues]): def __init__( self, - metrics_dict: Optional[Dict[str, MetricsRecordValues]] = None, + metrics_dict: Optional[dict[str, MetricsRecordValues]] = None, keep_input: bool = True, ): super().__init__(_check_key, _check_value) @@ -130,7 +130,7 @@ def count_bytes(self) -> int: num_bytes = 0 for k, v in self.items(): - if isinstance(v, List): + if isinstance(v, list): # both int and float normally take 4 bytes # But MetricRecords are mapped to 64bit int/float # during protobuffing diff --git a/src/py/flwr/common/record/parametersrecord.py b/src/py/flwr/common/record/parametersrecord.py index f088d682497b..10ec65ca0277 100644 --- a/src/py/flwr/common/record/parametersrecord.py +++ b/src/py/flwr/common/record/parametersrecord.py @@ -14,9 +14,10 @@ # ============================================================================== """ParametersRecord and Array.""" +from collections import OrderedDict from dataclasses import dataclass from io import BytesIO -from typing import List, Optional, OrderedDict, cast +from typing import Optional, cast import numpy as np @@ -51,7 +52,7 @@ class Array: """ dtype: str - shape: List[int] + shape: list[int] stype: str data: bytes diff --git a/src/py/flwr/common/record/parametersrecord_test.py b/src/py/flwr/common/record/parametersrecord_test.py index e840e5e266e4..9ac18a3ec854 100644 --- a/src/py/flwr/common/record/parametersrecord_test.py +++ 
b/src/py/flwr/common/record/parametersrecord_test.py @@ -17,7 +17,6 @@ import unittest from collections import OrderedDict from io import BytesIO -from typing import List import numpy as np import pytest @@ -81,7 +80,7 @@ def test_numpy_conversion_invalid(self) -> None: ([31, 153], "bool_"), # bool_ is represented as a whole Byte in NumPy ], ) -def test_count_bytes(shape: List[int], dtype: str) -> None: +def test_count_bytes(shape: list[int], dtype: str) -> None: """Test bytes in a ParametersRecord are computed correctly.""" original_array = np.random.randn(*shape).astype(np.dtype(dtype)) diff --git a/src/py/flwr/common/record/recordset.py b/src/py/flwr/common/record/recordset.py index f16a22695d6e..b2d1da4411bb 100644 --- a/src/py/flwr/common/record/recordset.py +++ b/src/py/flwr/common/record/recordset.py @@ -119,7 +119,7 @@ class RecordSet: Let's see an example. >>> from flwr.common import RecordSet - >>> from flwr.common import ConfigsRecords, MetricsRecords, ParametersRecord + >>> from flwr.common import ConfigsRecord, MetricsRecord, ParametersRecord >>> >>> # Let's begin with an empty record >>> my_recordset = RecordSet() diff --git a/src/py/flwr/common/record/recordset_test.py b/src/py/flwr/common/record/recordset_test.py index 96556d335f4c..154e320e5f0b 100644 --- a/src/py/flwr/common/record/recordset_test.py +++ b/src/py/flwr/common/record/recordset_test.py @@ -15,9 +15,9 @@ """RecordSet tests.""" import pickle -from collections import namedtuple +from collections import OrderedDict, namedtuple from copy import deepcopy -from typing import Callable, Dict, List, OrderedDict, Type, Union +from typing import Callable, Union import numpy as np import pytest @@ -158,8 +158,8 @@ def test_set_parameters_with_correct_types() -> None: ], ) def test_set_parameters_with_incorrect_types( - key_type: Type[Union[int, str]], - value_fn: Callable[[NDArray], Union[NDArray, List[float]]], + key_type: type[Union[int, str]], + value_fn: Callable[[NDArray], Union[NDArray, 
list[float]]], ) -> None: """Test adding dictionary of unsupported types to ParametersRecord.""" p_record = ParametersRecord() @@ -183,7 +183,7 @@ def test_set_parameters_with_incorrect_types( ], ) def test_set_metrics_to_metricsrecord_with_correct_types( - key_type: Type[str], + key_type: type[str], value_fn: Callable[[NDArray], MetricsRecordValues], ) -> None: """Test adding metrics of various types to a MetricsRecord.""" @@ -236,8 +236,8 @@ def test_set_metrics_to_metricsrecord_with_correct_types( ], ) def test_set_metrics_to_metricsrecord_with_incorrect_types( - key_type: Type[Union[str, int, float, bool]], - value_fn: Callable[[NDArray], Union[NDArray, Dict[str, NDArray], List[float]]], + key_type: type[Union[str, int, float, bool]], + value_fn: Callable[[NDArray], Union[NDArray, dict[str, NDArray], list[float]]], ) -> None: """Test adding metrics of various unsupported types to a MetricsRecord.""" m_record = MetricsRecord() @@ -302,7 +302,7 @@ def test_set_metrics_to_metricsrecord_with_and_without_keeping_input( ], ) def test_set_configs_to_configsrecord_with_correct_types( - key_type: Type[str], + key_type: type[str], value_fn: Callable[[NDArray], ConfigsRecordValues], ) -> None: """Test adding configs of various types to a ConfigsRecord.""" @@ -346,8 +346,8 @@ def test_set_configs_to_configsrecord_with_correct_types( ], ) def test_set_configs_to_configsrecord_with_incorrect_types( - key_type: Type[Union[str, int, float]], - value_fn: Callable[[NDArray], Union[NDArray, Dict[str, NDArray], List[float]]], + key_type: type[Union[str, int, float]], + value_fn: Callable[[NDArray], Union[NDArray, dict[str, NDArray], list[float]]], ) -> None: """Test adding configs of various unsupported types to a ConfigsRecord.""" c_record = ConfigsRecord() diff --git a/src/py/flwr/common/record/typeddict.py b/src/py/flwr/common/record/typeddict.py index 791077d8eff2..c2c8548c4de3 100644 --- a/src/py/flwr/common/record/typeddict.py +++ b/src/py/flwr/common/record/typeddict.py @@ 
-15,7 +15,8 @@ """Typed dict base class for *Records.""" -from typing import Callable, Dict, Generic, Iterator, MutableMapping, TypeVar, cast +from collections.abc import ItemsView, Iterator, KeysView, MutableMapping, ValuesView +from typing import Callable, Generic, TypeVar, cast K = TypeVar("K") # Key type V = TypeVar("V") # Value type @@ -38,38 +39,50 @@ def __setitem__(self, key: K, value: V) -> None: cast(Callable[[V], None], self.__dict__["_check_value_fn"])(value) # Set key-value pair - cast(Dict[K, V], self.__dict__["_data"])[key] = value + cast(dict[K, V], self.__dict__["_data"])[key] = value def __delitem__(self, key: K) -> None: """Remove the item with the specified key.""" - del cast(Dict[K, V], self.__dict__["_data"])[key] + del cast(dict[K, V], self.__dict__["_data"])[key] def __getitem__(self, item: K) -> V: """Return the value for the specified key.""" - return cast(Dict[K, V], self.__dict__["_data"])[item] + return cast(dict[K, V], self.__dict__["_data"])[item] def __iter__(self) -> Iterator[K]: """Yield an iterator over the keys of the dictionary.""" - return iter(cast(Dict[K, V], self.__dict__["_data"])) + return iter(cast(dict[K, V], self.__dict__["_data"])) def __repr__(self) -> str: """Return a string representation of the dictionary.""" - return cast(Dict[K, V], self.__dict__["_data"]).__repr__() + return cast(dict[K, V], self.__dict__["_data"]).__repr__() def __len__(self) -> int: """Return the number of items in the dictionary.""" - return len(cast(Dict[K, V], self.__dict__["_data"])) + return len(cast(dict[K, V], self.__dict__["_data"])) def __contains__(self, key: object) -> bool: """Check if the dictionary contains the specified key.""" - return key in cast(Dict[K, V], self.__dict__["_data"]) + return key in cast(dict[K, V], self.__dict__["_data"]) def __eq__(self, other: object) -> bool: """Compare this instance to another dictionary or TypedDict.""" - data = cast(Dict[K, V], self.__dict__["_data"]) + data = cast(dict[K, V], 
self.__dict__["_data"]) if isinstance(other, TypedDict): - other_data = cast(Dict[K, V], other.__dict__["_data"]) + other_data = cast(dict[K, V], other.__dict__["_data"]) return data == other_data if isinstance(other, dict): return data == other return NotImplemented + + def keys(self) -> KeysView[K]: + """D.keys() -> a set-like object providing a view on D's keys.""" + return cast(dict[K, V], self.__dict__["_data"]).keys() + + def values(self) -> ValuesView[V]: + """D.values() -> an object providing a view on D's values.""" + return cast(dict[K, V], self.__dict__["_data"]).values() + + def items(self) -> ItemsView[K, V]: + """D.items() -> a set-like object providing a view on D's items.""" + return cast(dict[K, V], self.__dict__["_data"]).items() diff --git a/src/py/flwr/common/recordset_compat.py b/src/py/flwr/common/recordset_compat.py index 8bf884c30e58..4641b8f29c96 100644 --- a/src/py/flwr/common/recordset_compat.py +++ b/src/py/flwr/common/recordset_compat.py @@ -15,7 +15,9 @@ """RecordSet utilities.""" -from typing import Dict, Mapping, OrderedDict, Tuple, Union, cast, get_args +from collections import OrderedDict +from collections.abc import Mapping +from typing import Union, cast, get_args from . import Array, ConfigsRecord, MetricsRecord, ParametersRecord, RecordSet from .typing import ( @@ -57,6 +59,11 @@ def parametersrecord_to_parameters( keep_input : bool A boolean indicating whether entries in the record should be deleted from the input dictionary immediately after adding them to the record. + + Returns + ------- + parameters : Parameters + The parameters in the legacy format Parameters. """ parameters = Parameters(tensors=[], tensor_type="") @@ -92,6 +99,11 @@ def parameters_to_parametersrecord( A boolean indicating whether parameters should be deleted from the input Parameters object (i.e. a list of serialized NumPy arrays) immediately after adding them to the record. 
+ + Returns + ------- + ParametersRecord + The ParametersRecord containing the provided parameters. """ tensor_type = parameters.tensor_type @@ -115,7 +127,7 @@ def parameters_to_parametersrecord( def _check_mapping_from_recordscalartype_to_scalar( record_data: Mapping[str, Union[ConfigsRecordValues, MetricsRecordValues]] -) -> Dict[str, Scalar]: +) -> dict[str, Scalar]: """Check mapping `common.*RecordValues` into `common.Scalar` is possible.""" for value in record_data.values(): if not isinstance(value, get_args(Scalar)): @@ -126,14 +138,14 @@ def _check_mapping_from_recordscalartype_to_scalar( "supported by the `common.RecordSet` infrastructure. " f"You used type: {type(value)}" ) - return cast(Dict[str, Scalar], record_data) + return cast(dict[str, Scalar], record_data) def _recordset_to_fit_or_evaluate_ins_components( recordset: RecordSet, ins_str: str, keep_input: bool, -) -> Tuple[Parameters, Dict[str, Scalar]]: +) -> tuple[Parameters, dict[str, Scalar]]: """Derive Fit/Evaluate Ins from a RecordSet.""" # get Array and construct Parameters parameters_record = recordset.parameters_records[f"{ins_str}.parameters"] @@ -169,7 +181,7 @@ def _fit_or_evaluate_ins_to_recordset( def _embed_status_into_recordset( res_str: str, status: Status, recordset: RecordSet ) -> RecordSet: - status_dict: Dict[str, ConfigsRecordValues] = { + status_dict: dict[str, ConfigsRecordValues] = { "code": int(status.code.value), "message": status.message, } diff --git a/src/py/flwr/common/recordset_compat_test.py b/src/py/flwr/common/recordset_compat_test.py index e0ac7f216af9..05d821e37e40 100644 --- a/src/py/flwr/common/recordset_compat_test.py +++ b/src/py/flwr/common/recordset_compat_test.py @@ -15,7 +15,7 @@ """RecordSet from legacy messages tests.""" from copy import deepcopy -from typing import Callable, Dict +from typing import Callable import numpy as np import pytest @@ -82,7 +82,7 @@ def _get_valid_fitins_with_empty_ndarrays() -> FitIns: def _get_valid_fitres() -> FitRes: 
"""Returnn Valid parameters but potentially invalid config.""" arrays = get_ndarrays() - metrics: Dict[str, Scalar] = {"a": 1.0, "b": 0} + metrics: dict[str, Scalar] = {"a": 1.0, "b": 0} return FitRes( parameters=ndarrays_to_parameters(arrays), num_examples=1, @@ -98,7 +98,7 @@ def _get_valid_evaluateins() -> EvaluateIns: def _get_valid_evaluateres() -> EvaluateRes: """Return potentially invalid config.""" - metrics: Dict[str, Scalar] = {"a": 1.0, "b": 0} + metrics: dict[str, Scalar] = {"a": 1.0, "b": 0} return EvaluateRes( num_examples=1, loss=0.1, @@ -108,7 +108,7 @@ def _get_valid_evaluateres() -> EvaluateRes: def _get_valid_getparametersins() -> GetParametersIns: - config_dict: Dict[str, Scalar] = { + config_dict: dict[str, Scalar] = { "a": 1.0, "b": 3, "c": True, @@ -131,7 +131,7 @@ def _get_valid_getpropertiesins() -> GetPropertiesIns: def _get_valid_getpropertiesres() -> GetPropertiesRes: - config_dict: Dict[str, Scalar] = { + config_dict: dict[str, Scalar] = { "a": 1.0, "b": 3, "c": True, diff --git a/src/py/flwr/common/retry_invoker.py b/src/py/flwr/common/retry_invoker.py index d12124b89840..9785b0fbd9b4 100644 --- a/src/py/flwr/common/retry_invoker.py +++ b/src/py/flwr/common/retry_invoker.py @@ -18,20 +18,9 @@ import itertools import random import time +from collections.abc import Generator, Iterable from dataclasses import dataclass -from typing import ( - Any, - Callable, - Dict, - Generator, - Iterable, - List, - Optional, - Tuple, - Type, - Union, - cast, -) +from typing import Any, Callable, Optional, Union, cast def exponential( @@ -49,6 +38,11 @@ def exponential( Factor by which the delay is multiplied after each retry. max_delay: Optional[float] (default: None) The maximum delay duration between two consecutive retries. + + Returns + ------- + Generator[float, None, None] + A generator for the delay between 2 retries. 
""" delay = base_delay if max_delay is None else min(base_delay, max_delay) while True: @@ -67,6 +61,11 @@ def constant( ---------- interval: Union[float, Iterable[float]] (default: 1) A constant value to yield or an iterable of such values. + + Returns + ------- + Generator[float, None, None] + A generator for the delay between 2 retries. """ if not isinstance(interval, Iterable): interval = itertools.repeat(interval) @@ -84,6 +83,11 @@ def full_jitter(max_value: float) -> float: ---------- max_value : float The upper limit for the randomized value. + + Returns + ------- + float + A random float that is less than max_value. """ return random.uniform(0, max_value) @@ -93,8 +97,8 @@ class RetryState: """State for callbacks in RetryInvoker.""" target: Callable[..., Any] - args: Tuple[Any, ...] - kwargs: Dict[str, Any] + args: tuple[Any, ...] + kwargs: dict[str, Any] tries: int elapsed_time: float exception: Optional[Exception] = None @@ -167,7 +171,7 @@ class RetryInvoker: def __init__( self, wait_gen_factory: Callable[[], Generator[float, None, None]], - recoverable_exceptions: Union[Type[Exception], Tuple[Type[Exception], ...]], + recoverable_exceptions: Union[type[Exception], tuple[type[Exception], ...]], max_tries: Optional[int], max_time: Optional[float], *, @@ -244,7 +248,7 @@ def try_call_event_handler( try_cnt = 0 wait_generator = self.wait_gen_factory() start = time.monotonic() - ref_state: List[Optional[RetryState]] = [None] + ref_state: list[Optional[RetryState]] = [None] while True: try_cnt += 1 diff --git a/src/py/flwr/common/retry_invoker_test.py b/src/py/flwr/common/retry_invoker_test.py index 2259ae47ded4..a9f2625ff443 100644 --- a/src/py/flwr/common/retry_invoker_test.py +++ b/src/py/flwr/common/retry_invoker_test.py @@ -15,7 +15,7 @@ """Tests for `RetryInvoker`.""" -from typing import Generator +from collections.abc import Generator from unittest.mock import MagicMock, Mock, patch import pytest diff --git 
a/src/py/flwr/common/secure_aggregation/crypto/shamir.py b/src/py/flwr/common/secure_aggregation/crypto/shamir.py index 688bfa2153ea..9c7e67abf94f 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/shamir.py +++ b/src/py/flwr/common/secure_aggregation/crypto/shamir.py @@ -17,20 +17,20 @@ import pickle from concurrent.futures import ThreadPoolExecutor -from typing import List, Tuple, cast +from typing import cast from Crypto.Protocol.SecretSharing import Shamir from Crypto.Util.Padding import pad, unpad -def create_shares(secret: bytes, threshold: int, num: int) -> List[bytes]: +def create_shares(secret: bytes, threshold: int, num: int) -> list[bytes]: """Return list of shares (bytes).""" secret_padded = pad(secret, 16) secret_padded_chunk = [ (threshold, num, secret_padded[i : i + 16]) for i in range(0, len(secret_padded), 16) ] - share_list: List[List[Tuple[int, bytes]]] = [[] for _ in range(num)] + share_list: list[list[tuple[int, bytes]]] = [[] for _ in range(num)] with ThreadPoolExecutor(max_workers=10) as executor: for chunk_shares in executor.map( @@ -43,22 +43,22 @@ def create_shares(secret: bytes, threshold: int, num: int) -> List[bytes]: return [pickle.dumps(shares) for shares in share_list] -def _shamir_split(threshold: int, num: int, chunk: bytes) -> List[Tuple[int, bytes]]: +def _shamir_split(threshold: int, num: int, chunk: bytes) -> list[tuple[int, bytes]]: return Shamir.split(threshold, num, chunk, ssss=False) # Reconstructing secret with PyCryptodome -def combine_shares(share_list: List[bytes]) -> bytes: +def combine_shares(share_list: list[bytes]) -> bytes: """Reconstruct secret from shares.""" - unpickled_share_list: List[List[Tuple[int, bytes]]] = [ - cast(List[Tuple[int, bytes]], pickle.loads(share)) for share in share_list + unpickled_share_list: list[list[tuple[int, bytes]]] = [ + cast(list[tuple[int, bytes]], pickle.loads(share)) for share in share_list ] chunk_num = len(unpickled_share_list[0]) secret_padded = bytearray(0) - 
chunk_shares_list: List[List[Tuple[int, bytes]]] = [] + chunk_shares_list: list[list[tuple[int, bytes]]] = [] for i in range(chunk_num): - chunk_shares: List[Tuple[int, bytes]] = [] + chunk_shares: list[tuple[int, bytes]] = [] for share in unpickled_share_list: chunk_shares.append(share[i]) chunk_shares_list.append(chunk_shares) @@ -71,5 +71,5 @@ def combine_shares(share_list: List[bytes]) -> bytes: return bytes(secret) -def _shamir_combine(shares: List[Tuple[int, bytes]]) -> bytes: +def _shamir_combine(shares: list[tuple[int, bytes]]) -> bytes: return Shamir.combine(shares, ssss=False) diff --git a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py index 59ca84d604b8..f5c130fb2663 100644 --- a/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py +++ b/src/py/flwr/common/secure_aggregation/crypto/symmetric_encryption.py @@ -16,7 +16,7 @@ import base64 -from typing import Tuple, cast +from typing import cast from cryptography.exceptions import InvalidSignature from cryptography.fernet import Fernet @@ -26,7 +26,7 @@ def generate_key_pairs() -> ( - Tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] + tuple[ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey] ): """Generate private and public key pairs with Cryptography.""" private_key = ec.generate_private_key(ec.SECP384R1()) diff --git a/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py b/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py index 207c15b61518..3197fd852f3d 100644 --- a/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py +++ b/src/py/flwr/common/secure_aggregation/ndarrays_arithmetic.py @@ -15,51 +15,51 @@ """Utility functions for performing operations on Numpy NDArrays.""" -from typing import Any, List, Tuple, Union +from typing import Any, Union import numpy as np from numpy.typing import DTypeLike, NDArray -def factor_combine(factor: int, parameters: 
List[NDArray[Any]]) -> List[NDArray[Any]]: +def factor_combine(factor: int, parameters: list[NDArray[Any]]) -> list[NDArray[Any]]: """Combine factor with parameters.""" return [np.array([factor])] + parameters def factor_extract( - parameters: List[NDArray[Any]], -) -> Tuple[int, List[NDArray[Any]]]: + parameters: list[NDArray[Any]], +) -> tuple[int, list[NDArray[Any]]]: """Extract factor from parameters.""" return parameters[0][0], parameters[1:] -def get_parameters_shape(parameters: List[NDArray[Any]]) -> List[Tuple[int, ...]]: +def get_parameters_shape(parameters: list[NDArray[Any]]) -> list[tuple[int, ...]]: """Get dimensions of each NDArray in parameters.""" return [arr.shape for arr in parameters] def get_zero_parameters( - dimensions_list: List[Tuple[int, ...]], dtype: DTypeLike = np.int64 -) -> List[NDArray[Any]]: + dimensions_list: list[tuple[int, ...]], dtype: DTypeLike = np.int64 +) -> list[NDArray[Any]]: """Generate zero parameters based on the dimensions list.""" return [np.zeros(dimensions, dtype=dtype) for dimensions in dimensions_list] def parameters_addition( - parameters1: List[NDArray[Any]], parameters2: List[NDArray[Any]] -) -> List[NDArray[Any]]: + parameters1: list[NDArray[Any]], parameters2: list[NDArray[Any]] +) -> list[NDArray[Any]]: """Add two parameters.""" return [parameters1[idx] + parameters2[idx] for idx in range(len(parameters1))] def parameters_subtraction( - parameters1: List[NDArray[Any]], parameters2: List[NDArray[Any]] -) -> List[NDArray[Any]]: + parameters1: list[NDArray[Any]], parameters2: list[NDArray[Any]] +) -> list[NDArray[Any]]: """Subtract parameters from the other parameters.""" return [parameters1[idx] - parameters2[idx] for idx in range(len(parameters1))] -def parameters_mod(parameters: List[NDArray[Any]], divisor: int) -> List[NDArray[Any]]: +def parameters_mod(parameters: list[NDArray[Any]], divisor: int) -> list[NDArray[Any]]: """Take mod of parameters with an integer divisor.""" if bin(divisor).count("1") == 1: 
msk = divisor - 1 @@ -68,14 +68,14 @@ def parameters_mod(parameters: List[NDArray[Any]], divisor: int) -> List[NDArray def parameters_multiply( - parameters: List[NDArray[Any]], multiplier: Union[int, float] -) -> List[NDArray[Any]]: + parameters: list[NDArray[Any]], multiplier: Union[int, float] +) -> list[NDArray[Any]]: """Multiply parameters by an integer/float multiplier.""" return [parameters[idx] * multiplier for idx in range(len(parameters))] def parameters_divide( - parameters: List[NDArray[Any]], divisor: Union[int, float] -) -> List[NDArray[Any]]: + parameters: list[NDArray[Any]], divisor: Union[int, float] +) -> list[NDArray[Any]]: """Divide weight by an integer/float divisor.""" return [parameters[idx] / divisor for idx in range(len(parameters))] diff --git a/src/py/flwr/common/secure_aggregation/quantization.py b/src/py/flwr/common/secure_aggregation/quantization.py index 7946276b6a4f..ab8521eed981 100644 --- a/src/py/flwr/common/secure_aggregation/quantization.py +++ b/src/py/flwr/common/secure_aggregation/quantization.py @@ -15,7 +15,7 @@ """Utility functions for model quantization.""" -from typing import List, cast +from typing import cast import numpy as np @@ -30,10 +30,10 @@ def _stochastic_round(arr: NDArrayFloat) -> NDArrayInt: def quantize( - parameters: List[NDArrayFloat], clipping_range: float, target_range: int -) -> List[NDArrayInt]: + parameters: list[NDArrayFloat], clipping_range: float, target_range: int +) -> list[NDArrayInt]: """Quantize float Numpy arrays to integer Numpy arrays.""" - quantized_list: List[NDArrayInt] = [] + quantized_list: list[NDArrayInt] = [] quantizer = target_range / (2 * clipping_range) for arr in parameters: # Stochastic quantization @@ -49,12 +49,12 @@ def quantize( # Dequantize parameters to range [-clipping_range, clipping_range] def dequantize( - quantized_parameters: List[NDArrayInt], + quantized_parameters: list[NDArrayInt], clipping_range: float, target_range: int, -) -> List[NDArrayFloat]: +) -> 
list[NDArrayFloat]: """Dequantize integer Numpy arrays to float Numpy arrays.""" - reverse_quantized_list: List[NDArrayFloat] = [] + reverse_quantized_list: list[NDArrayFloat] = [] quantizer = (2 * clipping_range) / target_range shift = -clipping_range for arr in quantized_parameters: diff --git a/src/py/flwr/common/secure_aggregation/secaggplus_utils.py b/src/py/flwr/common/secure_aggregation/secaggplus_utils.py index cf6ac3bfb003..919894d5388f 100644 --- a/src/py/flwr/common/secure_aggregation/secaggplus_utils.py +++ b/src/py/flwr/common/secure_aggregation/secaggplus_utils.py @@ -15,8 +15,6 @@ """Utility functions for the SecAgg/SecAgg+ protocol.""" -from typing import List, Tuple - import numpy as np from flwr.common.typing import NDArrayInt @@ -45,8 +43,8 @@ def share_keys_plaintext_concat( """ return b"".join( [ - int.to_bytes(src_node_id, 8, "little", signed=True), - int.to_bytes(dst_node_id, 8, "little", signed=True), + int.to_bytes(src_node_id, 8, "little", signed=False), + int.to_bytes(dst_node_id, 8, "little", signed=False), int.to_bytes(len(b_share), 4, "little"), b_share, sk_share, @@ -54,7 +52,7 @@ def share_keys_plaintext_concat( ) -def share_keys_plaintext_separate(plaintext: bytes) -> Tuple[int, int, bytes, bytes]: +def share_keys_plaintext_separate(plaintext: bytes) -> tuple[int, int, bytes, bytes]: """Retrieve arguments from bytes. Parameters @@ -74,8 +72,8 @@ def share_keys_plaintext_separate(plaintext: bytes) -> Tuple[int, int, bytes, by the secret key share of the source sent to the destination. 
""" src, dst, mark = ( - int.from_bytes(plaintext[:8], "little", signed=True), - int.from_bytes(plaintext[8:16], "little", signed=True), + int.from_bytes(plaintext[:8], "little", signed=False), + int.from_bytes(plaintext[8:16], "little", signed=False), int.from_bytes(plaintext[16:20], "little"), ) ret = (src, dst, plaintext[20 : 20 + mark], plaintext[20 + mark :]) @@ -83,8 +81,8 @@ def share_keys_plaintext_separate(plaintext: bytes) -> Tuple[int, int, bytes, by def pseudo_rand_gen( - seed: bytes, num_range: int, dimensions_list: List[Tuple[int, ...]] -) -> List[NDArrayInt]: + seed: bytes, num_range: int, dimensions_list: list[tuple[int, ...]] +) -> list[NDArrayInt]: """Seeded pseudo-random number generator for noise generation with Numpy.""" assert len(seed) & 0x3 == 0 seed32 = 0 diff --git a/src/py/flwr/common/serde.py b/src/py/flwr/common/serde.py index 76265b9836d1..54790992b40d 100644 --- a/src/py/flwr/common/serde.py +++ b/src/py/flwr/common/serde.py @@ -15,7 +15,9 @@ """ProtoBuf serialization and deserialization.""" -from typing import Any, Dict, List, MutableMapping, OrderedDict, Type, TypeVar, cast +from collections import OrderedDict +from collections.abc import MutableMapping +from typing import Any, TypeVar, cast from google.protobuf.message import Message as GrpcMessage @@ -36,7 +38,7 @@ from flwr.proto.recordset_pb2 import MetricsRecordValue as ProtoMetricsRecordValue from flwr.proto.recordset_pb2 import ParametersRecord as ProtoParametersRecord from flwr.proto.recordset_pb2 import RecordSet as ProtoRecordSet -from flwr.proto.recordset_pb2 import Sint64List, StringList +from flwr.proto.recordset_pb2 import SintList, StringList, UintList from flwr.proto.run_pb2 import Run as ProtoRun from flwr.proto.task_pb2 import Task, TaskIns, TaskRes from flwr.proto.transport_pb2 import ( @@ -72,7 +74,7 @@ def parameters_to_proto(parameters: typing.Parameters) -> Parameters: def parameters_from_proto(msg: Parameters) -> typing.Parameters: """Deserialize `Parameters` 
from ProtoBuf.""" - tensors: List[bytes] = list(msg.tensors) + tensors: list[bytes] = list(msg.tensors) return typing.Parameters(tensors=tensors, tensor_type=msg.tensor_type) @@ -338,6 +340,7 @@ def metrics_from_proto(proto: Any) -> typing.Metrics: # === Scalar messages === +INT64_MAX_VALUE = 9223372036854775807 # (1 << 63) - 1 def scalar_to_proto(scalar: typing.Scalar) -> Scalar: @@ -352,6 +355,9 @@ def scalar_to_proto(scalar: typing.Scalar) -> Scalar: return Scalar(double=scalar) if isinstance(scalar, int): + # Use uint64 for integers larger than the maximum value of sint64 + if scalar > INT64_MAX_VALUE: + return Scalar(uint64=scalar) return Scalar(sint64=scalar) if isinstance(scalar, str): @@ -372,16 +378,16 @@ def scalar_from_proto(scalar_msg: Scalar) -> typing.Scalar: # === Record messages === -_type_to_field = { +_type_to_field: dict[type, str] = { float: "double", int: "sint64", bool: "bool", str: "string", bytes: "bytes", } -_list_type_to_class_and_field = { +_list_type_to_class_and_field: dict[type, tuple[type[GrpcMessage], str]] = { float: (DoubleList, "double_list"), - int: (Sint64List, "sint64_list"), + int: (SintList, "sint_list"), bool: (BoolList, "bool_list"), str: (StringList, "string_list"), bytes: (BytesList, "bytes_list"), @@ -389,8 +395,13 @@ def scalar_from_proto(scalar_msg: Scalar) -> typing.Scalar: T = TypeVar("T") +def _is_uint64(value: Any) -> bool: + """Check if a value is uint64.""" + return isinstance(value, int) and value > INT64_MAX_VALUE + + def _record_value_to_proto( - value: Any, allowed_types: List[type], proto_class: Type[T] + value: Any, allowed_types: list[type], proto_class: type[T] ) -> T: """Serialize `*RecordValue` to ProtoBuf. @@ -401,12 +412,18 @@ def _record_value_to_proto( # Single element # Note: `isinstance(False, int) == True`. 
if isinstance(value, t): - arg[_type_to_field[t]] = value + fld = _type_to_field[t] + if t is int and _is_uint64(value): + fld = "uint64" + arg[fld] = value return proto_class(**arg) # List if isinstance(value, list) and all(isinstance(item, t) for item in value): - list_class, field_name = _list_type_to_class_and_field[t] - arg[field_name] = list_class(vals=value) + list_class, fld = _list_type_to_class_and_field[t] + # Use UintList if any element is of type `uint64`. + if t is int and any(_is_uint64(v) for v in value): + list_class, fld = UintList, "uint_list" + arg[fld] = list_class(vals=value) return proto_class(**arg) # Invalid types raise TypeError( @@ -427,9 +444,9 @@ def _record_value_from_proto(value_proto: GrpcMessage) -> Any: def _record_value_dict_to_proto( value_dict: TypedDict[str, Any], - allowed_types: List[type], - value_proto_class: Type[T], -) -> Dict[str, T]: + allowed_types: list[type], + value_proto_class: type[T], +) -> dict[str, T]: """Serialize the record value dict to ProtoBuf. Note: `bool` MUST be put in the front of allowd_types if it exists. 
@@ -447,7 +464,7 @@ def proto(_v: Any) -> T: def _record_value_dict_from_proto( value_dict_proto: MutableMapping[str, Any] -) -> Dict[str, Any]: +) -> dict[str, Any]: """Deserialize the record value dict from ProtoBuf.""" return {k: _record_value_from_proto(v) for k, v in value_dict_proto.items()} @@ -498,7 +515,7 @@ def metrics_record_from_proto(record_proto: ProtoMetricsRecord) -> MetricsRecord """Deserialize MetricsRecord from ProtoBuf.""" return MetricsRecord( metrics_dict=cast( - Dict[str, typing.MetricsRecordValues], + dict[str, typing.MetricsRecordValues], _record_value_dict_from_proto(record_proto.data), ), keep_input=False, @@ -520,7 +537,7 @@ def configs_record_from_proto(record_proto: ProtoConfigsRecord) -> ConfigsRecord """Deserialize ConfigsRecord from ProtoBuf.""" return ConfigsRecord( configs_dict=cast( - Dict[str, typing.ConfigsRecordValues], + dict[str, typing.ConfigsRecordValues], _record_value_dict_from_proto(record_proto.data), ), keep_input=False, diff --git a/src/py/flwr/common/serde_test.py b/src/py/flwr/common/serde_test.py index 013d04a32fd4..19e9889158a0 100644 --- a/src/py/flwr/common/serde_test.py +++ b/src/py/flwr/common/serde_test.py @@ -16,7 +16,8 @@ import random import string -from typing import Any, Callable, Optional, OrderedDict, Type, TypeVar, Union, cast +from collections import OrderedDict +from typing import Any, Callable, Optional, TypeVar, Union, cast import pytest @@ -79,7 +80,7 @@ def test_serialisation_deserialisation() -> None: """Test if the np.ndarray is identical after (de-)serialization.""" # Prepare - scalars = [True, b"bytestr", 3.14, 9000, "Hello"] + scalars = [True, b"bytestr", 3.14, 9000, "Hello", (1 << 63) + 1] for scalar in scalars: # Execute @@ -169,7 +170,7 @@ def get_str(self, length: Optional[int] = None) -> str: length = self.rng.randint(1, 10) return "".join(self.rng.choices(char_pool, k=length)) - def get_value(self, dtype: Type[T]) -> T: + def get_value(self, dtype: Union[type[T], str]) -> T: 
"""Create a value of a given type.""" ret: Any = None if dtype == bool: @@ -177,11 +178,13 @@ def get_value(self, dtype: Type[T]) -> T: elif dtype == str: ret = self.get_str(self.rng.randint(10, 100)) elif dtype == int: - ret = self.rng.randint(-1 << 30, 1 << 30) + ret = self.rng.randint(-1 << 63, (1 << 63) - 1) elif dtype == float: ret = (self.rng.random() - 0.5) * (2.0 ** self.rng.randint(0, 50)) elif dtype == bytes: ret = self.randbytes(self.rng.randint(10, 100)) + elif dtype == "uint": + ret = self.rng.randint(0, (1 << 64) - 1) else: raise NotImplementedError(f"Unsupported dtype: {dtype}") return cast(T, ret) @@ -315,6 +318,8 @@ def test_metrics_record_serialization_deserialization() -> None: # Prepare maker = RecordMaker() original = maker.metrics_record() + original["uint64"] = (1 << 63) + 321 + original["list of uint64"] = [maker.get_value("uint") for _ in range(30)] # Execute proto = metrics_record_to_proto(original) @@ -330,6 +335,8 @@ def test_configs_record_serialization_deserialization() -> None: # Prepare maker = RecordMaker() original = maker.configs_record() + original["uint64"] = (1 << 63) + 101 + original["list of uint64"] = [maker.get_value("uint") for _ in range(100)] # Execute proto = configs_record_to_proto(original) diff --git a/src/py/flwr/common/telemetry.py b/src/py/flwr/common/telemetry.py index 399f400b7edc..724f36d2b98f 100644 --- a/src/py/flwr/common/telemetry.py +++ b/src/py/flwr/common/telemetry.py @@ -25,7 +25,7 @@ from concurrent.futures import Future, ThreadPoolExecutor from enum import Enum, auto from pathlib import Path -from typing import Any, Dict, List, Optional, Union, cast +from typing import Any, Optional, Union, cast from flwr.common.version import package_name, package_version @@ -126,64 +126,70 @@ class EventType(str, Enum): # The type signature is not compatible with mypy, pylint and flake8 # so each of those needs to be disabled for this line. 
# pylint: disable-next=no-self-argument,arguments-differ,line-too-long - def _generate_next_value_(name: str, start: int, count: int, last_values: List[Any]) -> Any: # type: ignore # noqa: E501 + def _generate_next_value_(name: str, start: int, count: int, last_values: list[Any]) -> Any: # type: ignore # noqa: E501 return name # Ping PING = auto() - # Client: start_client + # --- LEGACY FUNCTIONS ------------------------------------------------------------- + + # Legacy: `start_client` function START_CLIENT_ENTER = auto() START_CLIENT_LEAVE = auto() - # Server: start_server + # Legacy: `start_server` function START_SERVER_ENTER = auto() START_SERVER_LEAVE = auto() - # Driver API - RUN_DRIVER_API_ENTER = auto() - RUN_DRIVER_API_LEAVE = auto() + # Legacy: `start_simulation` function + START_SIMULATION_ENTER = auto() + START_SIMULATION_LEAVE = auto() - # Fleet API - RUN_FLEET_API_ENTER = auto() - RUN_FLEET_API_LEAVE = auto() + # --- `flwr` CLI ------------------------------------------------------------------- - # Driver API and Fleet API - RUN_SUPERLINK_ENTER = auto() - RUN_SUPERLINK_LEAVE = auto() + # Not yet implemented - # Simulation - START_SIMULATION_ENTER = auto() - START_SIMULATION_LEAVE = auto() + # --- SuperExec -------------------------------------------------------------------- - # Driver: Driver - DRIVER_CONNECT = auto() - DRIVER_DISCONNECT = auto() + # SuperExec + RUN_SUPEREXEC_ENTER = auto() + RUN_SUPEREXEC_LEAVE = auto() - # Driver: start_driver - START_DRIVER_ENTER = auto() - START_DRIVER_LEAVE = auto() + # --- Simulation Engine ------------------------------------------------------------ - # flower-client-app - RUN_CLIENT_APP_ENTER = auto() - RUN_CLIENT_APP_LEAVE = auto() + # CLI: flower-simulation + CLI_FLOWER_SIMULATION_ENTER = auto() + CLI_FLOWER_SIMULATION_LEAVE = auto() - # flower-server-app - RUN_SERVER_APP_ENTER = auto() - RUN_SERVER_APP_LEAVE = auto() + # Python API: `run_simulation` + PYTHON_API_RUN_SIMULATION_ENTER = auto() + 
PYTHON_API_RUN_SIMULATION_LEAVE = auto() - # SuperNode + # --- Deployment Engine ------------------------------------------------------------ + + # CLI: `flower-superlink` + RUN_SUPERLINK_ENTER = auto() + RUN_SUPERLINK_LEAVE = auto() + + # CLI: `flower-supernode` RUN_SUPERNODE_ENTER = auto() RUN_SUPERNODE_LEAVE = auto() - # SuperExec - RUN_SUPEREXEC_ENTER = auto() - RUN_SUPEREXEC_LEAVE = auto() + # CLI: `flower-server-app` + RUN_SERVER_APP_ENTER = auto() + RUN_SERVER_APP_LEAVE = auto() + + # --- DEPRECATED ------------------------------------------------------------------- + + # [DEPRECATED] CLI: `flower-client-app` + RUN_CLIENT_APP_ENTER = auto() + RUN_CLIENT_APP_LEAVE = auto() # Use the ThreadPoolExecutor with max_workers=1 to have a queue # and also ensure that telemetry calls are not blocking. -state: Dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { +state: dict[str, Union[Optional[str], Optional[ThreadPoolExecutor]]] = { # Will be assigned ThreadPoolExecutor(max_workers=1) # in event() the first time it's required "executor": None, @@ -195,7 +201,7 @@ def _generate_next_value_(name: str, start: int, count: int, last_values: List[A def event( event_type: EventType, - event_details: Optional[Dict[str, Any]] = None, + event_details: Optional[dict[str, Any]] = None, ) -> Future: # type: ignore """Submit create_event to ThreadPoolExecutor to avoid blocking.""" if state["executor"] is None: @@ -207,7 +213,7 @@ def event( return result -def create_event(event_type: EventType, event_details: Optional[Dict[str, Any]]) -> str: +def create_event(event_type: EventType, event_details: Optional[dict[str, Any]]) -> str: """Create telemetry event.""" if state["source"] is None: state["source"] = _get_source_id() diff --git a/src/py/flwr/common/typing.py b/src/py/flwr/common/typing.py index b1dec8d0420b..081a957f28ff 100644 --- a/src/py/flwr/common/typing.py +++ b/src/py/flwr/common/typing.py @@ -17,7 +17,7 @@ from dataclasses import dataclass from enum import 
Enum -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Optional, Union import numpy as np import numpy.typing as npt @@ -25,7 +25,7 @@ NDArray = npt.NDArray[Any] NDArrayInt = npt.NDArray[np.int_] NDArrayFloat = npt.NDArray[np.float_] -NDArrays = List[NDArray] +NDArrays = list[NDArray] # The following union type contains Python types corresponding to ProtoBuf types that # ProtoBuf considers to be "Scalar Value Types", even though some of them arguably do @@ -38,31 +38,31 @@ float, int, str, - List[bool], - List[bytes], - List[float], - List[int], - List[str], + list[bool], + list[bytes], + list[float], + list[int], + list[str], ] # Value types for common.MetricsRecord MetricsScalar = Union[int, float] -MetricsScalarList = Union[List[int], List[float]] +MetricsScalarList = Union[list[int], list[float]] MetricsRecordValues = Union[MetricsScalar, MetricsScalarList] # Value types for common.ConfigsRecord ConfigsScalar = Union[MetricsScalar, str, bytes, bool] -ConfigsScalarList = Union[MetricsScalarList, List[str], List[bytes], List[bool]] +ConfigsScalarList = Union[MetricsScalarList, list[str], list[bytes], list[bool]] ConfigsRecordValues = Union[ConfigsScalar, ConfigsScalarList] -Metrics = Dict[str, Scalar] -MetricsAggregationFn = Callable[[List[Tuple[int, Metrics]]], Metrics] +Metrics = dict[str, Scalar] +MetricsAggregationFn = Callable[[list[tuple[int, Metrics]]], Metrics] -Config = Dict[str, Scalar] -Properties = Dict[str, Scalar] +Config = dict[str, Scalar] +Properties = dict[str, Scalar] # Value type for user configs UserConfigValue = Union[bool, float, int, str] -UserConfig = Dict[str, UserConfigValue] +UserConfig = dict[str, UserConfigValue] class Code(Enum): @@ -103,7 +103,7 @@ class ClientAppOutputStatus: class Parameters: """Model parameters.""" - tensors: List[bytes] + tensors: list[bytes] tensor_type: str @@ -127,7 +127,7 @@ class FitIns: """Fit instructions for a client.""" parameters: Parameters - 
config: Dict[str, Scalar] + config: dict[str, Scalar] @dataclass @@ -137,7 +137,7 @@ class FitRes: status: Status parameters: Parameters num_examples: int - metrics: Dict[str, Scalar] + metrics: dict[str, Scalar] @dataclass @@ -145,7 +145,7 @@ class EvaluateIns: """Evaluate instructions for a client.""" parameters: Parameters - config: Dict[str, Scalar] + config: dict[str, Scalar] @dataclass @@ -155,7 +155,7 @@ class EvaluateRes: status: Status loss: float num_examples: int - metrics: Dict[str, Scalar] + metrics: dict[str, Scalar] @dataclass diff --git a/src/py/flwr/common/version.py b/src/py/flwr/common/version.py index ac13f70d8a88..141c16ac9367 100644 --- a/src/py/flwr/common/version.py +++ b/src/py/flwr/common/version.py @@ -15,15 +15,14 @@ """Flower package version helper.""" import importlib.metadata as importlib_metadata -from typing import Tuple -def _check_package(name: str) -> Tuple[str, str]: +def _check_package(name: str) -> tuple[str, str]: version: str = importlib_metadata.version(name) return name, version -def _version() -> Tuple[str, str]: +def _version() -> tuple[str, str]: """Read and return Flower package name and version. 
Returns diff --git a/src/py/flwr/proto/clientappio_pb2.py b/src/py/flwr/proto/clientappio_pb2.py index 9fd5302fe6cd..3fdc9f8a6ece 100644 --- a/src/py/flwr/proto/clientappio_pb2.py +++ b/src/py/flwr/proto/clientappio_pb2.py @@ -17,7 +17,7 @@ from flwr.proto import message_pb2 as flwr_dot_proto_dot_message__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lwr/proto/clientappio.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x18\x66lwr/proto/message.proto\"W\n\x15\x43lientAppOutputStatus\x12-\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1f.flwr.proto.ClientAppOutputCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x11\n\x0fGetTokenRequest\"!\n\x10GetTokenResponse\x12\r\n\x05token\x18\x01 \x01(\x12\"+\n\x1aPullClientAppInputsRequest\x12\r\n\x05token\x18\x01 \x01(\x12\"\xa5\x01\n\x1bPullClientAppInputsResponse\x12$\n\x07message\x18\x01 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Context\x12\x1c\n\x03run\x18\x03 \x01(\x0b\x32\x0f.flwr.proto.Run\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\"x\n\x1bPushClientAppOutputsRequest\x12\r\n\x05token\x18\x01 \x01(\x12\x12$\n\x07message\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x03 \x01(\x0b\x32\x13.flwr.proto.Context\"Q\n\x1cPushClientAppOutputsResponse\x12\x31\n\x06status\x18\x01 \x01(\x0b\x32!.flwr.proto.ClientAppOutputStatus*L\n\x13\x43lientAppOutputCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x32\xad\x02\n\x0b\x43lientAppIo\x12G\n\x08GetToken\x12\x1b.flwr.proto.GetTokenRequest\x1a\x1c.flwr.proto.GetTokenResponse\"\x00\x12h\n\x13PullClientAppInputs\x12&.flwr.proto.PullClientAppInputsRequest\x1a\'.flwr.proto.PullClientAppInputsResponse\"\x00\x12k\n\x14PushClientAppOutputs\x12\'.flwr.proto.PushClientAppOutputsRequest\x1a(.flwr.proto.PushClientAppOutputsResponse\"\x00\x62\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x1c\x66lwr/proto/clientappio.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x18\x66lwr/proto/message.proto\"W\n\x15\x43lientAppOutputStatus\x12-\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x1f.flwr.proto.ClientAppOutputCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x11\n\x0fGetTokenRequest\"!\n\x10GetTokenResponse\x12\r\n\x05token\x18\x01 \x01(\x04\"+\n\x1aPullClientAppInputsRequest\x12\r\n\x05token\x18\x01 \x01(\x04\"\xa5\x01\n\x1bPullClientAppInputsResponse\x12$\n\x07message\x18\x01 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Context\x12\x1c\n\x03run\x18\x03 \x01(\x0b\x32\x0f.flwr.proto.Run\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\"x\n\x1bPushClientAppOutputsRequest\x12\r\n\x05token\x18\x01 \x01(\x04\x12$\n\x07message\x18\x02 \x01(\x0b\x32\x13.flwr.proto.Message\x12$\n\x07\x63ontext\x18\x03 \x01(\x0b\x32\x13.flwr.proto.Context\"Q\n\x1cPushClientAppOutputsResponse\x12\x31\n\x06status\x18\x01 \x01(\x0b\x32!.flwr.proto.ClientAppOutputStatus*L\n\x13\x43lientAppOutputCode\x12\x0b\n\x07SUCCESS\x10\x00\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x01\x12\x11\n\rUNKNOWN_ERROR\x10\x02\x32\xad\x02\n\x0b\x43lientAppIo\x12G\n\x08GetToken\x12\x1b.flwr.proto.GetTokenRequest\x1a\x1c.flwr.proto.GetTokenResponse\"\x00\x12h\n\x13PullClientAppInputs\x12&.flwr.proto.PullClientAppInputsRequest\x1a\'.flwr.proto.PullClientAppInputsResponse\"\x00\x12k\n\x14PushClientAppOutputs\x12\'.flwr.proto.PushClientAppOutputsRequest\x1a(.flwr.proto.PushClientAppOutputsResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/control_pb2.py b/src/py/flwr/proto/control_pb2.py new file mode 100644 index 000000000000..eb1c18d8dcff --- /dev/null +++ b/src/py/flwr/proto/control_pb2.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer 
compiler. DO NOT EDIT! +# source: flwr/proto/control.proto +# Protobuf Python Version: 4.25.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/control.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/run.proto2\x88\x02\n\x07\x43ontrol\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12S\n\x0cGetRunStatus\x12\x1f.flwr.proto.GetRunStatusRequest\x1a .flwr.proto.GetRunStatusResponse\"\x00\x12\\\n\x0fUpdateRunStatus\x12\".flwr.proto.UpdateRunStatusRequest\x1a#.flwr.proto.UpdateRunStatusResponse\"\x00\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.control_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_CONTROL']._serialized_start=63 + _globals['_CONTROL']._serialized_end=327 +# @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/control_pb2.pyi b/src/py/flwr/proto/control_pb2.pyi new file mode 100644 index 000000000000..e08fa11c2caa --- /dev/null +++ b/src/py/flwr/proto/control_pb2.pyi @@ -0,0 +1,7 @@ +""" +@generated by mypy-protobuf. Do not edit manually! 
+isort:skip_file +""" +import google.protobuf.descriptor + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor diff --git a/src/py/flwr/proto/control_pb2_grpc.py b/src/py/flwr/proto/control_pb2_grpc.py new file mode 100644 index 000000000000..a59f90f15935 --- /dev/null +++ b/src/py/flwr/proto/control_pb2_grpc.py @@ -0,0 +1,135 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 + + +class ControlStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateRun = channel.unary_unary( + '/flwr.proto.Control/CreateRun', + request_serializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, + ) + self.GetRunStatus = channel.unary_unary( + '/flwr.proto.Control/GetRunStatus', + request_serializer=flwr_dot_proto_dot_run__pb2.GetRunStatusRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.GetRunStatusResponse.FromString, + ) + self.UpdateRunStatus = channel.unary_unary( + '/flwr.proto.Control/UpdateRunStatus', + request_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + ) + + +class ControlServicer(object): + """Missing associated documentation comment in .proto file.""" + + def CreateRun(self, request, context): + """Request to create a new run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetRunStatus(self, request, context): + """Get the status of a given run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) 
+ context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateRunStatus(self, request, context): + """Update the status of a given run + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_ControlServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateRun': grpc.unary_unary_rpc_method_handler( + servicer.CreateRun, + request_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.SerializeToString, + ), + 'GetRunStatus': grpc.unary_unary_rpc_method_handler( + servicer.GetRunStatus, + request_deserializer=flwr_dot_proto_dot_run__pb2.GetRunStatusRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.GetRunStatusResponse.SerializeToString, + ), + 'UpdateRunStatus': grpc.unary_unary_rpc_method_handler( + servicer.UpdateRunStatus, + request_deserializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flwr.proto.Control', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class Control(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def CreateRun(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Control/CreateRun', + flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetRunStatus(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Control/GetRunStatus', + flwr_dot_proto_dot_run__pb2.GetRunStatusRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.GetRunStatusResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def UpdateRunStatus(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flwr.proto.Control/UpdateRunStatus', + flwr_dot_proto_dot_run__pb2.UpdateRunStatusRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.UpdateRunStatusResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/control_pb2_grpc.pyi b/src/py/flwr/proto/control_pb2_grpc.pyi new file mode 100644 index 000000000000..7817e2b12e31 --- /dev/null +++ b/src/py/flwr/proto/control_pb2_grpc.pyi @@ -0,0 +1,53 @@ +""" +@generated by 
mypy-protobuf. Do not edit manually! +isort:skip_file +""" +import abc +import flwr.proto.run_pb2 +import grpc + +class ControlStub: + def __init__(self, channel: grpc.Channel) -> None: ... + CreateRun: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.CreateRunRequest, + flwr.proto.run_pb2.CreateRunResponse] + """Request to create a new run""" + + GetRunStatus: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.GetRunStatusRequest, + flwr.proto.run_pb2.GetRunStatusResponse] + """Get the status of a given run""" + + UpdateRunStatus: grpc.UnaryUnaryMultiCallable[ + flwr.proto.run_pb2.UpdateRunStatusRequest, + flwr.proto.run_pb2.UpdateRunStatusResponse] + """Update the status of a given run""" + + +class ControlServicer(metaclass=abc.ABCMeta): + @abc.abstractmethod + def CreateRun(self, + request: flwr.proto.run_pb2.CreateRunRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.CreateRunResponse: + """Request to create a new run""" + pass + + @abc.abstractmethod + def GetRunStatus(self, + request: flwr.proto.run_pb2.GetRunStatusRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.GetRunStatusResponse: + """Get the status of a given run""" + pass + + @abc.abstractmethod + def UpdateRunStatus(self, + request: flwr.proto.run_pb2.UpdateRunStatusRequest, + context: grpc.ServicerContext, + ) -> flwr.proto.run_pb2.UpdateRunStatusResponse: + """Update the status of a given run""" + pass + + +def add_ControlServicer_to_server(servicer: ControlServicer, server: grpc.Server) -> None: ... 
diff --git a/src/py/flwr/proto/driver_pb2.py b/src/py/flwr/proto/driver_pb2.py index dde72620f5bf..d294b03be5af 100644 --- a/src/py/flwr/proto/driver_pb2.py +++ b/src/py/flwr/proto/driver_pb2.py @@ -16,36 +16,27 @@ from flwr.proto import task_pb2 as flwr_dot_proto_dot_task__pb2 from flwr.proto import run_pb2 as flwr_dot_proto_dot_run__pb2 from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 -from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xeb\x01\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\x12I\n\x0foverride_config\x18\x03 \x03(\x0b\x32\x30.flwr.proto.CreateRunRequest.OverrideConfigEntry\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 
\x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc7\x03\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x66lwr/proto/driver.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"!\n\x0fGetNodesRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"3\n\x10GetNodesResponse\x12\x1f\n\x05nodes\x18\x01 \x03(\x0b\x32\x10.flwr.proto.Node\"@\n\x12PushTaskInsRequest\x12*\n\rtask_ins_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"\'\n\x13PushTaskInsResponse\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"F\n\x12PullTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"A\n\x13PullTaskResResponse\x12*\n\rtask_res_list\x18\x01 
\x03(\x0b\x32\x13.flwr.proto.TaskRes2\xc7\x03\n\x06\x44river\x12J\n\tCreateRun\x12\x1c.flwr.proto.CreateRunRequest\x1a\x1d.flwr.proto.CreateRunResponse\"\x00\x12G\n\x08GetNodes\x12\x1b.flwr.proto.GetNodesRequest\x1a\x1c.flwr.proto.GetNodesResponse\"\x00\x12P\n\x0bPushTaskIns\x12\x1e.flwr.proto.PushTaskInsRequest\x1a\x1f.flwr.proto.PushTaskInsResponse\"\x00\x12P\n\x0bPullTaskRes\x12\x1e.flwr.proto.PullTaskResRequest\x1a\x1f.flwr.proto.PullTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.driver_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._options = None - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' - _globals['_CREATERUNREQUEST']._serialized_start=158 - _globals['_CREATERUNREQUEST']._serialized_end=393 - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=320 - _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=393 - _globals['_CREATERUNRESPONSE']._serialized_start=395 - _globals['_CREATERUNRESPONSE']._serialized_end=430 - _globals['_GETNODESREQUEST']._serialized_start=432 - _globals['_GETNODESREQUEST']._serialized_end=465 - _globals['_GETNODESRESPONSE']._serialized_start=467 - _globals['_GETNODESRESPONSE']._serialized_end=518 - _globals['_PUSHTASKINSREQUEST']._serialized_start=520 - _globals['_PUSHTASKINSREQUEST']._serialized_end=584 - _globals['_PUSHTASKINSRESPONSE']._serialized_start=586 - _globals['_PUSHTASKINSRESPONSE']._serialized_end=625 - _globals['_PULLTASKRESREQUEST']._serialized_start=627 - _globals['_PULLTASKRESREQUEST']._serialized_end=697 - 
_globals['_PULLTASKRESRESPONSE']._serialized_start=699 - _globals['_PULLTASKRESRESPONSE']._serialized_end=764 - _globals['_DRIVER']._serialized_start=767 - _globals['_DRIVER']._serialized_end=1222 + _globals['_GETNODESREQUEST']._serialized_start=129 + _globals['_GETNODESREQUEST']._serialized_end=162 + _globals['_GETNODESRESPONSE']._serialized_start=164 + _globals['_GETNODESRESPONSE']._serialized_end=215 + _globals['_PUSHTASKINSREQUEST']._serialized_start=217 + _globals['_PUSHTASKINSREQUEST']._serialized_end=281 + _globals['_PUSHTASKINSRESPONSE']._serialized_start=283 + _globals['_PUSHTASKINSRESPONSE']._serialized_end=322 + _globals['_PULLTASKRESREQUEST']._serialized_start=324 + _globals['_PULLTASKRESREQUEST']._serialized_end=394 + _globals['_PULLTASKRESRESPONSE']._serialized_start=396 + _globals['_PULLTASKRESRESPONSE']._serialized_end=461 + _globals['_DRIVER']._serialized_start=464 + _globals['_DRIVER']._serialized_end=919 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/driver_pb2.pyi b/src/py/flwr/proto/driver_pb2.pyi index d025e00474eb..77ceb496d70c 100644 --- a/src/py/flwr/proto/driver_pb2.pyi +++ b/src/py/flwr/proto/driver_pb2.pyi @@ -3,10 +3,8 @@ isort:skip_file """ import builtins -import flwr.proto.fab_pb2 import flwr.proto.node_pb2 import flwr.proto.task_pb2 -import flwr.proto.transport_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.message @@ -15,56 +13,6 @@ import typing_extensions DESCRIPTOR: google.protobuf.descriptor.FileDescriptor -class CreateRunRequest(google.protobuf.message.Message): - """CreateRun""" - DESCRIPTOR: google.protobuf.descriptor.Descriptor - class OverrideConfigEntry(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - KEY_FIELD_NUMBER: builtins.int - VALUE_FIELD_NUMBER: builtins.int - key: typing.Text - @property - def value(self) -> flwr.proto.transport_pb2.Scalar: ... 
- def __init__(self, - *, - key: typing.Text = ..., - value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., - ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - - FAB_ID_FIELD_NUMBER: builtins.int - FAB_VERSION_FIELD_NUMBER: builtins.int - OVERRIDE_CONFIG_FIELD_NUMBER: builtins.int - FAB_FIELD_NUMBER: builtins.int - fab_id: typing.Text - fab_version: typing.Text - @property - def override_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... - @property - def fab(self) -> flwr.proto.fab_pb2.Fab: ... - def __init__(self, - *, - fab_id: typing.Text = ..., - fab_version: typing.Text = ..., - override_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., - fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., - ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["fab",b"fab"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["fab",b"fab","fab_id",b"fab_id","fab_version",b"fab_version","override_config",b"override_config"]) -> None: ... -global___CreateRunRequest = CreateRunRequest - -class CreateRunResponse(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor - RUN_ID_FIELD_NUMBER: builtins.int - run_id: builtins.int - def __init__(self, - *, - run_id: builtins.int = ..., - ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... 
-global___CreateRunResponse = CreateRunResponse - class GetNodesRequest(google.protobuf.message.Message): """GetNodes messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor diff --git a/src/py/flwr/proto/driver_pb2_grpc.py b/src/py/flwr/proto/driver_pb2_grpc.py index 6745bc7af62a..91e9fd8b9bdd 100644 --- a/src/py/flwr/proto/driver_pb2_grpc.py +++ b/src/py/flwr/proto/driver_pb2_grpc.py @@ -18,8 +18,8 @@ def __init__(self, channel): """ self.CreateRun = channel.unary_unary( '/flwr.proto.Driver/CreateRun', - request_serializer=flwr_dot_proto_dot_driver__pb2.CreateRunRequest.SerializeToString, - response_deserializer=flwr_dot_proto_dot_driver__pb2.CreateRunResponse.FromString, + request_serializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + response_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, ) self.GetNodes = channel.unary_unary( '/flwr.proto.Driver/GetNodes', @@ -98,8 +98,8 @@ def add_DriverServicer_to_server(servicer, server): rpc_method_handlers = { 'CreateRun': grpc.unary_unary_rpc_method_handler( servicer.CreateRun, - request_deserializer=flwr_dot_proto_dot_driver__pb2.CreateRunRequest.FromString, - response_serializer=flwr_dot_proto_dot_driver__pb2.CreateRunResponse.SerializeToString, + request_deserializer=flwr_dot_proto_dot_run__pb2.CreateRunRequest.FromString, + response_serializer=flwr_dot_proto_dot_run__pb2.CreateRunResponse.SerializeToString, ), 'GetNodes': grpc.unary_unary_rpc_method_handler( servicer.GetNodes, @@ -148,8 +148,8 @@ def CreateRun(request, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/flwr.proto.Driver/CreateRun', - flwr_dot_proto_dot_driver__pb2.CreateRunRequest.SerializeToString, - flwr_dot_proto_dot_driver__pb2.CreateRunResponse.FromString, + flwr_dot_proto_dot_run__pb2.CreateRunRequest.SerializeToString, + flwr_dot_proto_dot_run__pb2.CreateRunResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, 
wait_for_ready, timeout, metadata) diff --git a/src/py/flwr/proto/driver_pb2_grpc.pyi b/src/py/flwr/proto/driver_pb2_grpc.pyi index 7f9fd0acbd82..8f665301073d 100644 --- a/src/py/flwr/proto/driver_pb2_grpc.pyi +++ b/src/py/flwr/proto/driver_pb2_grpc.pyi @@ -11,8 +11,8 @@ import grpc class DriverStub: def __init__(self, channel: grpc.Channel) -> None: ... CreateRun: grpc.UnaryUnaryMultiCallable[ - flwr.proto.driver_pb2.CreateRunRequest, - flwr.proto.driver_pb2.CreateRunResponse] + flwr.proto.run_pb2.CreateRunRequest, + flwr.proto.run_pb2.CreateRunResponse] """Request run_id""" GetNodes: grpc.UnaryUnaryMultiCallable[ @@ -44,9 +44,9 @@ class DriverStub: class DriverServicer(metaclass=abc.ABCMeta): @abc.abstractmethod def CreateRun(self, - request: flwr.proto.driver_pb2.CreateRunRequest, + request: flwr.proto.run_pb2.CreateRunRequest, context: grpc.ServicerContext, - ) -> flwr.proto.driver_pb2.CreateRunResponse: + ) -> flwr.proto.run_pb2.CreateRunResponse: """Request run_id""" pass diff --git a/src/py/flwr/proto/exec_pb2.py b/src/py/flwr/proto/exec_pb2.py index 3fe109067296..574f39eaa18d 100644 --- a/src/py/flwr/proto/exec_pb2.py +++ b/src/py/flwr/proto/exec_pb2.py @@ -16,7 +16,7 @@ from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/exec.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xdf\x02\n\x0fStartRunRequest\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fab\x12H\n\x0foverride_config\x18\x02 \x03(\x0b\x32/.flwr.proto.StartRunRequest.OverrideConfigEntry\x12L\n\x11\x66\x65\x64\x65ration_config\x18\x03 \x03(\x0b\x32\x31.flwr.proto.StartRunRequest.FederationConfigEntry\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1aK\n\x15\x46\x65\x64\x65rationConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 
\x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\"\n\x10StartRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"#\n\x11StreamLogsRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\"(\n\x12StreamLogsResponse\x12\x12\n\nlog_output\x18\x01 \x01(\t2\xa0\x01\n\x04\x45xec\x12G\n\x08StartRun\x12\x1b.flwr.proto.StartRunRequest\x1a\x1c.flwr.proto.StartRunResponse\"\x00\x12O\n\nStreamLogs\x12\x1d.flwr.proto.StreamLogsRequest\x1a\x1e.flwr.proto.StreamLogsResponse\"\x00\x30\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/exec.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xdf\x02\n\x0fStartRunRequest\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fab\x12H\n\x0foverride_config\x18\x02 \x03(\x0b\x32/.flwr.proto.StartRunRequest.OverrideConfigEntry\x12L\n\x11\x66\x65\x64\x65ration_config\x18\x03 \x03(\x0b\x32\x31.flwr.proto.StartRunRequest.FederationConfigEntry\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1aK\n\x15\x46\x65\x64\x65rationConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\"\n\x10StartRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"#\n\x11StreamLogsRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"(\n\x12StreamLogsResponse\x12\x12\n\nlog_output\x18\x01 \x01(\t2\xa0\x01\n\x04\x45xec\x12G\n\x08StartRun\x12\x1b.flwr.proto.StartRunRequest\x1a\x1c.flwr.proto.StartRunResponse\"\x00\x12O\n\nStreamLogs\x12\x1d.flwr.proto.StreamLogsRequest\x1a\x1e.flwr.proto.StreamLogsResponse\"\x00\x30\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/fab_pb2.py b/src/py/flwr/proto/fab_pb2.py index 3f04e6693ab8..3a5e50000c10 100644 --- a/src/py/flwr/proto/fab_pb2.py +++ b/src/py/flwr/proto/fab_pb2.py @@ -12,19 +12,20 @@ _sym_db = 
_symbol_database.Default() +from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/fab.proto\x12\nflwr.proto\"(\n\x03\x46\x61\x62\x12\x10\n\x08hash_str\x18\x01 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\x0c\"!\n\rGetFabRequest\x12\x10\n\x08hash_str\x18\x01 \x01(\t\".\n\x0eGetFabResponse\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fabb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/fab.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\"(\n\x03\x46\x61\x62\x12\x10\n\x08hash_str\x18\x01 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\x0c\"A\n\rGetFabRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08hash_str\x18\x02 \x01(\t\".\n\x0eGetFabResponse\x12\x1c\n\x03\x66\x61\x62\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Fabb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flwr.proto.fab_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None - _globals['_FAB']._serialized_start=36 - _globals['_FAB']._serialized_end=76 - _globals['_GETFABREQUEST']._serialized_start=78 - _globals['_GETFABREQUEST']._serialized_end=111 - _globals['_GETFABRESPONSE']._serialized_start=113 - _globals['_GETFABRESPONSE']._serialized_end=159 + _globals['_FAB']._serialized_start=59 + _globals['_FAB']._serialized_end=99 + _globals['_GETFABREQUEST']._serialized_start=101 + _globals['_GETFABREQUEST']._serialized_end=166 + _globals['_GETFABRESPONSE']._serialized_start=168 + _globals['_GETFABRESPONSE']._serialized_end=214 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fab_pb2.pyi b/src/py/flwr/proto/fab_pb2.pyi index b2715dde5021..8cfdcbaf76ad 100644 --- a/src/py/flwr/proto/fab_pb2.pyi +++ b/src/py/flwr/proto/fab_pb2.pyi @@ -3,6 +3,7 @@ isort:skip_file """ import builtins 
+import flwr.proto.node_pb2 import google.protobuf.descriptor import google.protobuf.message import typing @@ -33,13 +34,18 @@ global___Fab = Fab class GetFabRequest(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int HASH_STR_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... hash_str: typing.Text def __init__(self, *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., hash_str: typing.Text = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["hash_str",b"hash_str"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["hash_str",b"hash_str","node",b"node"]) -> None: ... global___GetFabRequest = GetFabRequest class GetFabResponse(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/fleet_pb2.py b/src/py/flwr/proto/fleet_pb2.py index d1fe719f2d91..3185bc2ce111 100644 --- a/src/py/flwr/proto/fleet_pb2.py +++ b/src/py/flwr/proto/fleet_pb2.py @@ -18,7 +18,7 @@ from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"*\n\x11\x43reateNodeRequest\x12\x15\n\rping_interval\x18\x01 \x01(\x01\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 
\x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"@\n\x12PushTaskResRequest\x12*\n\rtask_res_list\x18\x01 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\x8c\x04\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16\x66lwr/proto/fleet.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x15\x66lwr/proto/task.proto\x1a\x14\x66lwr/proto/run.proto\x1a\x14\x66lwr/proto/fab.proto\"*\n\x11\x43reateNodeRequest\x12\x15\n\rping_interval\x18\x01 \x01(\x01\"4\n\x12\x43reateNodeResponse\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"3\n\x11\x44\x65leteNodeRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\"\x14\n\x12\x44\x65leteNodeResponse\"D\n\x0bPingRequest\x12\x1e\n\x04node\x18\x01 
\x01(\x0b\x32\x10.flwr.proto.Node\x12\x15\n\rping_interval\x18\x02 \x01(\x01\"\x1f\n\x0cPingResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\"F\n\x12PullTaskInsRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x10\n\x08task_ids\x18\x02 \x03(\t\"k\n\x13PullTaskInsResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12*\n\rtask_ins_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskIns\"`\n\x12PushTaskResRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12*\n\rtask_res_list\x18\x02 \x03(\x0b\x32\x13.flwr.proto.TaskRes\"\xae\x01\n\x13PushTaskResResponse\x12(\n\treconnect\x18\x01 \x01(\x0b\x32\x15.flwr.proto.Reconnect\x12=\n\x07results\x18\x02 \x03(\x0b\x32,.flwr.proto.PushTaskResResponse.ResultsEntry\x1a.\n\x0cResultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\r:\x02\x38\x01\"\x1e\n\tReconnect\x12\x11\n\treconnect\x18\x01 \x01(\x04\x32\x8c\x04\n\x05\x46leet\x12M\n\nCreateNode\x12\x1d.flwr.proto.CreateNodeRequest\x1a\x1e.flwr.proto.CreateNodeResponse\"\x00\x12M\n\nDeleteNode\x12\x1d.flwr.proto.DeleteNodeRequest\x1a\x1e.flwr.proto.DeleteNodeResponse\"\x00\x12;\n\x04Ping\x12\x17.flwr.proto.PingRequest\x1a\x18.flwr.proto.PingResponse\"\x00\x12P\n\x0bPullTaskIns\x12\x1e.flwr.proto.PullTaskInsRequest\x1a\x1f.flwr.proto.PullTaskInsResponse\"\x00\x12P\n\x0bPushTaskRes\x12\x1e.flwr.proto.PushTaskResRequest\x1a\x1f.flwr.proto.PushTaskResResponse\"\x00\x12\x41\n\x06GetRun\x12\x19.flwr.proto.GetRunRequest\x1a\x1a.flwr.proto.GetRunResponse\"\x00\x12\x41\n\x06GetFab\x12\x19.flwr.proto.GetFabRequest\x1a\x1a.flwr.proto.GetFabResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -44,13 +44,13 @@ _globals['_PULLTASKINSRESPONSE']._serialized_start=476 _globals['_PULLTASKINSRESPONSE']._serialized_end=583 _globals['_PUSHTASKRESREQUEST']._serialized_start=585 - _globals['_PUSHTASKRESREQUEST']._serialized_end=649 - 
_globals['_PUSHTASKRESRESPONSE']._serialized_start=652 - _globals['_PUSHTASKRESRESPONSE']._serialized_end=826 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=780 - _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=826 - _globals['_RECONNECT']._serialized_start=828 - _globals['_RECONNECT']._serialized_end=858 - _globals['_FLEET']._serialized_start=861 - _globals['_FLEET']._serialized_end=1385 + _globals['_PUSHTASKRESREQUEST']._serialized_end=681 + _globals['_PUSHTASKRESRESPONSE']._serialized_start=684 + _globals['_PUSHTASKRESRESPONSE']._serialized_end=858 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_start=812 + _globals['_PUSHTASKRESRESPONSE_RESULTSENTRY']._serialized_end=858 + _globals['_RECONNECT']._serialized_start=860 + _globals['_RECONNECT']._serialized_end=890 + _globals['_FLEET']._serialized_start=893 + _globals['_FLEET']._serialized_end=1417 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/fleet_pb2.pyi b/src/py/flwr/proto/fleet_pb2.pyi index 5989f45c5c60..76875bc1a4b9 100644 --- a/src/py/flwr/proto/fleet_pb2.pyi +++ b/src/py/flwr/proto/fleet_pb2.pyi @@ -124,14 +124,19 @@ global___PullTaskInsResponse = PullTaskInsResponse class PushTaskResRequest(google.protobuf.message.Message): """PushTaskRes messages""" DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int TASK_RES_LIST_FIELD_NUMBER: builtins.int @property + def node(self) -> flwr.proto.node_pb2.Node: ... + @property def task_res_list(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[flwr.proto.task_pb2.TaskRes]: ... def __init__(self, *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., task_res_list: typing.Optional[typing.Iterable[flwr.proto.task_pb2.TaskRes]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["task_res_list",b"task_res_list"]) -> None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","task_res_list",b"task_res_list"]) -> None: ... global___PushTaskResRequest = PushTaskResRequest class PushTaskResResponse(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/message_pb2.py b/src/py/flwr/proto/message_pb2.py index 7e2555972a8a..d2201cb07b56 100644 --- a/src/py/flwr/proto/message_pb2.py +++ b/src/py/flwr/proto/message_pb2.py @@ -17,7 +17,7 @@ from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/message.proto\x12\nflwr.proto\x1a\x16\x66lwr/proto/error.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\"{\n\x07Message\x12&\n\x08metadata\x18\x01 \x01(\x0b\x32\x14.flwr.proto.Metadata\x12&\n\x07\x63ontent\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\x03 \x01(\x0b\x32\x11.flwr.proto.Error\"\xbf\x02\n\x07\x43ontext\x12\x0f\n\x07node_id\x18\x01 \x01(\x12\x12\x38\n\x0bnode_config\x18\x02 \x03(\x0b\x32#.flwr.proto.Context.NodeConfigEntry\x12$\n\x05state\x18\x03 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12\x36\n\nrun_config\x18\x04 \x03(\x0b\x32\".flwr.proto.Context.RunConfigEntry\x1a\x45\n\x0fNodeConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x44\n\x0eRunConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\xbb\x01\n\x08Metadata\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x13\n\x0bsrc_node_id\x18\x03 \x01(\x12\x12\x13\n\x0b\x64st_node_id\x18\x04 \x01(\x12\x12\x18\n\x10reply_to_message\x18\x05 \x01(\t\x12\x10\n\x08group_id\x18\x06 \x01(\t\x12\x0b\n\x03ttl\x18\x07 \x01(\x01\x12\x14\n\x0cmessage_type\x18\x08 \x01(\t\x12\x12\n\ncreated_at\x18\t 
\x01(\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66lwr/proto/message.proto\x12\nflwr.proto\x1a\x16\x66lwr/proto/error.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x1a\x66lwr/proto/transport.proto\"{\n\x07Message\x12&\n\x08metadata\x18\x01 \x01(\x0b\x32\x14.flwr.proto.Metadata\x12&\n\x07\x63ontent\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\x03 \x01(\x0b\x32\x11.flwr.proto.Error\"\xbf\x02\n\x07\x43ontext\x12\x0f\n\x07node_id\x18\x01 \x01(\x04\x12\x38\n\x0bnode_config\x18\x02 \x03(\x0b\x32#.flwr.proto.Context.NodeConfigEntry\x12$\n\x05state\x18\x03 \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12\x36\n\nrun_config\x18\x04 \x03(\x0b\x32\".flwr.proto.Context.RunConfigEntry\x1a\x45\n\x0fNodeConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x44\n\x0eRunConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\xbb\x01\n\x08Metadata\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x12\n\nmessage_id\x18\x02 \x01(\t\x12\x13\n\x0bsrc_node_id\x18\x03 \x01(\x04\x12\x13\n\x0b\x64st_node_id\x18\x04 \x01(\x04\x12\x18\n\x10reply_to_message\x18\x05 \x01(\t\x12\x10\n\x08group_id\x18\x06 \x01(\t\x12\x0b\n\x03ttl\x18\x07 \x01(\x01\x12\x14\n\x0cmessage_type\x18\x08 \x01(\t\x12\x12\n\ncreated_at\x18\t \x01(\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/node_pb2.py b/src/py/flwr/proto/node_pb2.py index b300f2c562c2..f94691db6c3f 100644 --- a/src/py/flwr/proto/node_pb2.py +++ b/src/py/flwr/proto/node_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/node.proto\x12\nflwr.proto\"*\n\x04Node\x12\x0f\n\x07node_id\x18\x01 \x01(\x12\x12\x11\n\tanonymous\x18\x02 \x01(\x08\x62\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/node.proto\x12\nflwr.proto\"*\n\x04Node\x12\x0f\n\x07node_id\x18\x01 \x01(\x04\x12\x11\n\tanonymous\x18\x02 \x01(\x08\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/recordset_pb2.py b/src/py/flwr/proto/recordset_pb2.py index f7f74d72182b..6b169f869ab4 100644 --- a/src/py/flwr/proto/recordset_pb2.py +++ b/src/py/flwr/proto/recordset_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/recordset.proto\x12\nflwr.proto\"\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\"\x1a\n\nSint64List\x12\x0c\n\x04vals\x18\x01 \x03(\x12\"\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\"\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\"\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\"B\n\x05\x41rray\x12\r\n\x05\x64type\x18\x01 \x01(\t\x12\r\n\x05shape\x18\x02 \x03(\x05\x12\r\n\x05stype\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\"\x9f\x01\n\x12MetricsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12-\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x16.flwr.proto.Sint64ListH\x00\x42\x07\n\x05value\"\xd9\x02\n\x12\x43onfigsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\x03 \x01(\x08H\x00\x12\x10\n\x06string\x18\x04 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x05 \x01(\x0cH\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12-\n\x0bsint64_list\x18\x16 \x01(\x0b\x32\x16.flwr.proto.Sint64ListH\x00\x12)\n\tbool_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.BoolListH\x00\x12-\n\x0bstring_list\x18\x18 \x01(\x0b\x32\x16.flwr.proto.StringListH\x00\x12+\n\nbytes_list\x18\x19 
\x01(\x0b\x32\x15.flwr.proto.BytesListH\x00\x42\x07\n\x05value\"M\n\x10ParametersRecord\x12\x11\n\tdata_keys\x18\x01 \x03(\t\x12&\n\x0b\x64\x61ta_values\x18\x02 \x03(\x0b\x32\x11.flwr.proto.Array\"\x8f\x01\n\rMetricsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.MetricsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.MetricsRecordValue:\x02\x38\x01\"\x8f\x01\n\rConfigsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.ConfigsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.ConfigsRecordValue:\x02\x38\x01\"\x97\x03\n\tRecordSet\x12\x39\n\nparameters\x18\x01 \x03(\x0b\x32%.flwr.proto.RecordSet.ParametersEntry\x12\x33\n\x07metrics\x18\x02 \x03(\x0b\x32\".flwr.proto.RecordSet.MetricsEntry\x12\x33\n\x07\x63onfigs\x18\x03 \x03(\x0b\x32\".flwr.proto.RecordSet.ConfigsEntry\x1aO\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.flwr.proto.ParametersRecord:\x02\x38\x01\x1aI\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.MetricsRecord:\x02\x38\x01\x1aI\n\x0c\x43onfigsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.ConfigsRecord:\x02\x38\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/recordset.proto\x12\nflwr.proto\"\x1a\n\nDoubleList\x12\x0c\n\x04vals\x18\x01 \x03(\x01\"\x18\n\x08SintList\x12\x0c\n\x04vals\x18\x01 \x03(\x12\"\x18\n\x08UintList\x12\x0c\n\x04vals\x18\x01 \x03(\x04\"\x18\n\x08\x42oolList\x12\x0c\n\x04vals\x18\x01 \x03(\x08\"\x1a\n\nStringList\x12\x0c\n\x04vals\x18\x01 \x03(\t\"\x19\n\tBytesList\x12\x0c\n\x04vals\x18\x01 \x03(\x0c\"B\n\x05\x41rray\x12\r\n\x05\x64type\x18\x01 \x01(\t\x12\r\n\x05shape\x18\x02 \x03(\x05\x12\r\n\x05stype\x18\x03 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x04 
\x01(\x0c\"\xd8\x01\n\x12MetricsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x10\n\x06uint64\x18\x03 \x01(\x04H\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12)\n\tsint_list\x18\x16 \x01(\x0b\x32\x14.flwr.proto.SintListH\x00\x12)\n\tuint_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.UintListH\x00\x42\x07\n\x05value\"\x92\x03\n\x12\x43onfigsRecordValue\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x02 \x01(\x12H\x00\x12\x10\n\x06uint64\x18\x03 \x01(\x04H\x00\x12\x0e\n\x04\x62ool\x18\x04 \x01(\x08H\x00\x12\x10\n\x06string\x18\x05 \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x06 \x01(\x0cH\x00\x12-\n\x0b\x64ouble_list\x18\x15 \x01(\x0b\x32\x16.flwr.proto.DoubleListH\x00\x12)\n\tsint_list\x18\x16 \x01(\x0b\x32\x14.flwr.proto.SintListH\x00\x12)\n\tuint_list\x18\x17 \x01(\x0b\x32\x14.flwr.proto.UintListH\x00\x12)\n\tbool_list\x18\x18 \x01(\x0b\x32\x14.flwr.proto.BoolListH\x00\x12-\n\x0bstring_list\x18\x19 \x01(\x0b\x32\x16.flwr.proto.StringListH\x00\x12+\n\nbytes_list\x18\x1a \x01(\x0b\x32\x15.flwr.proto.BytesListH\x00\x42\x07\n\x05value\"M\n\x10ParametersRecord\x12\x11\n\tdata_keys\x18\x01 \x03(\t\x12&\n\x0b\x64\x61ta_values\x18\x02 \x03(\x0b\x32\x11.flwr.proto.Array\"\x8f\x01\n\rMetricsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.MetricsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.MetricsRecordValue:\x02\x38\x01\"\x8f\x01\n\rConfigsRecord\x12\x31\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32#.flwr.proto.ConfigsRecord.DataEntry\x1aK\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.flwr.proto.ConfigsRecordValue:\x02\x38\x01\"\x97\x03\n\tRecordSet\x12\x39\n\nparameters\x18\x01 \x03(\x0b\x32%.flwr.proto.RecordSet.ParametersEntry\x12\x33\n\x07metrics\x18\x02 \x03(\x0b\x32\".flwr.proto.RecordSet.MetricsEntry\x12\x33\n\x07\x63onfigs\x18\x03 
\x03(\x0b\x32\".flwr.proto.RecordSet.ConfigsEntry\x1aO\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12+\n\x05value\x18\x02 \x01(\x0b\x32\x1c.flwr.proto.ParametersRecord:\x02\x38\x01\x1aI\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.MetricsRecord:\x02\x38\x01\x1aI\n\x0c\x43onfigsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.flwr.proto.ConfigsRecord:\x02\x38\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -33,36 +33,38 @@ _globals['_RECORDSET_CONFIGSENTRY']._serialized_options = b'8\001' _globals['_DOUBLELIST']._serialized_start=42 _globals['_DOUBLELIST']._serialized_end=68 - _globals['_SINT64LIST']._serialized_start=70 - _globals['_SINT64LIST']._serialized_end=96 - _globals['_BOOLLIST']._serialized_start=98 - _globals['_BOOLLIST']._serialized_end=122 - _globals['_STRINGLIST']._serialized_start=124 - _globals['_STRINGLIST']._serialized_end=150 - _globals['_BYTESLIST']._serialized_start=152 - _globals['_BYTESLIST']._serialized_end=177 - _globals['_ARRAY']._serialized_start=179 - _globals['_ARRAY']._serialized_end=245 - _globals['_METRICSRECORDVALUE']._serialized_start=248 - _globals['_METRICSRECORDVALUE']._serialized_end=407 - _globals['_CONFIGSRECORDVALUE']._serialized_start=410 - _globals['_CONFIGSRECORDVALUE']._serialized_end=755 - _globals['_PARAMETERSRECORD']._serialized_start=757 - _globals['_PARAMETERSRECORD']._serialized_end=834 - _globals['_METRICSRECORD']._serialized_start=837 - _globals['_METRICSRECORD']._serialized_end=980 - _globals['_METRICSRECORD_DATAENTRY']._serialized_start=905 - _globals['_METRICSRECORD_DATAENTRY']._serialized_end=980 - _globals['_CONFIGSRECORD']._serialized_start=983 - _globals['_CONFIGSRECORD']._serialized_end=1126 - _globals['_CONFIGSRECORD_DATAENTRY']._serialized_start=1051 - _globals['_CONFIGSRECORD_DATAENTRY']._serialized_end=1126 - 
_globals['_RECORDSET']._serialized_start=1129 - _globals['_RECORDSET']._serialized_end=1536 - _globals['_RECORDSET_PARAMETERSENTRY']._serialized_start=1307 - _globals['_RECORDSET_PARAMETERSENTRY']._serialized_end=1386 - _globals['_RECORDSET_METRICSENTRY']._serialized_start=1388 - _globals['_RECORDSET_METRICSENTRY']._serialized_end=1461 - _globals['_RECORDSET_CONFIGSENTRY']._serialized_start=1463 - _globals['_RECORDSET_CONFIGSENTRY']._serialized_end=1536 + _globals['_SINTLIST']._serialized_start=70 + _globals['_SINTLIST']._serialized_end=94 + _globals['_UINTLIST']._serialized_start=96 + _globals['_UINTLIST']._serialized_end=120 + _globals['_BOOLLIST']._serialized_start=122 + _globals['_BOOLLIST']._serialized_end=146 + _globals['_STRINGLIST']._serialized_start=148 + _globals['_STRINGLIST']._serialized_end=174 + _globals['_BYTESLIST']._serialized_start=176 + _globals['_BYTESLIST']._serialized_end=201 + _globals['_ARRAY']._serialized_start=203 + _globals['_ARRAY']._serialized_end=269 + _globals['_METRICSRECORDVALUE']._serialized_start=272 + _globals['_METRICSRECORDVALUE']._serialized_end=488 + _globals['_CONFIGSRECORDVALUE']._serialized_start=491 + _globals['_CONFIGSRECORDVALUE']._serialized_end=893 + _globals['_PARAMETERSRECORD']._serialized_start=895 + _globals['_PARAMETERSRECORD']._serialized_end=972 + _globals['_METRICSRECORD']._serialized_start=975 + _globals['_METRICSRECORD']._serialized_end=1118 + _globals['_METRICSRECORD_DATAENTRY']._serialized_start=1043 + _globals['_METRICSRECORD_DATAENTRY']._serialized_end=1118 + _globals['_CONFIGSRECORD']._serialized_start=1121 + _globals['_CONFIGSRECORD']._serialized_end=1264 + _globals['_CONFIGSRECORD_DATAENTRY']._serialized_start=1189 + _globals['_CONFIGSRECORD_DATAENTRY']._serialized_end=1264 + _globals['_RECORDSET']._serialized_start=1267 + _globals['_RECORDSET']._serialized_end=1674 + _globals['_RECORDSET_PARAMETERSENTRY']._serialized_start=1445 + _globals['_RECORDSET_PARAMETERSENTRY']._serialized_end=1524 + 
_globals['_RECORDSET_METRICSENTRY']._serialized_start=1526 + _globals['_RECORDSET_METRICSENTRY']._serialized_end=1599 + _globals['_RECORDSET_CONFIGSENTRY']._serialized_start=1601 + _globals['_RECORDSET_CONFIGSENTRY']._serialized_end=1674 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/recordset_pb2.pyi b/src/py/flwr/proto/recordset_pb2.pyi index 86244697129c..91d17e3e6473 100644 --- a/src/py/flwr/proto/recordset_pb2.pyi +++ b/src/py/flwr/proto/recordset_pb2.pyi @@ -23,7 +23,7 @@ class DoubleList(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... global___DoubleList = DoubleList -class Sint64List(google.protobuf.message.Message): +class SintList(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor VALS_FIELD_NUMBER: builtins.int @property @@ -33,7 +33,19 @@ class Sint64List(google.protobuf.message.Message): vals: typing.Optional[typing.Iterable[builtins.int]] = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... -global___Sint64List = Sint64List +global___SintList = SintList + +class UintList(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + VALS_FIELD_NUMBER: builtins.int + @property + def vals(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + vals: typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["vals",b"vals"]) -> None: ... 
+global___UintList = UintList class BoolList(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor @@ -96,39 +108,48 @@ class MetricsRecordValue(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor DOUBLE_FIELD_NUMBER: builtins.int SINT64_FIELD_NUMBER: builtins.int + UINT64_FIELD_NUMBER: builtins.int DOUBLE_LIST_FIELD_NUMBER: builtins.int - SINT64_LIST_FIELD_NUMBER: builtins.int + SINT_LIST_FIELD_NUMBER: builtins.int + UINT_LIST_FIELD_NUMBER: builtins.int double: builtins.float """Single element""" sint64: builtins.int + uint64: builtins.int @property def double_list(self) -> global___DoubleList: """List types""" pass @property - def sint64_list(self) -> global___Sint64List: ... + def sint_list(self) -> global___SintList: ... + @property + def uint_list(self) -> global___UintList: ... def __init__(self, *, double: builtins.float = ..., sint64: builtins.int = ..., + uint64: builtins.int = ..., double_list: typing.Optional[global___DoubleList] = ..., - sint64_list: typing.Optional[global___Sint64List] = ..., + sint_list: typing.Optional[global___SintList] = ..., + uint_list: typing.Optional[global___UintList] = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","value",b"value"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","double_list","sint64_list"]]: ... 
+ def HasField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","uint64","double_list","sint_list","uint_list"]]: ... global___MetricsRecordValue = MetricsRecordValue class ConfigsRecordValue(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor DOUBLE_FIELD_NUMBER: builtins.int SINT64_FIELD_NUMBER: builtins.int + UINT64_FIELD_NUMBER: builtins.int BOOL_FIELD_NUMBER: builtins.int STRING_FIELD_NUMBER: builtins.int BYTES_FIELD_NUMBER: builtins.int DOUBLE_LIST_FIELD_NUMBER: builtins.int - SINT64_LIST_FIELD_NUMBER: builtins.int + SINT_LIST_FIELD_NUMBER: builtins.int + UINT_LIST_FIELD_NUMBER: builtins.int BOOL_LIST_FIELD_NUMBER: builtins.int STRING_LIST_FIELD_NUMBER: builtins.int BYTES_LIST_FIELD_NUMBER: builtins.int @@ -136,6 +157,7 @@ class ConfigsRecordValue(google.protobuf.message.Message): """Single element""" sint64: builtins.int + uint64: builtins.int bool: builtins.bool string: typing.Text bytes: builtins.bytes @@ -144,7 +166,9 @@ class ConfigsRecordValue(google.protobuf.message.Message): """List types""" pass @property - def sint64_list(self) -> global___Sint64List: ... + def sint_list(self) -> global___SintList: ... + @property + def uint_list(self) -> global___UintList: ... @property def bool_list(self) -> global___BoolList: ... 
@property @@ -155,18 +179,20 @@ class ConfigsRecordValue(google.protobuf.message.Message): *, double: builtins.float = ..., sint64: builtins.int = ..., + uint64: builtins.int = ..., bool: builtins.bool = ..., string: typing.Text = ..., bytes: builtins.bytes = ..., double_list: typing.Optional[global___DoubleList] = ..., - sint64_list: typing.Optional[global___Sint64List] = ..., + sint_list: typing.Optional[global___SintList] = ..., + uint_list: typing.Optional[global___UintList] = ..., bool_list: typing.Optional[global___BoolList] = ..., string_list: typing.Optional[global___StringList] = ..., bytes_list: typing.Optional[global___BytesList] = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint64_list",b"sint64_list","string",b"string","string_list",b"string_list","value",b"value"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","bool","string","bytes","double_list","sint64_list","bool_list","string_list","bytes_list"]]: ... + def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","string",b"string","string_list",b"string_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bool_list",b"bool_list","bytes",b"bytes","bytes_list",b"bytes_list","double",b"double","double_list",b"double_list","sint64",b"sint64","sint_list",b"sint_list","string",b"string","string_list",b"string_list","uint64",b"uint64","uint_list",b"uint_list","value",b"value"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["value",b"value"]) -> typing.Optional[typing_extensions.Literal["double","sint64","uint64","bool","string","bytes","double_list","sint_list","uint_list","bool_list","string_list","bytes_list"]]: ... global___ConfigsRecordValue = ConfigsRecordValue class ParametersRecord(google.protobuf.message.Message): diff --git a/src/py/flwr/proto/run_pb2.py b/src/py/flwr/proto/run_pb2.py index 4892091a6a46..cc3f6897918f 100644 --- a/src/py/flwr/proto/run_pb2.py +++ b/src/py/flwr/proto/run_pb2.py @@ -12,10 +12,12 @@ _sym_db = _symbol_database.Default() +from flwr.proto import fab_pb2 as flwr_dot_proto_dot_fab__pb2 +from flwr.proto import node_pb2 as flwr_dot_proto_dot_node__pb2 from flwr.proto import transport_pb2 as flwr_dot_proto_dot_transport__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/run.proto\x12\nflwr.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xd5\x01\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\x12<\n\x0foverride_config\x18\x04 \x03(\x0b\x32#.flwr.proto.Run.OverrideConfigEntry\x12\x10\n\x08\x66\x61\x62_hash\x18\x05 \x01(\t\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"\x1f\n\rGetRunRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x12\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Runb\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x14\x66lwr/proto/run.proto\x12\nflwr.proto\x1a\x14\x66lwr/proto/fab.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/transport.proto\"\xd5\x01\n\x03Run\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12\x0e\n\x06\x66\x61\x62_id\x18\x02 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x03 \x01(\t\x12<\n\x0foverride_config\x18\x04 \x03(\x0b\x32#.flwr.proto.Run.OverrideConfigEntry\x12\x10\n\x08\x66\x61\x62_hash\x18\x05 \x01(\t\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"@\n\tRunStatus\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x12\n\nsub_status\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\"\xeb\x01\n\x10\x43reateRunRequest\x12\x0e\n\x06\x66\x61\x62_id\x18\x01 \x01(\t\x12\x13\n\x0b\x66\x61\x62_version\x18\x02 \x01(\t\x12I\n\x0foverride_config\x18\x03 \x03(\x0b\x32\x30.flwr.proto.CreateRunRequest.OverrideConfigEntry\x12\x1c\n\x03\x66\x61\x62\x18\x04 \x01(\x0b\x32\x0f.flwr.proto.Fab\x1aI\n\x13OverrideConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\"#\n\x11\x43reateRunResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\"?\n\rGetRunRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0e\n\x06run_id\x18\x02 \x01(\x04\".\n\x0eGetRunResponse\x12\x1c\n\x03run\x18\x01 \x01(\x0b\x32\x0f.flwr.proto.Run\"S\n\x16UpdateRunStatusRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\x04\x12)\n\nrun_status\x18\x02 \x01(\x0b\x32\x15.flwr.proto.RunStatus\"\x19\n\x17UpdateRunStatusResponse\"F\n\x13GetRunStatusRequest\x12\x1e\n\x04node\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x0f\n\x07run_ids\x18\x02 \x03(\x04\"\xb1\x01\n\x14GetRunStatusResponse\x12L\n\x0frun_status_dict\x18\x01 \x03(\x0b\x32\x33.flwr.proto.GetRunStatusResponse.RunStatusDictEntry\x1aK\n\x12RunStatusDictEntry\x12\x0b\n\x03key\x18\x01 \x01(\x04\x12$\n\x05value\x18\x02 
\x01(\x0b\x32\x15.flwr.proto.RunStatus:\x02\x38\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -24,12 +26,34 @@ DESCRIPTOR._options = None _globals['_RUN_OVERRIDECONFIGENTRY']._options = None _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' - _globals['_RUN']._serialized_start=65 - _globals['_RUN']._serialized_end=278 - _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_start=205 - _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_end=278 - _globals['_GETRUNREQUEST']._serialized_start=280 - _globals['_GETRUNREQUEST']._serialized_end=311 - _globals['_GETRUNRESPONSE']._serialized_start=313 - _globals['_GETRUNRESPONSE']._serialized_end=359 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._options = None + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_options = b'8\001' + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._options = None + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_options = b'8\001' + _globals['_RUN']._serialized_start=110 + _globals['_RUN']._serialized_end=323 + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_start=250 + _globals['_RUN_OVERRIDECONFIGENTRY']._serialized_end=323 + _globals['_RUNSTATUS']._serialized_start=325 + _globals['_RUNSTATUS']._serialized_end=389 + _globals['_CREATERUNREQUEST']._serialized_start=392 + _globals['_CREATERUNREQUEST']._serialized_end=627 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_start=250 + _globals['_CREATERUNREQUEST_OVERRIDECONFIGENTRY']._serialized_end=323 + _globals['_CREATERUNRESPONSE']._serialized_start=629 + _globals['_CREATERUNRESPONSE']._serialized_end=664 + _globals['_GETRUNREQUEST']._serialized_start=666 + _globals['_GETRUNREQUEST']._serialized_end=729 + _globals['_GETRUNRESPONSE']._serialized_start=731 + _globals['_GETRUNRESPONSE']._serialized_end=777 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_start=779 + _globals['_UPDATERUNSTATUSREQUEST']._serialized_end=862 
+ _globals['_UPDATERUNSTATUSRESPONSE']._serialized_start=864 + _globals['_UPDATERUNSTATUSRESPONSE']._serialized_end=889 + _globals['_GETRUNSTATUSREQUEST']._serialized_start=891 + _globals['_GETRUNSTATUSREQUEST']._serialized_end=961 + _globals['_GETRUNSTATUSRESPONSE']._serialized_start=964 + _globals['_GETRUNSTATUSRESPONSE']._serialized_end=1141 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_start=1066 + _globals['_GETRUNSTATUSRESPONSE_RUNSTATUSDICTENTRY']._serialized_end=1141 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/run_pb2.pyi b/src/py/flwr/proto/run_pb2.pyi index e65feee9c518..16411712eaf2 100644 --- a/src/py/flwr/proto/run_pb2.pyi +++ b/src/py/flwr/proto/run_pb2.pyi @@ -3,6 +3,8 @@ isort:skip_file """ import builtins +import flwr.proto.fab_pb2 +import flwr.proto.node_pb2 import flwr.proto.transport_pb2 import google.protobuf.descriptor import google.protobuf.internal.containers @@ -51,7 +53,69 @@ class Run(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["fab_hash",b"fab_hash","fab_id",b"fab_id","fab_version",b"fab_version","override_config",b"override_config","run_id",b"run_id"]) -> None: ... global___Run = Run -class GetRunRequest(google.protobuf.message.Message): +class RunStatus(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + STATUS_FIELD_NUMBER: builtins.int + SUB_STATUS_FIELD_NUMBER: builtins.int + DETAILS_FIELD_NUMBER: builtins.int + status: typing.Text + """"starting", "running", "finished" """ + + sub_status: typing.Text + """"completed", "failed", "stopped" or "" (non-finished)""" + + details: typing.Text + """failure details""" + + def __init__(self, + *, + status: typing.Text = ..., + sub_status: typing.Text = ..., + details: typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["details",b"details","status",b"status","sub_status",b"sub_status"]) -> None: ... 
+global___RunStatus = RunStatus + +class CreateRunRequest(google.protobuf.message.Message): + """CreateRun""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class OverrideConfigEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text + @property + def value(self) -> flwr.proto.transport_pb2.Scalar: ... + def __init__(self, + *, + key: typing.Text = ..., + value: typing.Optional[flwr.proto.transport_pb2.Scalar] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + FAB_ID_FIELD_NUMBER: builtins.int + FAB_VERSION_FIELD_NUMBER: builtins.int + OVERRIDE_CONFIG_FIELD_NUMBER: builtins.int + FAB_FIELD_NUMBER: builtins.int + fab_id: typing.Text + fab_version: typing.Text + @property + def override_config(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, flwr.proto.transport_pb2.Scalar]: ... + @property + def fab(self) -> flwr.proto.fab_pb2.Fab: ... + def __init__(self, + *, + fab_id: typing.Text = ..., + fab_version: typing.Text = ..., + override_config: typing.Optional[typing.Mapping[typing.Text, flwr.proto.transport_pb2.Scalar]] = ..., + fab: typing.Optional[flwr.proto.fab_pb2.Fab] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["fab",b"fab"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["fab",b"fab","fab_id",b"fab_id","fab_version",b"fab_version","override_config",b"override_config"]) -> None: ... 
+global___CreateRunRequest = CreateRunRequest + +class CreateRunResponse(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor RUN_ID_FIELD_NUMBER: builtins.int run_id: builtins.int @@ -60,6 +124,23 @@ class GetRunRequest(google.protobuf.message.Message): run_id: builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id"]) -> None: ... +global___CreateRunResponse = CreateRunResponse + +class GetRunRequest(google.protobuf.message.Message): + """GetRun""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int + RUN_ID_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... + run_id: builtins.int + def __init__(self, + *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + run_id: builtins.int = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","run_id",b"run_id"]) -> None: ... global___GetRunRequest = GetRunRequest class GetRunResponse(google.protobuf.message.Message): @@ -74,3 +155,71 @@ class GetRunResponse(google.protobuf.message.Message): def HasField(self, field_name: typing_extensions.Literal["run",b"run"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["run",b"run"]) -> None: ... global___GetRunResponse = GetRunResponse + +class UpdateRunStatusRequest(google.protobuf.message.Message): + """UpdateRunStatus""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + RUN_ID_FIELD_NUMBER: builtins.int + RUN_STATUS_FIELD_NUMBER: builtins.int + run_id: builtins.int + @property + def run_status(self) -> global___RunStatus: ... + def __init__(self, + *, + run_id: builtins.int = ..., + run_status: typing.Optional[global___RunStatus] = ..., + ) -> None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["run_status",b"run_status"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["run_id",b"run_id","run_status",b"run_status"]) -> None: ... +global___UpdateRunStatusRequest = UpdateRunStatusRequest + +class UpdateRunStatusResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + def __init__(self, + ) -> None: ... +global___UpdateRunStatusResponse = UpdateRunStatusResponse + +class GetRunStatusRequest(google.protobuf.message.Message): + """GetRunStatus""" + DESCRIPTOR: google.protobuf.descriptor.Descriptor + NODE_FIELD_NUMBER: builtins.int + RUN_IDS_FIELD_NUMBER: builtins.int + @property + def node(self) -> flwr.proto.node_pb2.Node: ... + @property + def run_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + node: typing.Optional[flwr.proto.node_pb2.Node] = ..., + run_ids: typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["node",b"node"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["node",b"node","run_ids",b"run_ids"]) -> None: ... +global___GetRunStatusRequest = GetRunStatusRequest + +class GetRunStatusResponse(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + class RunStatusDictEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.int + @property + def value(self) -> global___RunStatus: ... + def __init__(self, + *, + key: builtins.int = ..., + value: typing.Optional[global___RunStatus] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + RUN_STATUS_DICT_FIELD_NUMBER: builtins.int + @property + def run_status_dict(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___RunStatus]: ... + def __init__(self, + *, + run_status_dict: typing.Optional[typing.Mapping[builtins.int, global___RunStatus]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["run_status_dict",b"run_status_dict"]) -> None: ... +global___GetRunStatusResponse = GetRunStatusResponse diff --git a/src/py/flwr/proto/task_pb2.py b/src/py/flwr/proto/task_pb2.py index 3e044f9ec846..75b022dc65ea 100644 --- a/src/py/flwr/proto/task_pb2.py +++ b/src/py/flwr/proto/task_pb2.py @@ -17,7 +17,7 @@ from flwr.proto import error_pb2 as flwr_dot_proto_dot_error__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\x01\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 \x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x12\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x15\x66lwr/proto/task.proto\x12\nflwr.proto\x1a\x15\x66lwr/proto/node.proto\x1a\x1a\x66lwr/proto/recordset.proto\x1a\x16\x66lwr/proto/error.proto\"\x89\x02\n\x04Task\x12\"\n\x08producer\x18\x01 \x01(\x0b\x32\x10.flwr.proto.Node\x12\"\n\x08\x63onsumer\x18\x02 \x01(\x0b\x32\x10.flwr.proto.Node\x12\x12\n\ncreated_at\x18\x03 \x01(\x01\x12\x14\n\x0c\x64\x65livered_at\x18\x04 \x01(\t\x12\x11\n\tpushed_at\x18\x05 \x01(\x01\x12\x0b\n\x03ttl\x18\x06 \x01(\x01\x12\x10\n\x08\x61ncestry\x18\x07 \x03(\t\x12\x11\n\ttask_type\x18\x08 \x01(\t\x12(\n\trecordset\x18\t \x01(\x0b\x32\x15.flwr.proto.RecordSet\x12 \n\x05\x65rror\x18\n \x01(\x0b\x32\x11.flwr.proto.Error\"\\\n\x07TaskIns\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x04\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Task\"\\\n\x07TaskRes\x12\x0f\n\x07task_id\x18\x01 \x01(\t\x12\x10\n\x08group_id\x18\x02 \x01(\t\x12\x0e\n\x06run_id\x18\x03 \x01(\x04\x12\x1e\n\x04task\x18\x04 \x01(\x0b\x32\x10.flwr.proto.Taskb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) diff --git a/src/py/flwr/proto/transport_pb2.py b/src/py/flwr/proto/transport_pb2.py index d3aae72b63ab..b457463f99ca 100644 --- a/src/py/flwr/proto/transport_pb2.py +++ b/src/py/flwr/proto/transport_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/transport.proto\x12\nflwr.proto\"9\n\x06Status\x12\x1e\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x10.flwr.proto.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"2\n\nParameters\x12\x0f\n\x07tensors\x18\x01 \x03(\x0c\x12\x13\n\x0btensor_type\x18\x02 \x01(\t\"\xba\x08\n\rServerMessage\x12?\n\rreconnect_ins\x18\x01 \x01(\x0b\x32&.flwr.proto.ServerMessage.ReconnectInsH\x00\x12H\n\x12get_properties_ins\x18\x02 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetPropertiesInsH\x00\x12H\n\x12get_parameters_ins\x18\x03 
\x01(\x0b\x32*.flwr.proto.ServerMessage.GetParametersInsH\x00\x12\x33\n\x07\x66it_ins\x18\x04 \x01(\x0b\x32 .flwr.proto.ServerMessage.FitInsH\x00\x12=\n\x0c\x65valuate_ins\x18\x05 \x01(\x0b\x32%.flwr.proto.ServerMessage.EvaluateInsH\x00\x1a\x1f\n\x0cReconnectIns\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x1a\x9d\x01\n\x10GetPropertiesIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetPropertiesIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x9d\x01\n\x10GetParametersIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetParametersIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xb5\x01\n\x06\x46itIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12<\n\x06\x63onfig\x18\x02 \x03(\x0b\x32,.flwr.proto.ServerMessage.FitIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xbf\x01\n\x0b\x45valuateIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x41\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x31.flwr.proto.ServerMessage.EvaluateIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"\xa0\t\n\rClientMessage\x12\x41\n\x0e\x64isconnect_res\x18\x01 \x01(\x0b\x32\'.flwr.proto.ClientMessage.DisconnectResH\x00\x12H\n\x12get_properties_res\x18\x02 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetPropertiesResH\x00\x12H\n\x12get_parameters_res\x18\x03 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetParametersResH\x00\x12\x33\n\x07\x66it_res\x18\x04 \x01(\x0b\x32 .flwr.proto.ClientMessage.FitResH\x00\x12=\n\x0c\x65valuate_res\x18\x05 
\x01(\x0b\x32%.flwr.proto.ClientMessage.EvaluateResH\x00\x1a\x33\n\rDisconnectRes\x12\"\n\x06reason\x18\x01 \x01(\x0e\x32\x12.flwr.proto.Reason\x1a\xcd\x01\n\x10GetPropertiesRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12N\n\nproperties\x18\x02 \x03(\x0b\x32:.flwr.proto.ClientMessage.GetPropertiesRes.PropertiesEntry\x1a\x45\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x62\n\x10GetParametersRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x1a\xf2\x01\n\x06\x46itRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12>\n\x07metrics\x18\x04 \x03(\x0b\x32-.flwr.proto.ClientMessage.FitRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xde\x01\n\x0b\x45valuateRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12\x0c\n\x04loss\x18\x02 \x01(\x02\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12\x43\n\x07metrics\x18\x04 \x03(\x0b\x32\x32.flwr.proto.ClientMessage.EvaluateRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"i\n\x06Scalar\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06sint64\x18\x08 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\r \x01(\x08H\x00\x12\x10\n\x06string\x18\x0e \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x0f 
\x01(\x0cH\x00\x42\x08\n\x06scalar*\x8d\x01\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\"\n\x1eGET_PROPERTIES_NOT_IMPLEMENTED\x10\x01\x12\"\n\x1eGET_PARAMETERS_NOT_IMPLEMENTED\x10\x02\x12\x17\n\x13\x46IT_NOT_IMPLEMENTED\x10\x03\x12\x1c\n\x18\x45VALUATE_NOT_IMPLEMENTED\x10\x04*[\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tRECONNECT\x10\x01\x12\x16\n\x12POWER_DISCONNECTED\x10\x02\x12\x14\n\x10WIFI_UNAVAILABLE\x10\x03\x12\x07\n\x03\x41\x43K\x10\x04\x32S\n\rFlowerService\x12\x42\n\x04Join\x12\x19.flwr.proto.ClientMessage\x1a\x19.flwr.proto.ServerMessage\"\x00(\x01\x30\x01\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lwr/proto/transport.proto\x12\nflwr.proto\"9\n\x06Status\x12\x1e\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x10.flwr.proto.Code\x12\x0f\n\x07message\x18\x02 \x01(\t\"2\n\nParameters\x12\x0f\n\x07tensors\x18\x01 \x03(\x0c\x12\x13\n\x0btensor_type\x18\x02 \x01(\t\"\xba\x08\n\rServerMessage\x12?\n\rreconnect_ins\x18\x01 \x01(\x0b\x32&.flwr.proto.ServerMessage.ReconnectInsH\x00\x12H\n\x12get_properties_ins\x18\x02 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetPropertiesInsH\x00\x12H\n\x12get_parameters_ins\x18\x03 \x01(\x0b\x32*.flwr.proto.ServerMessage.GetParametersInsH\x00\x12\x33\n\x07\x66it_ins\x18\x04 \x01(\x0b\x32 .flwr.proto.ServerMessage.FitInsH\x00\x12=\n\x0c\x65valuate_ins\x18\x05 \x01(\x0b\x32%.flwr.proto.ServerMessage.EvaluateInsH\x00\x1a\x1f\n\x0cReconnectIns\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x1a\x9d\x01\n\x10GetPropertiesIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetPropertiesIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x9d\x01\n\x10GetParametersIns\x12\x46\n\x06\x63onfig\x18\x01 \x03(\x0b\x32\x36.flwr.proto.ServerMessage.GetParametersIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 
\x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xb5\x01\n\x06\x46itIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12<\n\x06\x63onfig\x18\x02 \x03(\x0b\x32,.flwr.proto.ServerMessage.FitIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xbf\x01\n\x0b\x45valuateIns\x12*\n\nparameters\x18\x01 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x41\n\x06\x63onfig\x18\x02 \x03(\x0b\x32\x31.flwr.proto.ServerMessage.EvaluateIns.ConfigEntry\x1a\x41\n\x0b\x43onfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"\xa0\t\n\rClientMessage\x12\x41\n\x0e\x64isconnect_res\x18\x01 \x01(\x0b\x32\'.flwr.proto.ClientMessage.DisconnectResH\x00\x12H\n\x12get_properties_res\x18\x02 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetPropertiesResH\x00\x12H\n\x12get_parameters_res\x18\x03 \x01(\x0b\x32*.flwr.proto.ClientMessage.GetParametersResH\x00\x12\x33\n\x07\x66it_res\x18\x04 \x01(\x0b\x32 .flwr.proto.ClientMessage.FitResH\x00\x12=\n\x0c\x65valuate_res\x18\x05 \x01(\x0b\x32%.flwr.proto.ClientMessage.EvaluateResH\x00\x1a\x33\n\rDisconnectRes\x12\"\n\x06reason\x18\x01 \x01(\x0e\x32\x12.flwr.proto.Reason\x1a\xcd\x01\n\x10GetPropertiesRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12N\n\nproperties\x18\x02 \x03(\x0b\x32:.flwr.proto.ClientMessage.GetPropertiesRes.PropertiesEntry\x1a\x45\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\x62\n\x10GetParametersRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x1a\xf2\x01\n\x06\x46itRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12*\n\nparameters\x18\x02 \x01(\x0b\x32\x16.flwr.proto.Parameters\x12\x14\n\x0cnum_examples\x18\x03 
\x01(\x03\x12>\n\x07metrics\x18\x04 \x03(\x0b\x32-.flwr.proto.ClientMessage.FitRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x1a\xde\x01\n\x0b\x45valuateRes\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.flwr.proto.Status\x12\x0c\n\x04loss\x18\x02 \x01(\x02\x12\x14\n\x0cnum_examples\x18\x03 \x01(\x03\x12\x43\n\x07metrics\x18\x04 \x03(\x0b\x32\x32.flwr.proto.ClientMessage.EvaluateRes.MetricsEntry\x1a\x42\n\x0cMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.flwr.proto.Scalar:\x02\x38\x01\x42\x05\n\x03msg\"{\n\x06Scalar\x12\x10\n\x06\x64ouble\x18\x01 \x01(\x01H\x00\x12\x10\n\x06uint64\x18\x06 \x01(\x04H\x00\x12\x10\n\x06sint64\x18\x08 \x01(\x12H\x00\x12\x0e\n\x04\x62ool\x18\r \x01(\x08H\x00\x12\x10\n\x06string\x18\x0e \x01(\tH\x00\x12\x0f\n\x05\x62ytes\x18\x0f \x01(\x0cH\x00\x42\x08\n\x06scalar*\x8d\x01\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\"\n\x1eGET_PROPERTIES_NOT_IMPLEMENTED\x10\x01\x12\"\n\x1eGET_PARAMETERS_NOT_IMPLEMENTED\x10\x02\x12\x17\n\x13\x46IT_NOT_IMPLEMENTED\x10\x03\x12\x1c\n\x18\x45VALUATE_NOT_IMPLEMENTED\x10\x04*[\n\x06Reason\x12\x0b\n\x07UNKNOWN\x10\x00\x12\r\n\tRECONNECT\x10\x01\x12\x16\n\x12POWER_DISCONNECTED\x10\x02\x12\x14\n\x10WIFI_UNAVAILABLE\x10\x03\x12\x07\n\x03\x41\x43K\x10\x04\x32S\n\rFlowerService\x12\x42\n\x04Join\x12\x19.flwr.proto.ClientMessage\x1a\x19.flwr.proto.ServerMessage\"\x00(\x01\x30\x01\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -35,10 +35,10 @@ _globals['_CLIENTMESSAGE_FITRES_METRICSENTRY']._serialized_options = b'8\001' _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._options = None _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_options = b'8\001' - _globals['_CODE']._serialized_start=2533 - _globals['_CODE']._serialized_end=2674 - _globals['_REASON']._serialized_start=2676 - 
_globals['_REASON']._serialized_end=2767 + _globals['_CODE']._serialized_start=2551 + _globals['_CODE']._serialized_end=2692 + _globals['_REASON']._serialized_start=2694 + _globals['_REASON']._serialized_end=2785 _globals['_STATUS']._serialized_start=42 _globals['_STATUS']._serialized_end=99 _globals['_PARAMETERS']._serialized_start=101 @@ -82,7 +82,7 @@ _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_start=2125 _globals['_CLIENTMESSAGE_EVALUATERES_METRICSENTRY']._serialized_end=2191 _globals['_SCALAR']._serialized_start=2425 - _globals['_SCALAR']._serialized_end=2530 - _globals['_FLOWERSERVICE']._serialized_start=2769 - _globals['_FLOWERSERVICE']._serialized_end=2852 + _globals['_SCALAR']._serialized_end=2548 + _globals['_FLOWERSERVICE']._serialized_start=2787 + _globals['_FLOWERSERVICE']._serialized_end=2870 # @@protoc_insertion_point(module_scope) diff --git a/src/py/flwr/proto/transport_pb2.pyi b/src/py/flwr/proto/transport_pb2.pyi index d10a1536ceab..0fe541f0a320 100644 --- a/src/py/flwr/proto/transport_pb2.pyi +++ b/src/py/flwr/proto/transport_pb2.pyi @@ -402,20 +402,22 @@ global___ClientMessage = ClientMessage class Scalar(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor DOUBLE_FIELD_NUMBER: builtins.int + UINT64_FIELD_NUMBER: builtins.int SINT64_FIELD_NUMBER: builtins.int BOOL_FIELD_NUMBER: builtins.int STRING_FIELD_NUMBER: builtins.int BYTES_FIELD_NUMBER: builtins.int double: builtins.float - sint64: builtins.int + uint64: builtins.int """float float = 2; int32 int32 = 3; int64 int64 = 4; uint32 uint32 = 5; - uint64 uint64 = 6; - sint32 sint32 = 7; """ + sint64: builtins.int + """sint32 sint32 = 7;""" + bool: builtins.bool """fixed32 fixed32 = 9; fixed64 fixed64 = 10; @@ -428,12 +430,13 @@ class Scalar(google.protobuf.message.Message): def __init__(self, *, double: builtins.float = ..., + uint64: builtins.int = ..., sint64: builtins.int = ..., bool: builtins.bool = ..., string: typing.Text = ..., bytes: 
builtins.bytes = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string"]) -> None: ... - def WhichOneof(self, oneof_group: typing_extensions.Literal["scalar",b"scalar"]) -> typing.Optional[typing_extensions.Literal["double","sint64","bool","string","bytes"]]: ... + def HasField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string","uint64",b"uint64"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["bool",b"bool","bytes",b"bytes","double",b"double","scalar",b"scalar","sint64",b"sint64","string",b"string","uint64",b"uint64"]) -> None: ... + def WhichOneof(self, oneof_group: typing_extensions.Literal["scalar",b"scalar"]) -> typing.Optional[typing_extensions.Literal["double","uint64","sint64","bool","string","bytes"]]: ... global___Scalar = Scalar diff --git a/src/py/flwr/server/__init__.py b/src/py/flwr/server/__init__.py index 896b46298327..1dde95b6b047 100644 --- a/src/py/flwr/server/__init__.py +++ b/src/py/flwr/server/__init__.py @@ -17,14 +17,12 @@ from . import strategy from . 
import workflow as workflow -from .app import run_superlink as run_superlink from .app import start_server as start_server from .client_manager import ClientManager as ClientManager from .client_manager import SimpleClientManager as SimpleClientManager from .compat import LegacyContext as LegacyContext from .driver import Driver as Driver from .history import History as History -from .run_serverapp import run_server_app as run_server_app from .server import Server as Server from .server_app import ServerApp as ServerApp from .server_config import ServerConfig as ServerConfig @@ -40,8 +38,6 @@ "ServerAppComponents", "ServerConfig", "SimpleClientManager", - "run_server_app", - "run_superlink", "start_server", "strategy", "workflow", diff --git a/src/py/flwr/server/app.py b/src/py/flwr/server/app.py index ef632a0c014d..0b6325a81c2b 100644 --- a/src/py/flwr/server/app.py +++ b/src/py/flwr/server/app.py @@ -19,10 +19,11 @@ import importlib.util import sys import threading +from collections.abc import Sequence from logging import INFO, WARN from os.path import isfile from pathlib import Path -from typing import Optional, Sequence, Set, Tuple +from typing import Optional import grpc from cryptography.exceptions import UnsupportedAlgorithm @@ -34,8 +35,15 @@ from flwr.common import GRPC_MAX_MESSAGE_LENGTH, EventType, event from flwr.common.address import parse_address -from flwr.common.config import get_flwr_dir +from flwr.common.config import get_flwr_dir, parse_config_args from flwr.common.constant import ( + DRIVER_API_DEFAULT_ADDRESS, + EXEC_API_DEFAULT_ADDRESS, + FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS, + FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, + FLEET_API_REST_DEFAULT_ADDRESS, + ISOLATION_MODE_PROCESS, + ISOLATION_MODE_SUBPROCESS, MISSING_EXTRA_REST, TRANSPORT_TYPE_GRPC_ADAPTER, TRANSPORT_TYPE_GRPC_RERE, @@ -51,6 +59,8 @@ add_FleetServicer_to_server, ) from flwr.proto.grpcadapter_pb2_grpc import add_GrpcAdapterServicer_to_server +from flwr.superexec.app import load_executor 
+from flwr.superexec.exec_grpc import run_superexec_api_grpc from .client_manager import ClientManager from .history import History @@ -66,12 +76,7 @@ ) from .superlink.fleet.grpc_rere.fleet_servicer import FleetServicer from .superlink.fleet.grpc_rere.server_interceptor import AuthenticateServerInterceptor -from .superlink.state import StateFactory - -ADDRESS_DRIVER_API = "0.0.0.0:9091" -ADDRESS_FLEET_API_GRPC_RERE = "0.0.0.0:9092" -ADDRESS_FLEET_API_GRPC_BIDI = "[::]:8080" # IPv6 to keep start_server compatible -ADDRESS_FLEET_API_REST = "0.0.0.0:9093" +from .superlink.linkstate import LinkStateFactory DATABASE = ":flwr-in-memory-state:" BASE_DIR = get_flwr_dir() / "superlink" / "ffs" @@ -79,13 +84,13 @@ def start_server( # pylint: disable=too-many-arguments,too-many-locals *, - server_address: str = ADDRESS_FLEET_API_GRPC_BIDI, + server_address: str = FLEET_API_GRPC_BIDI_DEFAULT_ADDRESS, server: Optional[Server] = None, config: Optional[ServerConfig] = None, strategy: Optional[Strategy] = None, client_manager: Optional[ClientManager] = None, grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, - certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + certificates: Optional[tuple[bytes, bytes, bytes]] = None, ) -> History: """Start a Flower server using the gRPC transport layer. 
@@ -199,20 +204,21 @@ def start_server( # pylint: disable=too-many-arguments,too-many-locals # pylint: disable=too-many-branches, too-many-locals, too-many-statements def run_superlink() -> None: """Run Flower SuperLink (Driver API and Fleet API).""" + args = _parse_args_run_superlink().parse_args() + log(INFO, "Starting Flower SuperLink") event(EventType.RUN_SUPERLINK_ENTER) - args = _parse_args_run_superlink().parse_args() - - # Parse IP address + # Parse IP addresses driver_address, _, _ = _format_address(args.driver_api_address) + exec_address, _, _ = _format_address(args.exec_api_address) # Obtain certificates certificates = _try_obtain_certificates(args) # Initialize StateFactory - state_factory = StateFactory(args.database) + state_factory = LinkStateFactory(args.database) # Initialize FfsFactory ffs_factory = FfsFactory(args.storage_dir) @@ -224,17 +230,18 @@ def run_superlink() -> None: ffs_factory=ffs_factory, certificates=certificates, ) - grpc_servers = [driver_server] + + # Start Fleet API bckg_threads = [] if not args.fleet_api_address: if args.fleet_api_type in [ TRANSPORT_TYPE_GRPC_RERE, TRANSPORT_TYPE_GRPC_ADAPTER, ]: - args.fleet_api_address = ADDRESS_FLEET_API_GRPC_RERE + args.fleet_api_address = FLEET_API_GRPC_RERE_DEFAULT_ADDRESS elif args.fleet_api_type == TRANSPORT_TYPE_REST: - args.fleet_api_address = ADDRESS_FLEET_API_REST + args.fleet_api_address = FLEET_API_REST_DEFAULT_ADDRESS fleet_address, host, port = _format_address(args.fleet_api_address) @@ -250,7 +257,6 @@ def run_superlink() -> None: ) num_workers = 1 - # Start Fleet API if args.fleet_api_type == TRANSPORT_TYPE_REST: if ( importlib.util.find_spec("requests") @@ -278,24 +284,24 @@ def run_superlink() -> None: fleet_thread.start() bckg_threads.append(fleet_thread) elif args.fleet_api_type == TRANSPORT_TYPE_GRPC_RERE: - maybe_keys = _try_setup_client_authentication(args, certificates) + maybe_keys = _try_setup_node_authentication(args, certificates) interceptors: 
Optional[Sequence[grpc.ServerInterceptor]] = None if maybe_keys is not None: ( - client_public_keys, + node_public_keys, server_private_key, server_public_key, ) = maybe_keys state = state_factory.state() - state.store_client_public_keys(client_public_keys) + state.store_node_public_keys(node_public_keys) state.store_server_private_public_key( private_key_to_bytes(server_private_key), public_key_to_bytes(server_public_key), ) log( INFO, - "Client authentication enabled with %d known public keys", - len(client_public_keys), + "Node authentication enabled with %d known public keys", + len(node_public_keys), ) interceptors = [AuthenticateServerInterceptor(state)] @@ -318,6 +324,17 @@ def run_superlink() -> None: else: raise ValueError(f"Unknown fleet_api_type: {args.fleet_api_type}") + # Start Exec API + exec_server: grpc.Server = run_superexec_api_grpc( + address=exec_address, + executor=load_executor(args), + certificates=certificates, + config=parse_config_args( + [args.executor_config] if args.executor_config else args.executor_config + ), + ) + grpc_servers.append(exec_server) + # Graceful shutdown register_exit_handlers( event_type=EventType.RUN_SUPERLINK_LEAVE, @@ -334,7 +351,7 @@ def run_superlink() -> None: driver_server.wait_for_termination(timeout=1) -def _format_address(address: str) -> Tuple[str, str, int]: +def _format_address(address: str) -> tuple[str, str, int]: parsed_address = parse_address(address) if not parsed_address: sys.exit( @@ -344,10 +361,10 @@ def _format_address(address: str) -> Tuple[str, str, int]: return (f"[{host}]:{port}" if is_v6 else f"{host}:{port}", host, port) -def _try_setup_client_authentication( +def _try_setup_node_authentication( args: argparse.Namespace, - certificates: Optional[Tuple[bytes, bytes, bytes]], -) -> Optional[Tuple[Set[bytes], ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey]]: + certificates: Optional[tuple[bytes, bytes, bytes]], +) -> Optional[tuple[set[bytes], ec.EllipticCurvePrivateKey, 
ec.EllipticCurvePublicKey]]: if ( not args.auth_list_public_keys and not args.auth_superlink_private_key @@ -373,16 +390,16 @@ def _try_setup_client_authentication( "`--ssl-keyfile`, and `—-ssl-ca-certfile` and try again." ) - client_keys_file_path = Path(args.auth_list_public_keys) - if not client_keys_file_path.exists(): + node_keys_file_path = Path(args.auth_list_public_keys) + if not node_keys_file_path.exists(): sys.exit( "The provided path to the known public keys CSV file does not exist: " - f"{client_keys_file_path}. " + f"{node_keys_file_path}. " "Please provide the CSV file path containing known public keys " "to '--auth-list-public-keys'." ) - client_public_keys: Set[bytes] = set() + node_public_keys: set[bytes] = set() try: ssh_private_key = load_ssh_private_key( @@ -413,13 +430,13 @@ def _try_setup_client_authentication( "path points to a valid public key file and try again." ) - with open(client_keys_file_path, newline="", encoding="utf-8") as csvfile: + with open(node_keys_file_path, newline="", encoding="utf-8") as csvfile: reader = csv.reader(csvfile) for row in reader: for element in row: public_key = load_ssh_public_key(element.encode()) if isinstance(public_key, ec.EllipticCurvePublicKey): - client_public_keys.add(public_key_to_bytes(public_key)) + node_public_keys.add(public_key_to_bytes(public_key)) else: sys.exit( "Error: Unable to parse the public keys in the CSV " @@ -427,7 +444,7 @@ def _try_setup_client_authentication( "known SSH public keys files and try again." ) return ( - client_public_keys, + node_public_keys, ssh_private_key, ssh_public_key, ) @@ -435,7 +452,7 @@ def _try_setup_client_authentication( def _try_obtain_certificates( args: argparse.Namespace, -) -> Optional[Tuple[bytes, bytes, bytes]]: +) -> Optional[tuple[bytes, bytes, bytes]]: # Obtain certificates if args.insecure: log(WARN, "Option `--insecure` was set. 
Starting insecure HTTP server.") @@ -489,9 +506,9 @@ def _try_obtain_certificates( def _run_fleet_api_grpc_rere( address: str, - state_factory: StateFactory, + state_factory: LinkStateFactory, ffs_factory: FfsFactory, - certificates: Optional[Tuple[bytes, bytes, bytes]], + certificates: Optional[tuple[bytes, bytes, bytes]], interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Run Fleet API (gRPC, request-response).""" @@ -517,9 +534,9 @@ def _run_fleet_api_grpc_rere( def _run_fleet_api_grpc_adapter( address: str, - state_factory: StateFactory, + state_factory: LinkStateFactory, ffs_factory: FfsFactory, - certificates: Optional[Tuple[bytes, bytes, bytes]], + certificates: Optional[tuple[bytes, bytes, bytes]], ) -> grpc.Server: """Run Fleet API (GrpcAdapter).""" # Create Fleet API gRPC server @@ -542,12 +559,13 @@ def _run_fleet_api_grpc_adapter( # pylint: disable=import-outside-toplevel,too-many-arguments +# pylint: disable=too-many-positional-arguments def _run_fleet_api_rest( host: str, port: int, ssl_keyfile: Optional[str], ssl_certfile: Optional[str], - state_factory: StateFactory, + state_factory: LinkStateFactory, ffs_factory: FfsFactory, num_workers: int, ) -> None: @@ -586,6 +604,7 @@ def _parse_args_run_superlink() -> argparse.ArgumentParser: _add_args_common(parser=parser) _add_args_driver_api(parser=parser) _add_args_fleet_api(parser=parser) + _add_args_exec_api(parser=parser) return parser @@ -617,6 +636,19 @@ def _add_args_common(parser: argparse.ArgumentParser) -> None: "to create a secure connection.", type=str, ) + parser.add_argument( + "--isolation", + default=ISOLATION_MODE_SUBPROCESS, + required=False, + choices=[ + ISOLATION_MODE_SUBPROCESS, + ISOLATION_MODE_PROCESS, + ], + help="Isolation mode when running a `ServerApp` (`subprocess` by default, " + "possible values: `subprocess`, `process`). Use `subprocess` to configure " + "SuperLink to run a `ServerApp` in a subprocess. 
Use `process` to indicate " + "that a separate independent process gets created outside of SuperLink.", + ) parser.add_argument( "--database", help="A string representing the path to the database " @@ -653,7 +685,7 @@ def _add_args_driver_api(parser: argparse.ArgumentParser) -> None: parser.add_argument( "--driver-api-address", help="Driver API (gRPC) server address (IPv4, IPv6, or a domain name).", - default=ADDRESS_DRIVER_API, + default=DRIVER_API_DEFAULT_ADDRESS, ) @@ -680,3 +712,29 @@ def _add_args_fleet_api(parser: argparse.ArgumentParser) -> None: type=int, help="Set the number of concurrent workers for the Fleet API server.", ) + + +def _add_args_exec_api(parser: argparse.ArgumentParser) -> None: + """Add command line arguments for Exec API.""" + parser.add_argument( + "--exec-api-address", + help="Exec API server address (IPv4, IPv6, or a domain name)", + default=EXEC_API_DEFAULT_ADDRESS, + ) + parser.add_argument( + "--executor", + help="For example: `deployment:exec` or `project.package.module:wrapper.exec`. " + "The default is `flwr.superexec.deployment:executor`", + default="flwr.superexec.deployment:executor", + ) + parser.add_argument( + "--executor-dir", + help="The directory for the executor.", + default=".", + ) + parser.add_argument( + "--executor-config", + help="Key-value pairs for the executor config, separated by spaces. 
" + "For example:\n\n`--executor-config 'verbose=true " + 'root-certificates="certificates/superlink-ca.crt"\'`', + ) diff --git a/src/py/flwr/server/client_manager.py b/src/py/flwr/server/client_manager.py index 7956e282bd2c..9949e29f8f7d 100644 --- a/src/py/flwr/server/client_manager.py +++ b/src/py/flwr/server/client_manager.py @@ -19,7 +19,7 @@ import threading from abc import ABC, abstractmethod from logging import INFO -from typing import Dict, List, Optional +from typing import Optional from flwr.common.logger import log @@ -47,6 +47,7 @@ def register(self, client: ClientProxy) -> bool: Parameters ---------- client : flwr.server.client_proxy.ClientProxy + The ClientProxy of the Client to register. Returns ------- @@ -64,10 +65,11 @@ def unregister(self, client: ClientProxy) -> None: Parameters ---------- client : flwr.server.client_proxy.ClientProxy + The ClientProxy of the Client to unregister. """ @abstractmethod - def all(self) -> Dict[str, ClientProxy]: + def all(self) -> dict[str, ClientProxy]: """Return all available clients.""" @abstractmethod @@ -80,7 +82,7 @@ def sample( num_clients: int, min_num_clients: Optional[int] = None, criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: + ) -> list[ClientProxy]: """Sample a number of Flower ClientProxy instances.""" @@ -88,7 +90,7 @@ class SimpleClientManager(ClientManager): """Provides a pool of available clients.""" def __init__(self) -> None: - self.clients: Dict[str, ClientProxy] = {} + self.clients: dict[str, ClientProxy] = {} self._cv = threading.Condition() def __len__(self) -> int: @@ -170,7 +172,7 @@ def unregister(self, client: ClientProxy) -> None: with self._cv: self._cv.notify_all() - def all(self) -> Dict[str, ClientProxy]: + def all(self) -> dict[str, ClientProxy]: """Return all available clients.""" return self.clients @@ -179,7 +181,7 @@ def sample( num_clients: int, min_num_clients: Optional[int] = None, criterion: Optional[Criterion] = None, - ) -> List[ClientProxy]: + ) -> 
list[ClientProxy]: """Sample a number of Flower ClientProxy instances.""" # Block until at least num_clients are connected. if min_num_clients is None: diff --git a/src/py/flwr/server/compat/app.py b/src/py/flwr/server/compat/app.py index e978359fa828..1d3e5024ba90 100644 --- a/src/py/flwr/server/compat/app.py +++ b/src/py/flwr/server/compat/app.py @@ -18,7 +18,6 @@ from logging import INFO from typing import Optional -from flwr.common import EventType, event from flwr.common.logger import log from flwr.server.client_manager import ClientManager from flwr.server.history import History @@ -65,8 +64,6 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals hist : flwr.server.history.History Object containing training and evaluation metrics. """ - event(EventType.START_DRIVER_ENTER) - # Initialize the Driver API server and config initialized_server, initialized_config = init_defaults( server=server, @@ -96,6 +93,4 @@ def start_driver( # pylint: disable=too-many-arguments, too-many-locals f_stop.set() thread.join() - event(EventType.START_SERVER_LEAVE) - return hist diff --git a/src/py/flwr/server/compat/app_utils.py b/src/py/flwr/server/compat/app_utils.py index baff27307b88..8d2479f47d40 100644 --- a/src/py/flwr/server/compat/app_utils.py +++ b/src/py/flwr/server/compat/app_utils.py @@ -16,7 +16,6 @@ import threading -from typing import Dict, Tuple from ..client_manager import ClientManager from ..compat.driver_client_proxy import DriverClientProxy @@ -26,7 +25,7 @@ def start_update_client_manager_thread( driver: Driver, client_manager: ClientManager, -) -> Tuple[threading.Thread, threading.Event]: +) -> tuple[threading.Thread, threading.Event]: """Periodically update the nodes list in the client manager in a thread. 
This function starts a thread that periodically uses the associated driver to @@ -73,7 +72,7 @@ def _update_client_manager( ) -> None: """Update the nodes list in the client manager.""" # Loop until the driver is disconnected - registered_nodes: Dict[int, DriverClientProxy] = {} + registered_nodes: dict[int, DriverClientProxy] = {} while not f_stop.is_set(): all_node_ids = set(driver.get_node_ids()) dead_nodes = set(registered_nodes).difference(all_node_ids) diff --git a/src/py/flwr/server/compat/driver_client_proxy.py b/src/py/flwr/server/compat/driver_client_proxy.py index 7190786784ec..c5a3f561d474 100644 --- a/src/py/flwr/server/compat/driver_client_proxy.py +++ b/src/py/flwr/server/compat/driver_client_proxy.py @@ -15,7 +15,6 @@ """Flower ClientProxy implementation for Driver API.""" -import time from typing import Optional from flwr import common @@ -25,8 +24,6 @@ from ..driver.driver import Driver -SLEEP_TIME = 1 - class DriverClientProxy(ClientProxy): """Flower client proxy which delegates work using the Driver API.""" @@ -122,29 +119,18 @@ def _send_receive_recordset( ttl=timeout, ) - # Push message - message_ids = list(self.driver.push_messages(messages=[message])) - if len(message_ids) != 1: - raise ValueError("Unexpected number of message_ids") - - message_id = message_ids[0] - if message_id == "": - raise ValueError(f"Failed to send message to node {self.node_id}") - - if timeout: - start_time = time.time() - - while True: - messages = list(self.driver.pull_messages(message_ids)) - if len(messages) == 1: - msg: Message = messages[0] - if msg.has_error(): - raise ValueError( - f"Message contains an Error (reason: {msg.error.reason}). " - "It originated during client-side execution of a message." 
- ) - return msg.content - - if timeout is not None and time.time() > start_time + timeout: - raise RuntimeError("Timeout reached") - time.sleep(SLEEP_TIME) + # Send message and wait for reply + messages = list(self.driver.send_and_receive(messages=[message])) + + # A single reply is expected + if len(messages) != 1: + raise ValueError(f"Expected one Message but got: {len(messages)}") + + # Only messages without errors can be handled beyond these point + msg: Message = messages[0] + if msg.has_error(): + raise ValueError( + f"Message contains an Error (reason: {msg.error.reason}). " + "It originated during client-side execution of a message." + ) + return msg.content diff --git a/src/py/flwr/server/compat/driver_client_proxy_test.py b/src/py/flwr/server/compat/driver_client_proxy_test.py index 31b917fa869b..5bad0b56c4c6 100644 --- a/src/py/flwr/server/compat/driver_client_proxy_test.py +++ b/src/py/flwr/server/compat/driver_client_proxy_test.py @@ -17,7 +17,8 @@ import unittest import unittest.mock -from typing import Any, Callable, Iterable, Optional, Union, cast +from collections.abc import Iterable +from typing import Any, Callable, Optional, Union, cast from unittest.mock import Mock import numpy as np @@ -51,8 +52,6 @@ RUN_ID = 61016 NODE_ID = 1 -INSTRUCTION_MESSAGE_ID = "mock instruction message id" -REPLY_MESSAGE_ID = "mock reply message id" class DriverClientProxyTestCase(unittest.TestCase): @@ -76,7 +75,7 @@ def test_get_properties(self) -> None: """Test positive case.""" # Prepare res = GetPropertiesRes(status=CLIENT_STATUS, properties=CLIENT_PROPERTIES) - self.driver.push_messages.side_effect = self._get_push_messages(res) + self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res) request_properties: Config = {"tensor_type": "str"} ins = GetPropertiesIns(config=request_properties) @@ -94,7 +93,7 @@ def test_get_parameters(self) -> None: status=CLIENT_STATUS, parameters=MESSAGE_PARAMETERS, ) - self.driver.push_messages.side_effect = 
self._get_push_messages(res) + self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res) ins = GetParametersIns(config={}) # Execute @@ -113,7 +112,7 @@ def test_fit(self) -> None: num_examples=10, metrics={}, ) - self.driver.push_messages.side_effect = self._get_push_messages(res) + self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res) parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) ins = FitIns(parameters, {}) @@ -133,7 +132,7 @@ def test_evaluate(self) -> None: num_examples=0, metrics={}, ) - self.driver.push_messages.side_effect = self._get_push_messages(res) + self.driver.send_and_receive.side_effect = self._exec_send_and_receive(res) parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np") ins = EvaluateIns(parameters, {}) @@ -147,7 +146,7 @@ def test_evaluate(self) -> None: def test_get_properties_and_fail(self) -> None: """Test negative case.""" # Prepare - self.driver.push_messages.side_effect = self._get_push_messages( + self.driver.send_and_receive.side_effect = self._exec_send_and_receive( None, error_reply=True ) request_properties: Config = {"tensor_type": "str"} @@ -162,7 +161,7 @@ def test_get_properties_and_fail(self) -> None: def test_get_parameters_and_fail(self) -> None: """Test negative case.""" # Prepare - self.driver.push_messages.side_effect = self._get_push_messages( + self.driver.send_and_receive.side_effect = self._exec_send_and_receive( None, error_reply=True ) ins = GetParametersIns(config={}) @@ -176,7 +175,7 @@ def test_get_parameters_and_fail(self) -> None: def test_fit_and_fail(self) -> None: """Test negative case.""" # Prepare - self.driver.push_messages.side_effect = self._get_push_messages( + self.driver.send_and_receive.side_effect = self._exec_send_and_receive( None, error_reply=True ) parameters = flwr.common.ndarrays_to_parameters([np.ones((2, 2))]) @@ -189,7 +188,7 @@ def test_fit_and_fail(self) -> None: def test_evaluate_and_fail(self) -> None: 
"""Test negative case.""" # Prepare - self.driver.push_messages.side_effect = self._get_push_messages( + self.driver.send_and_receive.side_effect = self._exec_send_and_receive( None, error_reply=True ) parameters = Parameters(tensors=[b"random params%^&*F"], tensor_type="np") @@ -201,7 +200,7 @@ def test_evaluate_and_fail(self) -> None: ) self._common_assertions(ins) - def _create_message_dummy( # pylint: disable=R0913 + def _create_message_dummy( # pylint: disable=R0913,too-many-positional-arguments self, content: RecordSet, message_type: str, @@ -228,19 +227,19 @@ def _create_message_dummy( # pylint: disable=R0913 self.created_msg = Message(metadata=metadata, content=content) return self.created_msg - def _get_push_messages( + def _exec_send_and_receive( self, res: Union[GetParametersRes, GetPropertiesRes, FitRes, EvaluateRes, None], error_reply: bool = False, - ) -> Callable[[Iterable[Message]], Iterable[str]]: - """Get the push_messages function that sets the return value of pull_messages - when called.""" + ) -> Callable[[Iterable[Message]], Iterable[Message]]: + """Get the generate_replies function that sets the return value of driver's + send_and_receive when called.""" - def push_messages(messages: Iterable[Message]) -> Iterable[str]: + def generate_replies(messages: Iterable[Message]) -> Iterable[Message]: msg = list(messages)[0] + recordset = None if error_reply: - recordset = None - ret = msg.create_error_reply(ERROR_REPLY) + pass elif isinstance(res, GetParametersRes): recordset = compat.getparametersres_to_recordset(res, True) elif isinstance(res, GetPropertiesRes): @@ -249,17 +248,16 @@ def push_messages(messages: Iterable[Message]) -> Iterable[str]: recordset = compat.fitres_to_recordset(res, True) elif isinstance(res, EvaluateRes): recordset = compat.evaluateres_to_recordset(res) - else: - raise ValueError(f"Unsupported type: {type(res)}") + if recordset is not None: ret = msg.create_reply(recordset) - ret.metadata.__dict__["_message_id"] = 
REPLY_MESSAGE_ID + else: + ret = msg.create_error_reply(ERROR_REPLY) - # Set the return value of `pull_messages` - self.driver.pull_messages.return_value = [ret] - return [INSTRUCTION_MESSAGE_ID] + # Reply messages given the push message + return [ret] - return push_messages + return generate_replies def _common_assertions(self, original_ins: Any) -> None: """Check common assertions.""" @@ -274,18 +272,9 @@ def _common_assertions(self, original_ins: Any) -> None: self.assertEqual(self.called_times, 1) self.assertEqual(actual_ins, original_ins) - # Check if push_messages is called once with expected args/kwargs. - self.driver.push_messages.assert_called_once() - try: - self.driver.push_messages.assert_any_call([self.created_msg]) - except AssertionError: - self.driver.push_messages.assert_any_call(messages=[self.created_msg]) - - # Check if pull_messages is called once with expected args/kwargs. - self.driver.pull_messages.assert_called_once() + # Check if send_and_receive is called once with expected args/kwargs. 
+ self.driver.send_and_receive.assert_called_once() try: - self.driver.pull_messages.assert_called_with([INSTRUCTION_MESSAGE_ID]) + self.driver.send_and_receive.assert_any_call([self.created_msg]) except AssertionError: - self.driver.pull_messages.assert_called_with( - message_ids=[INSTRUCTION_MESSAGE_ID] - ) + self.driver.send_and_receive.assert_any_call(messages=[self.created_msg]) diff --git a/src/py/flwr/server/driver/driver.py b/src/py/flwr/server/driver/driver.py index 4f888323e586..5a6ee691f3a9 100644 --- a/src/py/flwr/server/driver/driver.py +++ b/src/py/flwr/server/driver/driver.py @@ -16,7 +16,8 @@ from abc import ABC, abstractmethod -from typing import Iterable, List, Optional +from collections.abc import Iterable +from typing import Optional from flwr.common import Message, RecordSet from flwr.common.typing import Run @@ -31,7 +32,7 @@ def run(self) -> Run: """Run information.""" @abstractmethod - def create_message( # pylint: disable=too-many-arguments + def create_message( # pylint: disable=too-many-arguments,R0917 self, content: RecordSet, message_type: str, @@ -70,7 +71,7 @@ def create_message( # pylint: disable=too-many-arguments """ @abstractmethod - def get_node_ids(self) -> List[int]: + def get_node_ids(self) -> list[int]: """Get node IDs.""" @abstractmethod diff --git a/src/py/flwr/server/driver/grpc_driver.py b/src/py/flwr/server/driver/grpc_driver.py index 80ce9623ab3f..13c1c4152dad 100644 --- a/src/py/flwr/server/driver/grpc_driver.py +++ b/src/py/flwr/server/driver/grpc_driver.py @@ -16,12 +16,14 @@ import time import warnings +from collections.abc import Iterable from logging import DEBUG, WARNING -from typing import Iterable, List, Optional, cast +from typing import Optional, cast import grpc -from flwr.common import DEFAULT_TTL, EventType, Message, Metadata, RecordSet, event +from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet +from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS from flwr.common.grpc import 
create_channel from flwr.common.logger import log from flwr.common.serde import ( @@ -45,8 +47,6 @@ from .driver import Driver -DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091" - ERROR_MESSAGE_DRIVER_NOT_CONNECTED = """ [Driver] Error: Not connected. @@ -73,7 +73,7 @@ class GrpcDriver(Driver): def __init__( # pylint: disable=too-many-arguments self, run_id: int, - driver_service_address: str = DEFAULT_SERVER_ADDRESS_DRIVER, + driver_service_address: str = DRIVER_API_DEFAULT_ADDRESS, root_certificates: Optional[bytes] = None, ) -> None: self._run_id = run_id @@ -94,7 +94,6 @@ def _connect(self) -> None: This will not call GetRun. """ - event(EventType.DRIVER_CONNECT) if self._is_connected: log(WARNING, "Already connected") return @@ -108,7 +107,6 @@ def _connect(self) -> None: def _disconnect(self) -> None: """Disconnect from the Driver API.""" - event(EventType.DRIVER_DISCONNECT) if not self._is_connected: log(DEBUG, "Already disconnected") return @@ -160,7 +158,7 @@ def _check_message(self, message: Message) -> None: ): raise ValueError(f"Invalid message: {message}") - def create_message( # pylint: disable=too-many-arguments + def create_message( # pylint: disable=too-many-arguments,R0917 self, content: RecordSet, message_type: str, @@ -195,7 +193,7 @@ def create_message( # pylint: disable=too-many-arguments ) return Message(metadata=metadata, content=content) - def get_node_ids(self) -> List[int]: + def get_node_ids(self) -> list[int]: """Get node IDs.""" self._init_run() # Call GrpcDriverStub method @@ -212,7 +210,7 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: """ self._init_run() # Construct TaskIns - task_ins_list: List[TaskIns] = [] + task_ins_list: list[TaskIns] = [] for msg in messages: # Check message self._check_message(msg) @@ -258,7 +256,7 @@ def send_and_receive( # Pull messages end_time = time.time() + (timeout if timeout is not None else 0.0) - ret: List[Message] = [] + ret: list[Message] = [] while timeout is None or 
time.time() < end_time: res_msgs = self.pull_messages(msg_ids) ret.extend(res_msgs) diff --git a/src/py/flwr/server/driver/inmemory_driver.py b/src/py/flwr/server/driver/inmemory_driver.py index 53406796750f..4eb1eb9c1040 100644 --- a/src/py/flwr/server/driver/inmemory_driver.py +++ b/src/py/flwr/server/driver/inmemory_driver.py @@ -17,14 +17,15 @@ import time import warnings -from typing import Iterable, List, Optional, cast +from collections.abc import Iterable +from typing import Optional, cast from uuid import UUID from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet from flwr.common.serde import message_from_taskres, message_to_taskins from flwr.common.typing import Run from flwr.proto.node_pb2 import Node # pylint: disable=E0611 -from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.linkstate import LinkStateFactory from .driver import Driver @@ -38,16 +39,20 @@ class InMemoryDriver(Driver): The identifier of the run. state_factory : StateFactory A StateFactory embedding a state that this driver can interface with. + pull_interval : float (default=0.1) + Sleep duration between calls to `pull_messages`. 
""" def __init__( self, run_id: int, - state_factory: StateFactory, + state_factory: LinkStateFactory, + pull_interval: float = 0.1, ) -> None: self._run_id = run_id self._run: Optional[Run] = None self.state = state_factory.state() + self.pull_interval = pull_interval self.node = Node(node_id=0, anonymous=True) def _check_message(self, message: Message) -> None: @@ -77,7 +82,7 @@ def run(self) -> Run: self._init_run() return Run(**vars(cast(Run, self._run))) - def create_message( # pylint: disable=too-many-arguments + def create_message( # pylint: disable=too-many-arguments,R0917 self, content: RecordSet, message_type: str, @@ -112,7 +117,7 @@ def create_message( # pylint: disable=too-many-arguments ) return Message(metadata=metadata, content=content) - def get_node_ids(self) -> List[int]: + def get_node_ids(self) -> list[int]: """Get node IDs.""" self._init_run() return list(self.state.get_nodes(cast(Run, self._run).run_id)) @@ -123,7 +128,7 @@ def push_messages(self, messages: Iterable[Message]) -> Iterable[str]: This method takes an iterable of messages and sends each message to the node specified in `dst_node_id`. 
""" - task_ids: List[str] = [] + task_ids: list[str] = [] for msg in messages: # Check message self._check_message(msg) @@ -145,7 +150,7 @@ def pull_messages(self, message_ids: Iterable[str]) -> Iterable[Message]: """ msg_ids = {UUID(msg_id) for msg_id in message_ids} # Pull TaskRes - task_res_list = self.state.get_task_res(task_ids=msg_ids, limit=len(msg_ids)) + task_res_list = self.state.get_task_res(task_ids=msg_ids) # Delete tasks in state self.state.delete_tasks(msg_ids) # Convert TaskRes to Message @@ -169,7 +174,7 @@ def send_and_receive( # Pull messages end_time = time.time() + (timeout if timeout is not None else 0.0) - ret: List[Message] = [] + ret: list[Message] = [] while timeout is None or time.time() < end_time: res_msgs = self.pull_messages(msg_ids) ret.extend(res_msgs) @@ -179,5 +184,5 @@ def send_and_receive( if len(msg_ids) == 0: break # Sleep - time.sleep(3) + time.sleep(self.pull_interval) return ret diff --git a/src/py/flwr/server/driver/inmemory_driver_test.py b/src/py/flwr/server/driver/inmemory_driver_test.py index ddfdb249c1b4..bd961bd05936 100644 --- a/src/py/flwr/server/driver/inmemory_driver_test.py +++ b/src/py/flwr/server/driver/inmemory_driver_test.py @@ -17,7 +17,7 @@ import time import unittest -from typing import Iterable, List, Tuple +from collections.abc import Iterable from unittest.mock import MagicMock, patch from uuid import uuid4 @@ -32,13 +32,17 @@ ) from flwr.common.typing import Run from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 -from flwr.server.superlink.state import InMemoryState, SqliteState, StateFactory -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate import ( + InMemoryLinkState, + LinkStateFactory, + SqliteLinkState, +) +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from .inmemory_driver import InMemoryDriver -def push_messages(driver: InMemoryDriver, num_nodes: int) -> Tuple[Iterable[str], 
int]: +def push_messages(driver: InMemoryDriver, num_nodes: int) -> tuple[Iterable[str], int]: """Help push messages to state.""" for _ in range(num_nodes): driver.state.create_node(ping_interval=PING_MAX_INTERVAL) @@ -55,7 +59,7 @@ def push_messages(driver: InMemoryDriver, num_nodes: int) -> Tuple[Iterable[str] def get_replies( driver: InMemoryDriver, msg_ids: Iterable[str], node_id: int -) -> List[str]: +) -> list[str]: """Help create message replies and pull taskres from state.""" taskins = driver.state.get_task_ins(node_id, limit=len(list(msg_ids))) for taskin in taskins: @@ -227,12 +231,12 @@ def test_send_and_receive_messages_timeout(self) -> None: def test_task_store_consistency_after_push_pull_sqlitestate(self) -> None: """Test tasks are deleted in sqlite state once messages are pulled.""" # Prepare - state = StateFactory("").state() + state = LinkStateFactory("").state() self.driver = InMemoryDriver( state.create_run("", "", "", {}), MagicMock(state=lambda: state) ) msg_ids, node_id = push_messages(self.driver, self.num_nodes) - assert isinstance(state, SqliteState) + assert isinstance(state, SqliteLinkState) # Check recorded task_ins = state.query("SELECT * FROM task_ins;") @@ -253,11 +257,11 @@ def test_task_store_consistency_after_push_pull_sqlitestate(self) -> None: def test_task_store_consistency_after_push_pull_inmemory_state(self) -> None: """Test tasks are deleted in in-memory state once messages are pulled.""" # Prepare - state_factory = StateFactory(":flwr-in-memory-state:") + state_factory = LinkStateFactory(":flwr-in-memory-state:") state = state_factory.state() self.driver = InMemoryDriver(state.create_run("", "", "", {}), state_factory) msg_ids, node_id = push_messages(self.driver, self.num_nodes) - assert isinstance(state, InMemoryState) + assert isinstance(state, InMemoryLinkState) # Check recorded self.assertEqual(len(state.task_ins_store), len(list(msg_ids))) diff --git a/src/py/flwr/server/history.py b/src/py/flwr/server/history.py index 
291974a4323c..50daf2e04de6 100644 --- a/src/py/flwr/server/history.py +++ b/src/py/flwr/server/history.py @@ -17,7 +17,6 @@ import pprint from functools import reduce -from typing import Dict, List, Tuple from flwr.common.typing import Scalar @@ -26,11 +25,11 @@ class History: """History class for training and/or evaluation metrics collection.""" def __init__(self) -> None: - self.losses_distributed: List[Tuple[int, float]] = [] - self.losses_centralized: List[Tuple[int, float]] = [] - self.metrics_distributed_fit: Dict[str, List[Tuple[int, Scalar]]] = {} - self.metrics_distributed: Dict[str, List[Tuple[int, Scalar]]] = {} - self.metrics_centralized: Dict[str, List[Tuple[int, Scalar]]] = {} + self.losses_distributed: list[tuple[int, float]] = [] + self.losses_centralized: list[tuple[int, float]] = [] + self.metrics_distributed_fit: dict[str, list[tuple[int, Scalar]]] = {} + self.metrics_distributed: dict[str, list[tuple[int, Scalar]]] = {} + self.metrics_centralized: dict[str, list[tuple[int, Scalar]]] = {} def add_loss_distributed(self, server_round: int, loss: float) -> None: """Add one loss entry (from distributed evaluation).""" @@ -41,7 +40,7 @@ def add_loss_centralized(self, server_round: int, loss: float) -> None: self.losses_centralized.append((server_round, loss)) def add_metrics_distributed_fit( - self, server_round: int, metrics: Dict[str, Scalar] + self, server_round: int, metrics: dict[str, Scalar] ) -> None: """Add metrics entries (from distributed fit).""" for key in metrics: @@ -52,7 +51,7 @@ def add_metrics_distributed_fit( self.metrics_distributed_fit[key].append((server_round, metrics[key])) def add_metrics_distributed( - self, server_round: int, metrics: Dict[str, Scalar] + self, server_round: int, metrics: dict[str, Scalar] ) -> None: """Add metrics entries (from distributed evaluation).""" for key in metrics: @@ -63,7 +62,7 @@ def add_metrics_distributed( self.metrics_distributed[key].append((server_round, metrics[key])) def 
add_metrics_centralized( - self, server_round: int, metrics: Dict[str, Scalar] + self, server_round: int, metrics: dict[str, Scalar] ) -> None: """Add metrics entries (from centralized evaluation).""" for key in metrics: diff --git a/src/py/flwr/server/run_serverapp.py b/src/py/flwr/server/run_serverapp.py index 8f67c917c8ed..28a66e136639 100644 --- a/src/py/flwr/server/run_serverapp.py +++ b/src/py/flwr/server/run_serverapp.py @@ -31,21 +31,20 @@ get_project_config, get_project_dir, ) +from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS from flwr.common.logger import log, update_console_handler, warn_deprecated_feature from flwr.common.object_ref import load_app from flwr.common.typing import UserConfig -from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 +from flwr.proto.run_pb2 import ( # pylint: disable=E0611 CreateRunRequest, CreateRunResponse, ) -from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from .driver import Driver from .driver.grpc_driver import GrpcDriver from .server_app import LoadServerAppError, ServerApp -ADDRESS_DRIVER_API = "0.0.0.0:9091" - def run( driver: Driver, @@ -97,11 +96,26 @@ def run_server_app() -> None: args = _parse_args_run_server_app().parse_args() - if args.server != ADDRESS_DRIVER_API: + # Check if the server app reference is passed. + # Since Flower 1.11, passing a reference is not allowed. + app_path: Optional[str] = args.app + # If the provided app_path doesn't exist, and contains a ":", + # it is likely to be a server app reference instead of a path. + if app_path is not None and not Path(app_path).exists() and ":" in app_path: + sys.exit( + "It appears you've passed a reference like `server:app`.\n\n" + "Note that since version `1.11.0`, `flower-server-app` no longer supports " + "passing a reference to a `ServerApp` attribute. 
Instead, you need to pass " + "the path to Flower app via the argument `--app`. This is the path to a " + "directory containing a `pyproject.toml`. You can create a valid Flower " + "app by executing `flwr new` and following the prompt." + ) + + if args.server != DRIVER_API_DEFAULT_ADDRESS: warn = "Passing flag --server is deprecated. Use --superlink instead." warn_deprecated_feature(warn) - if args.superlink != ADDRESS_DRIVER_API: + if args.superlink != DRIVER_API_DEFAULT_ADDRESS: # if `--superlink` also passed, then # warn user that this argument overrides what was passed with `--server` log( @@ -151,7 +165,6 @@ def run_server_app() -> None: cert_path, ) - app_path: Optional[str] = args.app if not (app_path is None) ^ (args.run_id is None): raise sys.exit( "Please provide either a Flower App path or a Run ID, but not both. " @@ -168,19 +181,17 @@ def run_server_app() -> None: ) flwr_dir = get_flwr_dir(args.flwr_dir) run_ = driver.run - if run_.fab_hash: - fab_req = GetFabRequest(hash_str=run_.fab_hash) - # pylint: disable-next=W0212 - fab_res: GetFabResponse = driver._stub.GetFab(fab_req) - if fab_res.fab.hash_str != run_.fab_hash: - raise ValueError("FAB hashes don't match.") - - install_from_fab(fab_res.fab.content, flwr_dir, True) - fab_id, fab_version = get_fab_metadata(fab_res.fab.content) - else: - fab_id, fab_version = run_.fab_id, run_.fab_version - - app_path = str(get_project_dir(fab_id, fab_version, flwr_dir)) + if not run_.fab_hash: + raise ValueError("FAB hash not provided.") + fab_req = GetFabRequest(hash_str=run_.fab_hash) + # pylint: disable-next=W0212 + fab_res: GetFabResponse = driver._stub.GetFab(fab_req) + if fab_res.fab.hash_str != run_.fab_hash: + raise ValueError("FAB hashes don't match.") + install_from_fab(fab_res.fab.content, flwr_dir, True) + fab_id, fab_version = get_fab_metadata(fab_res.fab.content) + + app_path = str(get_project_dir(fab_id, fab_version, run_.fab_hash, flwr_dir)) config = get_project_config(app_path) else: # User 
provided `app_dir`, but not `--run-id` @@ -261,12 +272,12 @@ def _parse_args_run_server_app() -> argparse.ArgumentParser: ) parser.add_argument( "--server", - default=ADDRESS_DRIVER_API, + default=DRIVER_API_DEFAULT_ADDRESS, help="Server address", ) parser.add_argument( "--superlink", - default=ADDRESS_DRIVER_API, + default=DRIVER_API_DEFAULT_ADDRESS, help="SuperLink Driver API (gRPC-rere) address (IPv4, IPv6, or a domain name)", ) parser.add_argument( diff --git a/src/py/flwr/server/server.py b/src/py/flwr/server/server.py index 5e2a0c6b2719..bdaa11ba20a2 100644 --- a/src/py/flwr/server/server.py +++ b/src/py/flwr/server/server.py @@ -19,7 +19,7 @@ import io import timeit from logging import INFO, WARN -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import ( Code, @@ -41,17 +41,17 @@ from .server_config import ServerConfig -FitResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, FitRes]], - List[Union[Tuple[ClientProxy, FitRes], BaseException]], +FitResultsAndFailures = tuple[ + list[tuple[ClientProxy, FitRes]], + list[Union[tuple[ClientProxy, FitRes], BaseException]], ] -EvaluateResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, EvaluateRes]], - List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], +EvaluateResultsAndFailures = tuple[ + list[tuple[ClientProxy, EvaluateRes]], + list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], ] -ReconnectResultsAndFailures = Tuple[ - List[Tuple[ClientProxy, DisconnectRes]], - List[Union[Tuple[ClientProxy, DisconnectRes], BaseException]], +ReconnectResultsAndFailures = tuple[ + list[tuple[ClientProxy, DisconnectRes]], + list[Union[tuple[ClientProxy, DisconnectRes], BaseException]], ] @@ -84,7 +84,7 @@ def client_manager(self) -> ClientManager: return self._client_manager # pylint: disable=too-many-locals - def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float]: + def fit(self, num_rounds: int, timeout: Optional[float]) -> 
tuple[History, float]: """Run federated averaging for a number of rounds.""" history = History() @@ -163,7 +163,7 @@ def evaluate_round( server_round: int, timeout: Optional[float], ) -> Optional[ - Tuple[Optional[float], Dict[str, Scalar], EvaluateResultsAndFailures] + tuple[Optional[float], dict[str, Scalar], EvaluateResultsAndFailures] ]: """Validate current global model on a number of clients.""" # Get clients and their respective instructions from strategy @@ -197,9 +197,9 @@ def evaluate_round( ) # Aggregate the evaluation results - aggregated_result: Tuple[ + aggregated_result: tuple[ Optional[float], - Dict[str, Scalar], + dict[str, Scalar], ] = self.strategy.aggregate_evaluate(server_round, results, failures) loss_aggregated, metrics_aggregated = aggregated_result @@ -210,7 +210,7 @@ def fit_round( server_round: int, timeout: Optional[float], ) -> Optional[ - Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] + tuple[Optional[Parameters], dict[str, Scalar], FitResultsAndFailures] ]: """Perform a single round of federated averaging.""" # Get clients and their respective instructions from strategy @@ -245,9 +245,9 @@ def fit_round( ) # Aggregate training results - aggregated_result: Tuple[ + aggregated_result: tuple[ Optional[Parameters], - Dict[str, Scalar], + dict[str, Scalar], ] = self.strategy.aggregate_fit(server_round, results, failures) parameters_aggregated, metrics_aggregated = aggregated_result @@ -296,7 +296,7 @@ def _get_initial_parameters( def reconnect_clients( - client_instructions: List[Tuple[ClientProxy, ReconnectIns]], + client_instructions: list[tuple[ClientProxy, ReconnectIns]], max_workers: Optional[int], timeout: Optional[float], ) -> ReconnectResultsAndFailures: @@ -312,8 +312,8 @@ def reconnect_clients( ) # Gather results - results: List[Tuple[ClientProxy, DisconnectRes]] = [] - failures: List[Union[Tuple[ClientProxy, DisconnectRes], BaseException]] = [] + results: list[tuple[ClientProxy, DisconnectRes]] = [] + 
failures: list[Union[tuple[ClientProxy, DisconnectRes], BaseException]] = [] for future in finished_fs: failure = future.exception() if failure is not None: @@ -328,7 +328,7 @@ def reconnect_client( client: ClientProxy, reconnect: ReconnectIns, timeout: Optional[float], -) -> Tuple[ClientProxy, DisconnectRes]: +) -> tuple[ClientProxy, DisconnectRes]: """Instruct client to disconnect and (optionally) reconnect later.""" disconnect = client.reconnect( reconnect, @@ -339,7 +339,7 @@ def reconnect_client( def fit_clients( - client_instructions: List[Tuple[ClientProxy, FitIns]], + client_instructions: list[tuple[ClientProxy, FitIns]], max_workers: Optional[int], timeout: Optional[float], group_id: int, @@ -356,8 +356,8 @@ def fit_clients( ) # Gather results - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] for future in finished_fs: _handle_finished_future_after_fit( future=future, results=results, failures=failures @@ -367,7 +367,7 @@ def fit_clients( def fit_client( client: ClientProxy, ins: FitIns, timeout: Optional[float], group_id: int -) -> Tuple[ClientProxy, FitRes]: +) -> tuple[ClientProxy, FitRes]: """Refine parameters on a single client.""" fit_res = client.fit(ins, timeout=timeout, group_id=group_id) return client, fit_res @@ -375,8 +375,8 @@ def fit_client( def _handle_finished_future_after_fit( future: concurrent.futures.Future, # type: ignore - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], ) -> None: """Convert finished future into either a result or a failure.""" # Check if there was an exception @@ -386,7 +386,7 @@ def _handle_finished_future_after_fit( return # Successfully received a 
result from a client - result: Tuple[ClientProxy, FitRes] = future.result() + result: tuple[ClientProxy, FitRes] = future.result() _, res = result # Check result status code @@ -399,7 +399,7 @@ def _handle_finished_future_after_fit( def evaluate_clients( - client_instructions: List[Tuple[ClientProxy, EvaluateIns]], + client_instructions: list[tuple[ClientProxy, EvaluateIns]], max_workers: Optional[int], timeout: Optional[float], group_id: int, @@ -416,8 +416,8 @@ def evaluate_clients( ) # Gather results - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] for future in finished_fs: _handle_finished_future_after_evaluate( future=future, results=results, failures=failures @@ -430,7 +430,7 @@ def evaluate_client( ins: EvaluateIns, timeout: Optional[float], group_id: int, -) -> Tuple[ClientProxy, EvaluateRes]: +) -> tuple[ClientProxy, EvaluateRes]: """Evaluate parameters on a single client.""" evaluate_res = client.evaluate(ins, timeout=timeout, group_id=group_id) return client, evaluate_res @@ -438,8 +438,8 @@ def evaluate_client( def _handle_finished_future_after_evaluate( future: concurrent.futures.Future, # type: ignore - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], ) -> None: """Convert finished future into either a result or a failure.""" # Check if there was an exception @@ -449,7 +449,7 @@ def _handle_finished_future_after_evaluate( return # Successfully received a result from a client - result: Tuple[ClientProxy, EvaluateRes] = future.result() + result: tuple[ClientProxy, EvaluateRes] = future.result() _, res = result # Check result status code @@ -466,7 
+466,7 @@ def init_defaults( config: Optional[ServerConfig], strategy: Optional[Strategy], client_manager: Optional[ClientManager], -) -> Tuple[Server, ServerConfig]: +) -> tuple[Server, ServerConfig]: """Create server instance if none was given.""" if server is None: if client_manager is None: diff --git a/src/py/flwr/server/server_app.py b/src/py/flwr/server/server_app.py index e9cb4ddcaf0d..9d91be88e94e 100644 --- a/src/py/flwr/server/server_app.py +++ b/src/py/flwr/server/server_app.py @@ -71,7 +71,7 @@ class ServerApp: >>> print("ServerApp running") """ - # pylint: disable=too-many-arguments + # pylint: disable=too-many-arguments,too-many-positional-arguments def __init__( self, server: Optional[Server] = None, diff --git a/src/py/flwr/server/server_test.py b/src/py/flwr/server/server_test.py index f47b5c3d8469..6e8f423fe115 100644 --- a/src/py/flwr/server/server_test.py +++ b/src/py/flwr/server/server_test.py @@ -19,7 +19,7 @@ import csv import tempfile from pathlib import Path -from typing import List, Optional +from typing import Optional import numpy as np from cryptography.hazmat.primitives.asymmetric import ec @@ -55,7 +55,7 @@ ) from flwr.server.client_manager import SimpleClientManager -from .app import _try_setup_client_authentication +from .app import _try_setup_node_authentication from .client_proxy import ClientProxy from .server import Server, evaluate_clients, fit_clients @@ -143,7 +143,7 @@ def reconnect( def test_fit_clients() -> None: """Test fit_clients.""" # Prepare - clients: List[ClientProxy] = [ + clients: list[ClientProxy] = [ FailingClient("0"), SuccessClient("1"), ] @@ -164,7 +164,7 @@ def test_fit_clients() -> None: def test_eval_clients() -> None: """Test eval_clients.""" # Prepare - clients: List[ClientProxy] = [ + clients: list[ClientProxy] = [ FailingClient("0"), SuccessClient("1"), ] @@ -203,8 +203,8 @@ def test_set_max_workers() -> None: assert server.max_workers == 42 -def test_setup_client_auth() -> None: # pylint: 
disable=R0914 - """Test setup client authentication.""" +def test_setup_node_auth() -> None: # pylint: disable=R0914 + """Test setup node authentication.""" # Prepare _, first_public_key = generate_key_pairs() private_key, public_key = generate_key_pairs() @@ -220,12 +220,12 @@ def test_setup_client_auth() -> None: # pylint: disable=R0914 # Execute with tempfile.TemporaryDirectory() as temp_dir: # Initialize temporary files - client_keys_file_path = Path(temp_dir) / "client_keys.csv" + node_keys_file_path = Path(temp_dir) / "node_keys.csv" server_private_key_path = Path(temp_dir) / "server_private_key" server_public_key_path = Path(temp_dir) / "server_public_key" # Fill the files with relevant keys - with open(client_keys_file_path, "w", newline="", encoding="utf-8") as csvfile: + with open(node_keys_file_path, "w", newline="", encoding="utf-8") as csvfile: writer = csv.writer(csvfile) writer.writerow( [ @@ -240,15 +240,15 @@ def test_setup_client_auth() -> None: # pylint: disable=R0914 server_public_key_path.write_bytes(server_public_key) server_private_key_path.write_bytes(server_private_key) - # Mock argparse with `require-client-authentication`` flag + # Mock argparse with `require-node-authentication`` flag mock_args = argparse.Namespace( - auth_list_public_keys=str(client_keys_file_path), + auth_list_public_keys=str(node_keys_file_path), auth_superlink_private_key=str(server_private_key_path), auth_superlink_public_key=str(server_public_key_path), ) - # Run _try_setup_client_authentication - result = _try_setup_client_authentication(mock_args, (b"", b"", b"")) + # Run _try_setup_node_authentication + result = _try_setup_node_authentication(mock_args, (b"", b"", b"")) expected_private_key = load_ssh_private_key(server_private_key, None) expected_public_key = load_ssh_public_key(server_public_key) diff --git a/src/py/flwr/server/superlink/state/__init__.py b/src/py/flwr/server/serverapp/__init__.py similarity index 69% rename from 
src/py/flwr/server/superlink/state/__init__.py rename to src/py/flwr/server/serverapp/__init__.py index 9d3bd220403b..2873438e3c60 100644 --- a/src/py/flwr/server/superlink/state/__init__.py +++ b/src/py/flwr/server/serverapp/__init__.py @@ -12,17 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Flower server state.""" +"""Flower AppIO service.""" -from .in_memory_state import InMemoryState as InMemoryState -from .sqlite_state import SqliteState as SqliteState -from .state import State as State -from .state_factory import StateFactory as StateFactory +from .app import flwr_serverapp as flwr_serverapp __all__ = [ - "InMemoryState", - "SqliteState", - "State", - "StateFactory", + "flwr_serverapp", ] diff --git a/src/py/flwr/server/serverapp/app.py b/src/py/flwr/server/serverapp/app.py new file mode 100644 index 000000000000..a02761372097 --- /dev/null +++ b/src/py/flwr/server/serverapp/app.py @@ -0,0 +1,20 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower ServerApp process.""" + + +def flwr_serverapp() -> None: + """Run process-isolated Flower ServerApp.""" + raise NotImplementedError() diff --git a/src/py/flwr/server/strategy/aggregate.py b/src/py/flwr/server/strategy/aggregate.py index c668b55eebe6..94beacba0087 100644 --- a/src/py/flwr/server/strategy/aggregate.py +++ b/src/py/flwr/server/strategy/aggregate.py @@ -15,8 +15,8 @@ """Aggregation functions for strategy implementations.""" # mypy: disallow_untyped_calls=False -from functools import reduce -from typing import Any, Callable, List, Tuple +from functools import partial, reduce +from typing import Any, Callable, Union import numpy as np @@ -24,7 +24,7 @@ from flwr.server.client_proxy import ClientProxy -def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: +def aggregate(results: list[tuple[NDArrays, int]]) -> NDArrays: """Compute weighted average.""" # Calculate the total number of examples used during training num_examples_total = sum(num_examples for (_, num_examples) in results) @@ -42,7 +42,7 @@ def aggregate(results: List[Tuple[NDArrays, int]]) -> NDArrays: return weights_prime -def aggregate_inplace(results: List[Tuple[ClientProxy, FitRes]]) -> NDArrays: +def aggregate_inplace(results: list[tuple[ClientProxy, FitRes]]) -> NDArrays: """Compute in-place weighted average.""" # Count total examples num_examples_total = sum(fit_res.num_examples for (_, fit_res) in results) @@ -52,22 +52,36 @@ def aggregate_inplace(results: List[Tuple[ClientProxy, FitRes]]) -> NDArrays: fit_res.num_examples / num_examples_total for _, fit_res in results ] + def _try_inplace( + x: NDArray, y: Union[NDArray, float], np_binary_op: np.ufunc + ) -> NDArray: + return ( # type: ignore[no-any-return] + np_binary_op(x, y, out=x) + if np.can_cast(y, x.dtype, casting="same_kind") + else np_binary_op(x, np.array(y, x.dtype), out=x) + ) + # Let's do in-place aggregation # Get first 
result, then add up each other params = [ - scaling_factors[0] * x for x in parameters_to_ndarrays(results[0][1].parameters) + _try_inplace(x, scaling_factors[0], np_binary_op=np.multiply) + for x in parameters_to_ndarrays(results[0][1].parameters) ] - for i, (_, fit_res) in enumerate(results[1:]): + + for i, (_, fit_res) in enumerate(results[1:], start=1): res = ( - scaling_factors[i + 1] * x + _try_inplace(x, scaling_factors[i], np_binary_op=np.multiply) for x in parameters_to_ndarrays(fit_res.parameters) ) - params = [reduce(np.add, layer_updates) for layer_updates in zip(params, res)] + params = [ + reduce(partial(_try_inplace, np_binary_op=np.add), layer_updates) + for layer_updates in zip(params, res) + ] return params -def aggregate_median(results: List[Tuple[NDArrays, int]]) -> NDArrays: +def aggregate_median(results: list[tuple[NDArrays, int]]) -> NDArrays: """Compute median.""" # Create a list of weights and ignore the number of examples weights = [weights for weights, _ in results] @@ -80,7 +94,7 @@ def aggregate_median(results: List[Tuple[NDArrays, int]]) -> NDArrays: def aggregate_krum( - results: List[Tuple[NDArrays, int]], num_malicious: int, to_keep: int + results: list[tuple[NDArrays, int]], num_malicious: int, to_keep: int ) -> NDArrays: """Choose one parameter vector according to the Krum function. @@ -119,7 +133,7 @@ def aggregate_krum( # pylint: disable=too-many-locals def aggregate_bulyan( - results: List[Tuple[NDArrays, int]], + results: list[tuple[NDArrays, int]], num_malicious: int, aggregation_rule: Callable, # type: ignore **aggregation_rule_kwargs: Any, @@ -128,7 +142,7 @@ def aggregate_bulyan( Parameters ---------- - results: List[Tuple[NDArrays, int]] + results: list[tuple[NDArrays, int]] Weights and number of samples for each of the client. num_malicious: int The maximum number of malicious clients. 
@@ -155,7 +169,7 @@ def aggregate_bulyan( "It is needed to ensure that the method reduces the attacker's leeway to " "the one proved in the paper." ) - selected_models_set: List[Tuple[NDArrays, int]] = [] + selected_models_set: list[tuple[NDArrays, int]] = [] theta = len(results) - 2 * num_malicious beta = theta - 2 * num_malicious @@ -200,7 +214,7 @@ def aggregate_bulyan( return parameters_aggregated -def weighted_loss_avg(results: List[Tuple[int, float]]) -> float: +def weighted_loss_avg(results: list[tuple[int, float]]) -> float: """Aggregate evaluation results obtained from multiple clients.""" num_total_evaluation_examples = sum(num_examples for (num_examples, _) in results) weighted_losses = [num_examples * loss for num_examples, loss in results] @@ -208,7 +222,7 @@ def weighted_loss_avg(results: List[Tuple[int, float]]) -> float: def aggregate_qffl( - parameters: NDArrays, deltas: List[NDArrays], hs_fll: List[NDArrays] + parameters: NDArrays, deltas: list[NDArrays], hs_fll: list[NDArrays] ) -> NDArrays: """Compute weighted average based on Q-FFL paper.""" demominator: float = np.sum(np.asarray(hs_fll)) @@ -225,7 +239,7 @@ def aggregate_qffl( return new_parameters -def _compute_distances(weights: List[NDArrays]) -> NDArray: +def _compute_distances(weights: list[NDArrays]) -> NDArray: """Compute distances between vectors. 
Input: weights - list of weights vectors @@ -265,7 +279,7 @@ def _trim_mean(array: NDArray, proportiontocut: float) -> NDArray: def aggregate_trimmed_avg( - results: List[Tuple[NDArrays, int]], proportiontocut: float + results: list[tuple[NDArrays, int]], proportiontocut: float ) -> NDArrays: """Compute trimmed average.""" # Create a list of weights and ignore the number of examples @@ -290,7 +304,7 @@ def _check_weights_equality(weights1: NDArrays, weights2: NDArrays) -> bool: def _find_reference_weights( - reference_weights: NDArrays, list_of_weights: List[NDArrays] + reference_weights: NDArrays, list_of_weights: list[NDArrays] ) -> int: """Find the reference weights by looping through the `list_of_weights`. @@ -320,7 +334,7 @@ def _find_reference_weights( def _aggregate_n_closest_weights( - reference_weights: NDArrays, results: List[Tuple[NDArrays, int]], beta_closest: int + reference_weights: NDArrays, results: list[tuple[NDArrays, int]], beta_closest: int ) -> NDArrays: """Calculate element-wise mean of the `N` closest values. 
@@ -332,7 +346,7 @@ def _aggregate_n_closest_weights( ---------- reference_weights: NDArrays The weights from which the distances will be computed - results: List[Tuple[NDArrays, int]] + results: list[tuple[NDArrays, int]] The weights from models beta_closest: int The number of the closest distance weights that will be averaged diff --git a/src/py/flwr/server/strategy/aggregate_test.py b/src/py/flwr/server/strategy/aggregate_test.py index f8b4e3c03b50..9f9dba79ec7c 100644 --- a/src/py/flwr/server/strategy/aggregate_test.py +++ b/src/py/flwr/server/strategy/aggregate_test.py @@ -15,8 +15,6 @@ """Aggregation function tests.""" -from typing import List, Tuple - import numpy as np from .aggregate import ( @@ -49,7 +47,7 @@ def test_aggregate() -> None: def test_weighted_loss_avg_single_value() -> None: """Test weighted loss averaging.""" # Prepare - results: List[Tuple[int, float]] = [(5, 0.5)] + results: list[tuple[int, float]] = [(5, 0.5)] expected = 0.5 # Execute @@ -62,7 +60,7 @@ def test_weighted_loss_avg_single_value() -> None: def test_weighted_loss_avg_multiple_values() -> None: """Test weighted loss averaging.""" # Prepare - results: List[Tuple[int, float]] = [(1, 2.0), (2, 1.0), (1, 2.0)] + results: list[tuple[int, float]] = [(1, 2.0), (2, 1.0), (1, 2.0)] expected = 1.5 # Execute diff --git a/src/py/flwr/server/strategy/bulyan.py b/src/py/flwr/server/strategy/bulyan.py index a81406c255ad..84a261237ac5 100644 --- a/src/py/flwr/server/strategy/bulyan.py +++ b/src/py/flwr/server/strategy/bulyan.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Optional, Union from flwr.common import ( FitRes, @@ -86,12 +86,12 @@ def __init__( num_malicious_clients: int = 0, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = 
None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -125,9 +125,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using Bulyan.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/bulyan_test.py b/src/py/flwr/server/strategy/bulyan_test.py index 93a9ebda3783..f5b7282fed2c 100644 --- a/src/py/flwr/server/strategy/bulyan_test.py +++ b/src/py/flwr/server/strategy/bulyan_test.py @@ -15,7 +15,6 @@ """Bulyan tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -62,7 +61,7 @@ def test_aggregate_fit() -> None: param_5: Parameters = ndarrays_to_parameters( [array([0.1, 0.1, 0.1, 0.1], dtype=float32)] ) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -125,7 +124,7 @@ def test_aggregate_fit() -> None: actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == 
expected[0]).all() diff --git a/src/py/flwr/server/strategy/dp_adaptive_clipping.py b/src/py/flwr/server/strategy/dp_adaptive_clipping.py index b25e1efdf0e9..c64091091c51 100644 --- a/src/py/flwr/server/strategy/dp_adaptive_clipping.py +++ b/src/py/flwr/server/strategy/dp_adaptive_clipping.py @@ -20,7 +20,7 @@ import math from logging import INFO, WARNING -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union import numpy as np @@ -88,7 +88,7 @@ class DifferentialPrivacyServerSideAdaptiveClipping(Strategy): >>> ) """ - # pylint: disable=too-many-arguments,too-many-instance-attributes + # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments def __init__( self, strategy: Strategy, @@ -156,14 +156,14 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" self.current_round_params = parameters_to_ndarrays(parameters) return self.strategy.configure_fit(server_round, parameters, client_manager) def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -172,9 +172,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results and update clip norms.""" if failures: 
return None, {} @@ -245,15 +245,15 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) @@ -307,7 +307,7 @@ class DifferentialPrivacyClientSideAdaptiveClipping(Strategy): >>> ) """ - # pylint: disable=too-many-arguments,too-many-instance-attributes + # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments def __init__( self, strategy: Strategy, @@ -372,7 +372,7 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" additional_config = {KEY_CLIPPING_NORM: self.clipping_norm} inner_strategy_config_result = self.strategy.configure_fit( @@ -385,7 +385,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -394,9 +394,9 @@ def configure_evaluate( def aggregate_fit( 
self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results and update clip norms.""" if failures: return None, {} @@ -432,7 +432,7 @@ def aggregate_fit( return aggregated_params, metrics - def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: + def _update_clip_norm(self, results: list[tuple[ClientProxy, FitRes]]) -> None: # Calculate the number of clients which set the norm indicator bit norm_bit_set_count = 0 for client_proxy, fit_res in results: @@ -457,14 +457,14 @@ def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) diff --git a/src/py/flwr/server/strategy/dp_fixed_clipping.py b/src/py/flwr/server/strategy/dp_fixed_clipping.py index 92b2845fd846..2ca253c96370 100644 --- a/src/py/flwr/server/strategy/dp_fixed_clipping.py +++ 
b/src/py/flwr/server/strategy/dp_fixed_clipping.py @@ -19,7 +19,7 @@ from logging import INFO, WARNING -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import ( EvaluateIns, @@ -117,14 +117,14 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" self.current_round_params = parameters_to_ndarrays(parameters) return self.strategy.configure_fit(server_round, parameters, client_manager) def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -133,9 +133,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Compute the updates, clip, and pass them for aggregation. Afterward, add noise to the aggregated parameters. 
@@ -191,15 +191,15 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) @@ -285,7 +285,7 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" additional_config = {KEY_CLIPPING_NORM: self.clipping_norm} inner_strategy_config_result = self.strategy.configure_fit( @@ -298,7 +298,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" return self.strategy.configure_evaluate( server_round, parameters, client_manager @@ -307,9 +307,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> 
tuple[Optional[Parameters], dict[str, Scalar]]: """Add noise to the aggregated parameters.""" if failures: return None, {} @@ -348,14 +348,14 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) diff --git a/src/py/flwr/server/strategy/dpfedavg_adaptive.py b/src/py/flwr/server/strategy/dpfedavg_adaptive.py index 423ddddeb379..170c9d619a7d 100644 --- a/src/py/flwr/server/strategy/dpfedavg_adaptive.py +++ b/src/py/flwr/server/strategy/dpfedavg_adaptive.py @@ -19,7 +19,7 @@ import math -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union import numpy as np @@ -39,7 +39,7 @@ class DPFedAvgAdaptive(DPFedAvgFixed): This class is deprecated and will be removed in a future release. 
""" - # pylint: disable=too-many-arguments,too-many-instance-attributes + # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments def __init__( self, strategy: Strategy, @@ -80,7 +80,7 @@ def __repr__(self) -> str: def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" additional_config = {"dpfedavg_adaptive_clip_enabled": True} @@ -93,7 +93,7 @@ def configure_fit( return client_instructions - def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: + def _update_clip_norm(self, results: list[tuple[ClientProxy, FitRes]]) -> None: # Calculating number of clients which set the norm indicator bit norm_bit_set_count = 0 for client_proxy, fit_res in results: @@ -118,9 +118,9 @@ def _update_clip_norm(self, results: List[Tuple[ClientProxy, FitRes]]) -> None: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results as in DPFedAvgFixed and update clip norms.""" if failures: return None, {} diff --git a/src/py/flwr/server/strategy/dpfedavg_fixed.py b/src/py/flwr/server/strategy/dpfedavg_fixed.py index d122f0688922..60f8c16f8e6d 100644 --- a/src/py/flwr/server/strategy/dpfedavg_fixed.py +++ b/src/py/flwr/server/strategy/dpfedavg_fixed.py @@ -17,7 +17,7 @@ Paper: arxiv.org/pdf/1710.06963.pdf """ -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.common.dp import 
add_gaussian_noise @@ -36,7 +36,7 @@ class DPFedAvgFixed(Strategy): This class is deprecated and will be removed in a future release. """ - # pylint: disable=too-many-arguments,too-many-instance-attributes + # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-positional-arguments def __init__( self, strategy: Strategy, @@ -79,7 +79,7 @@ def initialize_parameters( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training incorporating Differential Privacy (DP). Configuration of the next training round includes information related to DP, @@ -119,7 +119,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation using the specified strategy. 
Parameters @@ -147,9 +147,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results using unweighted aggregation.""" if failures: return None, {} @@ -168,14 +168,14 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using the given strategy.""" return self.strategy.aggregate_evaluate(server_round, results, failures) def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function from the strategy.""" return self.strategy.evaluate(server_round, parameters) diff --git a/src/py/flwr/server/strategy/fault_tolerant_fedavg.py b/src/py/flwr/server/strategy/fault_tolerant_fedavg.py index 663ac8872c39..60213db2efeb 100644 --- a/src/py/flwr/server/strategy/fault_tolerant_fedavg.py +++ b/src/py/flwr/server/strategy/fault_tolerant_fedavg.py @@ -16,7 +16,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( EvaluateRes, @@ -49,12 +49,12 @@ def __init__( min_available_clients: int = 1, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, 
Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, min_completion_rate_fit: float = 0.5, min_completion_rate_evaluate: float = 0.5, initial_parameters: Optional[Parameters] = None, @@ -85,9 +85,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -117,9 +117,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py b/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py index 98f4cac032cb..a01a3a5c0ad5 100644 --- a/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py +++ b/src/py/flwr/server/strategy/fault_tolerant_fedavg_test.py @@ -15,7 +15,7 @@ """FaultTolerantFedAvg tests.""" -from typing import List, 
Optional, Tuple, Union +from typing import Optional, Union from unittest.mock import MagicMock from flwr.common import ( @@ -36,8 +36,8 @@ def test_aggregate_fit_no_results_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.1) - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: Optional[Parameters] = None # Execute @@ -51,8 +51,8 @@ def test_aggregate_fit_no_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.1) - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [Exception()] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [Exception()] expected: Optional[Parameters] = None # Execute @@ -66,7 +66,7 @@ def test_aggregate_fit_not_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.5) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -77,7 +77,7 @@ def test_aggregate_fit_not_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [ + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [ Exception(), Exception(), ] @@ -94,7 +94,7 @@ def test_aggregate_fit_just_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.5) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -105,7 +105,7 @@ def test_aggregate_fit_just_enough_results() -> None: ), ) ] - failures: 
List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [Exception()] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [Exception()] expected: Optional[NDArrays] = [] # Execute @@ -120,7 +120,7 @@ def test_aggregate_fit_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_fit=0.99) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -131,7 +131,7 @@ def test_aggregate_fit_no_failures() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: Optional[NDArrays] = [] # Execute @@ -146,8 +146,8 @@ def test_aggregate_evaluate_no_results_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.1) - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] expected: Optional[float] = None # Execute @@ -161,8 +161,8 @@ def test_aggregate_evaluate_no_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.1) - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [ Exception() ] expected: Optional[float] = None @@ -178,7 +178,7 @@ def test_aggregate_evaluate_not_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.5) - results: List[Tuple[ClientProxy, EvaluateRes]] = [ + results: list[tuple[ClientProxy, 
EvaluateRes]] = [ ( MagicMock(), EvaluateRes( @@ -189,7 +189,7 @@ def test_aggregate_evaluate_not_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [ + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [ Exception(), Exception(), ] @@ -206,7 +206,7 @@ def test_aggregate_evaluate_just_enough_results() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.5) - results: List[Tuple[ClientProxy, EvaluateRes]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [ ( MagicMock(), EvaluateRes( @@ -217,7 +217,7 @@ def test_aggregate_evaluate_just_enough_results() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [ + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [ Exception() ] expected: Optional[float] = 2.3 @@ -233,7 +233,7 @@ def test_aggregate_evaluate_no_failures() -> None: """Test evaluate function.""" # Prepare strategy = FaultTolerantFedAvg(min_completion_rate_evaluate=0.99) - results: List[Tuple[ClientProxy, EvaluateRes]] = [ + results: list[tuple[ClientProxy, EvaluateRes]] = [ ( MagicMock(), EvaluateRes( @@ -244,7 +244,7 @@ def test_aggregate_evaluate_no_failures() -> None: ), ) ] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] expected: Optional[float] = 2.3 # Execute diff --git a/src/py/flwr/server/strategy/fedadagrad.py b/src/py/flwr/server/strategy/fedadagrad.py index f13c5358da25..75befdd0e796 100644 --- a/src/py/flwr/server/strategy/fedadagrad.py +++ b/src/py/flwr/server/strategy/fedadagrad.py @@ -20,7 +20,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -89,12 +89,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, 
NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, evaluate_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, accept_failures: bool = True, @@ -131,9 +131,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" fedavg_parameters_aggregated, metrics_aggregated = super().aggregate_fit( server_round=server_round, results=results, failures=failures diff --git a/src/py/flwr/server/strategy/fedadagrad_test.py b/src/py/flwr/server/strategy/fedadagrad_test.py index b43a4c75d123..6ac217b021b4 100644 --- a/src/py/flwr/server/strategy/fedadagrad_test.py +++ b/src/py/flwr/server/strategy/fedadagrad_test.py @@ -15,7 +15,6 @@ """FedAdagrad tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -54,7 +53,7 @@ def test_aggregate_fit() -> None: bridge = MagicMock() client_0 = GrpcClientProxy(cid="0", bridge=bridge) client_1 = GrpcClientProxy(cid="1", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( @@ -80,7 +79,7 @@ def test_aggregate_fit() -> None: 
actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == expected[0]).all() diff --git a/src/py/flwr/server/strategy/fedadam.py b/src/py/flwr/server/strategy/fedadam.py index dc90e90c7568..d0f87a43f79b 100644 --- a/src/py/flwr/server/strategy/fedadam.py +++ b/src/py/flwr/server/strategy/fedadam.py @@ -20,7 +20,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -93,12 +93,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Parameters, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -137,9 +137,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" fedavg_parameters_aggregated, metrics_aggregated = super().aggregate_fit( 
server_round=server_round, results=results, failures=failures diff --git a/src/py/flwr/server/strategy/fedavg.py b/src/py/flwr/server/strategy/fedavg.py index 3b9b2640c2b5..2d0b855c3186 100644 --- a/src/py/flwr/server/strategy/fedavg.py +++ b/src/py/flwr/server/strategy/fedavg.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( EvaluateIns, @@ -99,12 +99,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -138,12 +138,12 @@ def __repr__(self) -> str: rep = f"FedAvg(accept_failures={self.accept_failures})" return rep - def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_fit_clients(self, num_available_clients: int) -> tuple[int, int]: """Return the sample size and the required number of available clients.""" num_clients = int(num_available_clients * self.fraction_fit) return max(num_clients, self.min_fit_clients), self.min_available_clients - def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_evaluation_clients(self, num_available_clients: int) -> tuple[int, int]: """Use a fraction of available clients for evaluation.""" num_clients = int(num_available_clients * self.fraction_evaluate) return max(num_clients, 
self.min_evaluate_clients), self.min_available_clients @@ -158,7 +158,7 @@ def initialize_parameters( def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_fn is None: # No evaluation function provided @@ -172,7 +172,7 @@ def evaluate( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: @@ -193,7 +193,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction eval is 0. 
if self.fraction_evaluate == 0.0: @@ -220,9 +220,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -256,9 +256,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedavg_android.py b/src/py/flwr/server/strategy/fedavg_android.py index 2f49cf8784c9..bcecf8efb504 100644 --- a/src/py/flwr/server/strategy/fedavg_android.py +++ b/src/py/flwr/server/strategy/fedavg_android.py @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Callable, Optional, Union, cast import numpy as np @@ -81,12 +81,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + 
on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, ) -> None: @@ -107,12 +107,12 @@ def __repr__(self) -> str: rep = f"FedAvg(accept_failures={self.accept_failures})" return rep - def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_fit_clients(self, num_available_clients: int) -> tuple[int, int]: """Return the sample size and the required number of available clients.""" num_clients = int(num_available_clients * self.fraction_fit) return max(num_clients, self.min_fit_clients), self.min_available_clients - def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_evaluation_clients(self, num_available_clients: int) -> tuple[int, int]: """Use a fraction of available clients for evaluation.""" num_clients = int(num_available_clients * self.fraction_evaluate) return max(num_clients, self.min_evaluate_clients), self.min_available_clients @@ -127,7 +127,7 @@ def initialize_parameters( def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_fn is None: # No evaluation function provided @@ -141,7 +141,7 @@ def evaluate( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: @@ -162,7 +162,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction_evaluate is 
0 if self.fraction_evaluate == 0.0: @@ -189,9 +189,9 @@ def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -208,9 +208,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedavg_test.py b/src/py/flwr/server/strategy/fedavg_test.py index e62eaa5c5832..66241c3ab66a 100644 --- a/src/py/flwr/server/strategy/fedavg_test.py +++ b/src/py/flwr/server/strategy/fedavg_test.py @@ -15,7 +15,7 @@ """FedAvg tests.""" -from typing import List, Tuple, Union +from typing import Union from unittest.mock import MagicMock import numpy as np @@ -140,7 +140,7 @@ def test_inplace_aggregate_fit_equivalence() -> None: weights1_0 = np.random.randn(100, 64) weights1_1 = np.random.randn(314, 628, 3) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -160,7 +160,7 @@ def test_inplace_aggregate_fit_equivalence() -> None: ), ), ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] fedavg_reference = 
FedAvg(inplace=False) fedavg_inplace = FedAvg() diff --git a/src/py/flwr/server/strategy/fedavgm.py b/src/py/flwr/server/strategy/fedavgm.py index ab3d37249db6..a7c37c38770f 100644 --- a/src/py/flwr/server/strategy/fedavgm.py +++ b/src/py/flwr/server/strategy/fedavgm.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( FitRes, @@ -84,12 +84,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -132,9 +132,9 @@ def initialize_parameters( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedavgm_test.py b/src/py/flwr/server/strategy/fedavgm_test.py index 39da5f4b82c4..400fa3c97247 100644 --- a/src/py/flwr/server/strategy/fedavgm_test.py +++ b/src/py/flwr/server/strategy/fedavgm_test.py @@ -15,7 +15,7 @@ """FedAvgM tests.""" -from typing import 
List, Tuple, Union +from typing import Union from unittest.mock import MagicMock from numpy import array, float32 @@ -41,7 +41,7 @@ def test_aggregate_fit_using_near_one_server_lr_and_no_momentum() -> None: array([0, 0, 0, 0], dtype=float32), ] - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -61,7 +61,7 @@ def test_aggregate_fit_using_near_one_server_lr_and_no_momentum() -> None: ), ), ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: NDArrays = [ array([[1, 2, 3], [4, 5, 6]], dtype=float32), array([7, 8, 9, 10], dtype=float32), @@ -94,7 +94,7 @@ def test_aggregate_fit_server_learning_rate_and_momentum() -> None: array([0, 0, 0, 0], dtype=float32), ] - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( MagicMock(), FitRes( @@ -114,7 +114,7 @@ def test_aggregate_fit_server_learning_rate_and_momentum() -> None: ), ), ] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] expected: NDArrays = [ array([[1, 2, 3], [4, 5, 6]], dtype=float32), array([7, 8, 9, 10], dtype=float32), diff --git a/src/py/flwr/server/strategy/fedmedian.py b/src/py/flwr/server/strategy/fedmedian.py index e7cba5324fa8..35044d42b22c 100644 --- a/src/py/flwr/server/strategy/fedmedian.py +++ b/src/py/flwr/server/strategy/fedmedian.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import ( FitRes, @@ -46,9 +46,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, 
FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using median.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedmedian_test.py b/src/py/flwr/server/strategy/fedmedian_test.py index 3960ad70b145..bbce69c19ac5 100644 --- a/src/py/flwr/server/strategy/fedmedian_test.py +++ b/src/py/flwr/server/strategy/fedmedian_test.py @@ -15,7 +15,6 @@ """FedMedian tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -159,7 +158,7 @@ def test_aggregate_fit() -> None: client_0 = GrpcClientProxy(cid="0", bridge=bridge) client_1 = GrpcClientProxy(cid="1", bridge=bridge) client_2 = GrpcClientProxy(cid="2", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( @@ -194,7 +193,7 @@ def test_aggregate_fit() -> None: actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == expected[0]).all() diff --git a/src/py/flwr/server/strategy/fedopt.py b/src/py/flwr/server/strategy/fedopt.py index c581d4797123..3e143fc3ca59 100644 --- a/src/py/flwr/server/strategy/fedopt.py +++ b/src/py/flwr/server/strategy/fedopt.py @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, Optional, Tuple +from typing import Callable, Optional from flwr.common import ( MetricsAggregationFn, @@ -86,12 +86,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - 
on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Parameters, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, diff --git a/src/py/flwr/server/strategy/fedprox.py b/src/py/flwr/server/strategy/fedprox.py index f15271e06060..218fece0491f 100644 --- a/src/py/flwr/server/strategy/fedprox.py +++ b/src/py/flwr/server/strategy/fedprox.py @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple +from typing import Callable, Optional from flwr.common import FitIns, MetricsAggregationFn, NDArrays, Parameters, Scalar from flwr.server.client_manager import ClientManager @@ -113,12 +113,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -148,7 +148,7 @@ def __repr__(self) -> str: def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training. 
Sends the proximal factor mu to the clients diff --git a/src/py/flwr/server/strategy/fedtrimmedavg.py b/src/py/flwr/server/strategy/fedtrimmedavg.py index 96b0d35e7a61..8a0e4e50fbff 100644 --- a/src/py/flwr/server/strategy/fedtrimmedavg.py +++ b/src/py/flwr/server/strategy/fedtrimmedavg.py @@ -17,7 +17,7 @@ Paper: arxiv.org/abs/1803.01498 """ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( FitRes, @@ -78,12 +78,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -114,9 +114,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using trimmed average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedxgb_bagging.py b/src/py/flwr/server/strategy/fedxgb_bagging.py index a74ee81976a6..1e55466808f8 100644 --- a/src/py/flwr/server/strategy/fedxgb_bagging.py +++ 
b/src/py/flwr/server/strategy/fedxgb_bagging.py @@ -17,7 +17,7 @@ import json from logging import WARNING -from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Callable, Optional, Union, cast from flwr.common import EvaluateRes, FitRes, Parameters, Scalar from flwr.common.logger import log @@ -34,8 +34,8 @@ def __init__( self, evaluate_function: Optional[ Callable[ - [int, Parameters, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, Parameters, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, **kwargs: Any, @@ -52,9 +52,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using bagging.""" if not results: return None, {} @@ -79,9 +79,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation metrics using average.""" if not results: return None, {} @@ -101,7 +101,7 @@ def aggregate_evaluate( def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_function is None: # No evaluation function provided @@ -152,7 +152,7 @@ def aggregate( 
return bst_prev_bytes -def _get_tree_nums(xgb_model_org: bytes) -> Tuple[int, int]: +def _get_tree_nums(xgb_model_org: bytes) -> tuple[int, int]: xgb_model = json.loads(bytearray(xgb_model_org)) # Get the number of trees tree_num = int( diff --git a/src/py/flwr/server/strategy/fedxgb_cyclic.py b/src/py/flwr/server/strategy/fedxgb_cyclic.py index 75025a89728b..c2dc3d797c7e 100644 --- a/src/py/flwr/server/strategy/fedxgb_cyclic.py +++ b/src/py/flwr/server/strategy/fedxgb_cyclic.py @@ -16,7 +16,7 @@ from logging import WARNING -from typing import Any, Dict, List, Optional, Tuple, Union, cast +from typing import Any, Optional, Union, cast from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.common.logger import log @@ -45,9 +45,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using bagging.""" if not results: return None, {} @@ -69,9 +69,9 @@ def aggregate_fit( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation metrics using average.""" if not results: return None, {} @@ -91,7 +91,7 @@ def aggregate_evaluate( def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: 
"""Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: @@ -117,7 +117,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction eval is 0. if self.fraction_evaluate == 0.0: diff --git a/src/py/flwr/server/strategy/fedxgb_nn_avg.py b/src/py/flwr/server/strategy/fedxgb_nn_avg.py index 4562663287ae..a7da4a919af7 100644 --- a/src/py/flwr/server/strategy/fedxgb_nn_avg.py +++ b/src/py/flwr/server/strategy/fedxgb_nn_avg.py @@ -22,7 +22,7 @@ from logging import WARNING -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Optional, Union from flwr.common import FitRes, Scalar, ndarrays_to_parameters, parameters_to_ndarrays from flwr.common.logger import log, warn_deprecated_feature @@ -56,7 +56,7 @@ def __repr__(self) -> str: def evaluate( self, server_round: int, parameters: Any - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate model parameters using an evaluation function.""" if self.evaluate_fn is None: # No evaluation function provided @@ -70,9 +70,9 @@ def evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Any], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Any], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/fedyogi.py b/src/py/flwr/server/strategy/fedyogi.py index c7b2ebb51667..11873d1b781f 100644 --- a/src/py/flwr/server/strategy/fedyogi.py 
+++ b/src/py/flwr/server/strategy/fedyogi.py @@ -18,7 +18,7 @@ """ -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -93,12 +93,12 @@ def __init__( min_available_clients: int = 2, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Parameters, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -137,9 +137,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" fedavg_parameters_aggregated, metrics_aggregated = super().aggregate_fit( server_round=server_round, results=results, failures=failures diff --git a/src/py/flwr/server/strategy/krum.py b/src/py/flwr/server/strategy/krum.py index 074d018c35a3..5d33874b9789 100644 --- a/src/py/flwr/server/strategy/krum.py +++ b/src/py/flwr/server/strategy/krum.py @@ -21,7 +21,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union from flwr.common import ( FitRes, @@ -87,12 +87,12 @@ def __init__( num_clients_to_keep: int = 0, evaluate_fn: 
Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -123,9 +123,9 @@ def __repr__(self) -> str: def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using Krum.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/krum_test.py b/src/py/flwr/server/strategy/krum_test.py index b34982325b39..ac068a8e6ba6 100644 --- a/src/py/flwr/server/strategy/krum_test.py +++ b/src/py/flwr/server/strategy/krum_test.py @@ -15,7 +15,6 @@ """Krum tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -160,7 +159,7 @@ def test_aggregate_fit() -> None: client_0 = GrpcClientProxy(cid="0", bridge=bridge) client_1 = GrpcClientProxy(cid="1", bridge=bridge) client_2 = GrpcClientProxy(cid="2", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( @@ -195,7 +194,7 @@ def test_aggregate_fit() -> None: actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if 
actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == expected[0]).all() diff --git a/src/py/flwr/server/strategy/multikrum_test.py b/src/py/flwr/server/strategy/multikrum_test.py index 7a1a4c3ecf38..d9c73fb4eb8f 100644 --- a/src/py/flwr/server/strategy/multikrum_test.py +++ b/src/py/flwr/server/strategy/multikrum_test.py @@ -15,7 +15,6 @@ """Krum tests.""" -from typing import List, Tuple from unittest.mock import MagicMock from numpy import array, float32 @@ -59,7 +58,7 @@ def test_aggregate_fit() -> None: client_1 = GrpcClientProxy(cid="1", bridge=bridge) client_2 = GrpcClientProxy(cid="2", bridge=bridge) - results: List[Tuple[ClientProxy, FitRes]] = [ + results: list[tuple[ClientProxy, FitRes]] = [ ( client_0, FitRes( @@ -94,7 +93,7 @@ def test_aggregate_fit() -> None: actual_aggregated, _ = strategy.aggregate_fit( server_round=1, results=results, failures=[] ) - if actual_aggregated: - actual_list = parameters_to_ndarrays(actual_aggregated) - actual = actual_list[0] + assert actual_aggregated + actual_list = parameters_to_ndarrays(actual_aggregated) + actual = actual_list[0] assert (actual == expected[0]).all() diff --git a/src/py/flwr/server/strategy/qfedavg.py b/src/py/flwr/server/strategy/qfedavg.py index 26a397d4cf8c..30a3cc53ee94 100644 --- a/src/py/flwr/server/strategy/qfedavg.py +++ b/src/py/flwr/server/strategy/qfedavg.py @@ -19,7 +19,7 @@ from logging import WARNING -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Callable, Optional, Union import numpy as np @@ -60,12 +60,12 @@ def __init__( min_available_clients: int = 1, evaluate_fn: Optional[ Callable[ - [int, NDArrays, Dict[str, Scalar]], - Optional[Tuple[float, Dict[str, Scalar]]], + [int, NDArrays, dict[str, Scalar]], + Optional[tuple[float, dict[str, Scalar]]], ] ] = None, - 
on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, - on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None, + on_fit_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, + on_evaluate_config_fn: Optional[Callable[[int], dict[str, Scalar]]] = None, accept_failures: bool = True, initial_parameters: Optional[Parameters] = None, fit_metrics_aggregation_fn: Optional[MetricsAggregationFn] = None, @@ -95,19 +95,19 @@ def __repr__(self) -> str: rep += f"q_param={self.q_param}, pre_weights={self.pre_weights})" return rep - def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_fit_clients(self, num_available_clients: int) -> tuple[int, int]: """Return the sample size and the required number of available clients.""" num_clients = int(num_available_clients * self.fraction_fit) return max(num_clients, self.min_fit_clients), self.min_available_clients - def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]: + def num_evaluation_clients(self, num_available_clients: int) -> tuple[int, int]: """Use a fraction of available clients for evaluation.""" num_clients = int(num_available_clients * self.fraction_evaluate) return max(num_clients, self.min_evaluate_clients), self.min_available_clients def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" weights = parameters_to_ndarrays(parameters) self.pre_weights = weights @@ -131,7 +131,7 @@ def configure_fit( def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation.""" # Do not configure federated evaluation if fraction_evaluate is 0 if self.fraction_evaluate == 0.0: @@ -158,9 +158,9 @@ 
def configure_evaluate( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: return None, {} @@ -229,9 +229,9 @@ def norm_grad(grad_list: NDArrays) -> float: def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation losses using weighted average.""" if not results: return None, {} diff --git a/src/py/flwr/server/strategy/strategy.py b/src/py/flwr/server/strategy/strategy.py index cfdfe2e246c5..14999e9a8993 100644 --- a/src/py/flwr/server/strategy/strategy.py +++ b/src/py/flwr/server/strategy/strategy.py @@ -16,7 +16,7 @@ from abc import ABC, abstractmethod -from typing import Dict, List, Optional, Tuple, Union +from typing import Optional, Union from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar from flwr.server.client_manager import ClientManager @@ -47,7 +47,7 @@ def initialize_parameters( @abstractmethod def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> list[tuple[ClientProxy, FitIns]]: """Configure the next round of training. 
Parameters @@ -72,9 +72,9 @@ def configure_fit( def aggregate_fit( self, server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, FitRes]], + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]], + ) -> tuple[Optional[Parameters], dict[str, Scalar]]: """Aggregate training results. Parameters @@ -108,7 +108,7 @@ def aggregate_fit( @abstractmethod def configure_evaluate( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, EvaluateIns]]: + ) -> list[tuple[ClientProxy, EvaluateIns]]: """Configure the next round of evaluation. Parameters @@ -134,9 +134,9 @@ def configure_evaluate( def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Aggregate evaluation results. Parameters @@ -164,7 +164,7 @@ def aggregate_evaluate( @abstractmethod def evaluate( self, server_round: int, parameters: Parameters - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Optional[tuple[float, dict[str, Scalar]]]: """Evaluate the current model parameters. 
This function can be used to perform centralized (i.e., server-side) evaluation diff --git a/src/py/flwr/server/superlink/driver/driver_grpc.py b/src/py/flwr/server/superlink/driver/driver_grpc.py index b7b914206f72..327d8244ba11 100644 --- a/src/py/flwr/server/superlink/driver/driver_grpc.py +++ b/src/py/flwr/server/superlink/driver/driver_grpc.py @@ -15,7 +15,7 @@ """Driver gRPC API.""" from logging import INFO -from typing import Optional, Tuple +from typing import Optional import grpc @@ -25,7 +25,7 @@ add_DriverServicer_to_server, ) from flwr.server.superlink.ffs.ffs_factory import FfsFactory -from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.linkstate import LinkStateFactory from ..fleet.grpc_bidi.grpc_server import generic_create_grpc_server from .driver_servicer import DriverServicer @@ -33,9 +33,9 @@ def run_driver_api_grpc( address: str, - state_factory: StateFactory, + state_factory: LinkStateFactory, ffs_factory: FfsFactory, - certificates: Optional[Tuple[bytes, bytes, bytes]], + certificates: Optional[tuple[bytes, bytes, bytes]], ) -> grpc.Server: """Run Driver API (gRPC, request-response).""" # Create Driver API gRPC server diff --git a/src/py/flwr/server/superlink/driver/driver_servicer.py b/src/py/flwr/server/superlink/driver/driver_servicer.py index 73cd1c73a6fd..41a1a64e8879 100644 --- a/src/py/flwr/server/superlink/driver/driver_servicer.py +++ b/src/py/flwr/server/superlink/driver/driver_servicer.py @@ -17,7 +17,7 @@ import time from logging import DEBUG -from typing import List, Optional, Set +from typing import Optional from uuid import UUID import grpc @@ -32,8 +32,6 @@ from flwr.common.typing import Fab from flwr.proto import driver_pb2_grpc # pylint: disable=E0611 from flwr.proto.driver_pb2 import ( # pylint: disable=E0611 - CreateRunRequest, - CreateRunResponse, GetNodesRequest, GetNodesResponse, PullTaskResRequest, @@ -44,6 +42,8 @@ from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: 
disable=E0611 from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.run_pb2 import ( # pylint: disable=E0611 + CreateRunRequest, + CreateRunResponse, GetRunRequest, GetRunResponse, Run, @@ -51,14 +51,16 @@ from flwr.proto.task_pb2 import TaskRes # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs import Ffs from flwr.server.superlink.ffs.ffs_factory import FfsFactory -from flwr.server.superlink.state import State, StateFactory +from flwr.server.superlink.linkstate import LinkState, LinkStateFactory from flwr.server.utils.validator import validate_task_ins_or_res class DriverServicer(driver_pb2_grpc.DriverServicer): """Driver API servicer.""" - def __init__(self, state_factory: StateFactory, ffs_factory: FfsFactory) -> None: + def __init__( + self, state_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: self.state_factory = state_factory self.ffs_factory = ffs_factory @@ -67,9 +69,9 @@ def GetNodes( ) -> GetNodesResponse: """Get available nodes.""" log(DEBUG, "DriverServicer.GetNodes") - state: State = self.state_factory.state() - all_ids: Set[int] = state.get_nodes(request.run_id) - nodes: List[Node] = [ + state: LinkState = self.state_factory.state() + all_ids: set[int] = state.get_nodes(request.run_id) + nodes: list[Node] = [ Node(node_id=node_id, anonymous=False) for node_id in all_ids ] return GetNodesResponse(nodes=nodes) @@ -79,7 +81,7 @@ def CreateRun( ) -> CreateRunResponse: """Create run ID.""" log(DEBUG, "DriverServicer.CreateRun") - state: State = self.state_factory.state() + state: LinkState = self.state_factory.state() if request.HasField("fab"): fab = fab_from_proto(request.fab) ffs: Ffs = self.ffs_factory.ffs() @@ -116,10 +118,10 @@ def PushTaskIns( _raise_if(bool(validation_errors), ", ".join(validation_errors)) # Init state - state: State = self.state_factory.state() + state: LinkState = self.state_factory.state() # Store each TaskIns - task_ids: List[Optional[UUID]] = [] + task_ids: list[Optional[UUID]] 
= [] for task_ins in request.task_ins_list: task_id: Optional[UUID] = state.store_task_ins(task_ins=task_ins) task_ids.append(task_id) @@ -135,10 +137,10 @@ def PullTaskRes( log(DEBUG, "DriverServicer.PullTaskRes") # Convert each task_id str to UUID - task_ids: Set[UUID] = {UUID(task_id) for task_id in request.task_ids} + task_ids: set[UUID] = {UUID(task_id) for task_id in request.task_ids} # Init state - state: State = self.state_factory.state() + state: LinkState = self.state_factory.state() # Register callback def on_rpc_done() -> None: @@ -155,7 +157,7 @@ def on_rpc_done() -> None: context.add_callback(on_rpc_done) # Read from state - task_res_list: List[TaskRes] = state.get_task_res(task_ids=task_ids, limit=None) + task_res_list: list[TaskRes] = state.get_task_res(task_ids=task_ids) context.set_code(grpc.StatusCode.OK) return PullTaskResResponse(task_res_list=task_res_list) @@ -167,7 +169,7 @@ def GetRun( log(DEBUG, "DriverServicer.GetRun") # Init state - state: State = self.state_factory.state() + state: LinkState = self.state_factory.state() # Retrieve run information run = state.get_run(request.run_id) diff --git a/src/py/flwr/server/superlink/ffs/disk_ffs.py b/src/py/flwr/server/superlink/ffs/disk_ffs.py index 98ec4f93498f..4f1ab05be9a2 100644 --- a/src/py/flwr/server/superlink/ffs/disk_ffs.py +++ b/src/py/flwr/server/superlink/ffs/disk_ffs.py @@ -17,7 +17,7 @@ import hashlib import json from pathlib import Path -from typing import Dict, List, Optional, Tuple +from typing import Optional from flwr.server.superlink.ffs.ffs import Ffs @@ -35,7 +35,7 @@ def __init__(self, base_dir: str) -> None: """ self.base_dir = Path(base_dir) - def put(self, content: bytes, meta: Dict[str, str]) -> str: + def put(self, content: bytes, meta: dict[str, str]) -> str: """Store bytes and metadata and return key (hash of content). 
Parameters @@ -58,7 +58,7 @@ def put(self, content: bytes, meta: Dict[str, str]) -> str: return content_hash - def get(self, key: str) -> Optional[Tuple[bytes, Dict[str, str]]]: + def get(self, key: str) -> Optional[tuple[bytes, dict[str, str]]]: """Return tuple containing the object content and metadata. Parameters @@ -90,7 +90,7 @@ def delete(self, key: str) -> None: (self.base_dir / key).unlink() (self.base_dir / f"{key}.META").unlink() - def list(self) -> List[str]: + def list(self) -> list[str]: """List all keys. Return all available keys in this `Ffs` instance. diff --git a/src/py/flwr/server/superlink/ffs/ffs.py b/src/py/flwr/server/superlink/ffs/ffs.py index fab3b1fdfb3e..b1d26e74c157 100644 --- a/src/py/flwr/server/superlink/ffs/ffs.py +++ b/src/py/flwr/server/superlink/ffs/ffs.py @@ -16,14 +16,14 @@ import abc -from typing import Dict, List, Optional, Tuple +from typing import Optional class Ffs(abc.ABC): # pylint: disable=R0904 """Abstract Flower File Storage interface for large objects.""" @abc.abstractmethod - def put(self, content: bytes, meta: Dict[str, str]) -> str: + def put(self, content: bytes, meta: dict[str, str]) -> str: """Store bytes and metadata and return sha256hex hash of data as str. Parameters @@ -40,7 +40,7 @@ def put(self, content: bytes, meta: Dict[str, str]) -> str: """ @abc.abstractmethod - def get(self, key: str) -> Optional[Tuple[bytes, Dict[str, str]]]: + def get(self, key: str) -> Optional[tuple[bytes, dict[str, str]]]: """Return tuple containing the object content and metadata. Parameters @@ -65,7 +65,7 @@ def delete(self, key: str) -> None: """ @abc.abstractmethod - def list(self) -> List[str]: + def list(self) -> list[str]: """List keys of all stored objects. Return all available keys in this `Ffs` instance. 
diff --git a/src/py/flwr/server/superlink/ffs/ffs_test.py b/src/py/flwr/server/superlink/ffs/ffs_test.py index f7fbbf1218e1..5cf28cfd2cbe 100644 --- a/src/py/flwr/server/superlink/ffs/ffs_test.py +++ b/src/py/flwr/server/superlink/ffs/ffs_test.py @@ -21,7 +21,6 @@ import tempfile import unittest from abc import abstractmethod -from typing import Dict from flwr.server.superlink.ffs import DiskFfs, Ffs @@ -65,7 +64,7 @@ def test_get(self) -> None: ffs: Ffs = self.ffs_factory() content_expected = b"content" hash_expected = hashlib.sha256(content_expected).hexdigest() - meta_expected: Dict[str, str] = {"meta_key": "meta_value"} + meta_expected: dict[str, str] = {"meta_key": "meta_value"} with open(os.path.join(self.tmp_dir.name, hash_expected), "wb") as file: file.write(content_expected) @@ -93,7 +92,7 @@ def test_delete(self) -> None: ffs: Ffs = self.ffs_factory() content_expected = b"content" hash_expected = hashlib.sha256(content_expected).hexdigest() - meta_expected: Dict[str, str] = {"meta_key": "meta_value"} + meta_expected: dict[str, str] = {"meta_key": "meta_value"} with open(os.path.join(self.tmp_dir.name, hash_expected), "wb") as file: file.write(content_expected) @@ -117,7 +116,7 @@ def test_list(self) -> None: ffs: Ffs = self.ffs_factory() content_expected = b"content" hash_expected = hashlib.sha256(content_expected).hexdigest() - meta_expected: Dict[str, str] = {"meta_key": "meta_value"} + meta_expected: dict[str, str] = {"meta_key": "meta_value"} with open(os.path.join(self.tmp_dir.name, hash_expected), "wb") as file: file.write(content_expected) diff --git a/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py index 278e20eb1d69..ffef57d89e8c 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_adapter/grpc_adapter_servicer.py @@ -16,12 +16,20 @@ from logging import DEBUG, INFO -from 
typing import Callable, Type, TypeVar +from typing import Callable, TypeVar import grpc from google.protobuf.message import Message as GrpcMessage +from flwr.common.constant import ( + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_NAME_KEY, + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_VERSION_KEY, + GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY, + GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY, + GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY, +) from flwr.common.logger import log +from flwr.common.version import package_name, package_version from flwr.proto import grpcadapter_pb2_grpc # pylint: disable=E0611 from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 @@ -40,21 +48,28 @@ from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.fleet.message_handler import message_handler -from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.linkstate import LinkStateFactory T = TypeVar("T", bound=GrpcMessage) def _handle( msg_container: MessageContainer, - request_type: Type[T], + request_type: type[T], handler: Callable[[T], GrpcMessage], ) -> MessageContainer: req = request_type.FromString(msg_container.grpc_message_content) res = handler(req) + res_cls = res.__class__ return MessageContainer( - metadata={}, - grpc_message_name=res.__class__.__qualname__, + metadata={ + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_NAME_KEY: package_name, + GRPC_ADAPTER_METADATA_FLOWER_PACKAGE_VERSION_KEY: package_version, + GRPC_ADAPTER_METADATA_FLOWER_VERSION_KEY: package_version, + GRPC_ADAPTER_METADATA_MESSAGE_MODULE_KEY: res_cls.__module__, + GRPC_ADAPTER_METADATA_MESSAGE_QUALNAME_KEY: res_cls.__qualname__, + }, + grpc_message_name=res_cls.__qualname__, grpc_message_content=res.SerializeToString(), ) @@ -62,7 +77,9 @@ def _handle( class 
GrpcAdapterServicer(grpcadapter_pb2_grpc.GrpcAdapterServicer): """Fleet API via GrpcAdapter servicer.""" - def __init__(self, state_factory: StateFactory, ffs_factory: FfsFactory) -> None: + def __init__( + self, state_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: self.state_factory = state_factory self.ffs_factory = ffs_factory diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py index 79f1a8f9902b..38f0dfdae299 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/flower_service_servicer.py @@ -19,7 +19,8 @@ """ import uuid -from typing import Callable, Iterator +from collections.abc import Iterator +from typing import Callable import grpc from iterators import TimeoutIterator diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py index 5fe0396696ab..476e2914f4d9 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge.py @@ -15,10 +15,11 @@ """Provides class GrpcBridge.""" +from collections.abc import Iterator from dataclasses import dataclass from enum import Enum from threading import Condition -from typing import Iterator, Optional +from typing import Optional from flwr.proto.transport_pb2 import ( # pylint: disable=E0611 ClientMessage, diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py index f9b6b97030f0..6d9e081d8dd4 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_bridge_test.py @@ -17,7 +17,7 @@ import time from threading import Thread -from typing import List, Union +from typing import Union from flwr.proto.transport_pb2 
import ( # pylint: disable=E0611 ClientMessage, @@ -32,7 +32,7 @@ def start_worker( - rounds: int, bridge: GrpcBridge, results: List[ClientMessage] + rounds: int, bridge: GrpcBridge, results: list[ClientMessage] ) -> Thread: """Simulate processing loop with five calls.""" @@ -59,7 +59,7 @@ def test_workflow_successful() -> None: """Test full workflow.""" # Prepare rounds = 5 - client_messages_received: List[ClientMessage] = [] + client_messages_received: list[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() @@ -90,7 +90,7 @@ def test_workflow_close() -> None: """ # Prepare rounds = 5 - client_messages_received: List[ClientMessage] = [] + client_messages_received: list[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() @@ -135,7 +135,7 @@ def test_ins_wrapper_iterator_close_while_blocking() -> None: """ # Prepare rounds = 5 - client_messages_received: List[ClientMessage] = [] + client_messages_received: list[ClientMessage] = [] bridge = GrpcBridge() ins_wrapper_iterator = bridge.ins_wrapper_iterator() diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py index dd78acb72fb1..9d2e13d5b107 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server.py @@ -17,8 +17,9 @@ import concurrent.futures import sys +from collections.abc import Sequence from logging import ERROR -from typing import Any, Callable, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Optional, Union import grpc @@ -46,7 +47,7 @@ AddServicerToServerFn = Callable[..., Any] -def valid_certificates(certificates: Tuple[bytes, bytes, bytes]) -> bool: +def valid_certificates(certificates: tuple[bytes, bytes, bytes]) -> bool: """Validate certificates tuple.""" is_valid = ( all(isinstance(certificate, bytes) for certificate in certificates) @@ -59,13 
+60,13 @@ def valid_certificates(certificates: Tuple[bytes, bytes, bytes]) -> bool: return is_valid -def start_grpc_server( # pylint: disable=too-many-arguments +def start_grpc_server( # pylint: disable=too-many-arguments,R0917 client_manager: ClientManager, server_address: str, max_concurrent_workers: int = 1000, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, keepalive_time_ms: int = 210000, - certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + certificates: Optional[tuple[bytes, bytes, bytes]] = None, ) -> grpc.Server: """Create and start a gRPC server running FlowerServiceServicer. @@ -155,25 +156,25 @@ def start_grpc_server( # pylint: disable=too-many-arguments return server -def generic_create_grpc_server( # pylint: disable=too-many-arguments +def generic_create_grpc_server( # pylint: disable=too-many-arguments,R0917 servicer_and_add_fn: Union[ - Tuple[FleetServicer, AddServicerToServerFn], - Tuple[GrpcAdapterServicer, AddServicerToServerFn], - Tuple[FlowerServiceServicer, AddServicerToServerFn], - Tuple[DriverServicer, AddServicerToServerFn], + tuple[FleetServicer, AddServicerToServerFn], + tuple[GrpcAdapterServicer, AddServicerToServerFn], + tuple[FlowerServiceServicer, AddServicerToServerFn], + tuple[DriverServicer, AddServicerToServerFn], ], server_address: str, max_concurrent_workers: int = 1000, max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, keepalive_time_ms: int = 210000, - certificates: Optional[Tuple[bytes, bytes, bytes]] = None, + certificates: Optional[tuple[bytes, bytes, bytes]] = None, interceptors: Optional[Sequence[grpc.ServerInterceptor]] = None, ) -> grpc.Server: """Create a gRPC server with a single servicer. Parameters ---------- - servicer_and_add_fn : Tuple + servicer_and_add_fn : tuple A tuple holding a servicer implementation and a matching add_Servicer_to_server function. server_address : str @@ -213,6 +214,8 @@ def generic_create_grpc_server( # pylint: disable=too-many-arguments * CA certificate. * server certificate. 
* server private key. + interceptors : Optional[Sequence[grpc.ServerInterceptor]] (default: None) + A list of gRPC interceptors. Returns ------- diff --git a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py index 7ff730b17afa..9635993e0ad5 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_bidi/grpc_server_test.py @@ -20,7 +20,7 @@ from contextlib import closing from os.path import abspath, dirname, join from pathlib import Path -from typing import Tuple, cast +from typing import cast from flwr.server.client_manager import SimpleClientManager from flwr.server.superlink.fleet.grpc_bidi.grpc_server import ( @@ -31,7 +31,7 @@ root_dir = dirname(abspath(join(__file__, "../../../../../../.."))) -def load_certificates() -> Tuple[str, str, str]: +def load_certificates() -> tuple[str, str, str]: """Generate and load SSL credentials/certificates. Utility function for loading for SSL-enabled gRPC servertests. 
diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py index e0501e54fafc..dacbab135057 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/fleet_servicer.py @@ -37,13 +37,15 @@ from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs_factory import FfsFactory from flwr.server.superlink.fleet.message_handler import message_handler -from flwr.server.superlink.state import StateFactory +from flwr.server.superlink.linkstate import LinkStateFactory class FleetServicer(fleet_pb2_grpc.FleetServicer): """Fleet API servicer.""" - def __init__(self, state_factory: StateFactory, ffs_factory: FfsFactory) -> None: + def __init__( + self, state_factory: LinkStateFactory, ffs_factory: FfsFactory + ) -> None: self.state_factory = state_factory self.ffs_factory = ffs_factory @@ -51,19 +53,22 @@ def CreateNode( self, request: CreateNodeRequest, context: grpc.ServicerContext ) -> CreateNodeResponse: """.""" - log(INFO, "FleetServicer.CreateNode") + log(INFO, "[Fleet.CreateNode] Request ping_interval=%s", request.ping_interval) + log(DEBUG, "[Fleet.CreateNode] Request: %s", request) response = message_handler.create_node( request=request, state=self.state_factory.state(), ) - log(INFO, "FleetServicer: Created node_id=%s", response.node.node_id) + log(INFO, "[Fleet.CreateNode] Created node_id=%s", response.node.node_id) + log(DEBUG, "[Fleet.CreateNode] Response: %s", response) return response def DeleteNode( self, request: DeleteNodeRequest, context: grpc.ServicerContext ) -> DeleteNodeResponse: """.""" - log(INFO, "FleetServicer.DeleteNode") + log(INFO, "[Fleet.DeleteNode] Delete node_id=%s", request.node.node_id) + log(DEBUG, "[Fleet.DeleteNode] Request: %s", request) return message_handler.delete_node( request=request, state=self.state_factory.state(), @@ -71,7 
+76,7 @@ def DeleteNode( def Ping(self, request: PingRequest, context: grpc.ServicerContext) -> PingResponse: """.""" - log(DEBUG, "FleetServicer.Ping") + log(DEBUG, "[Fleet.Ping] Request: %s", request) return message_handler.ping( request=request, state=self.state_factory.state(), @@ -81,7 +86,8 @@ def PullTaskIns( self, request: PullTaskInsRequest, context: grpc.ServicerContext ) -> PullTaskInsResponse: """Pull TaskIns.""" - log(INFO, "FleetServicer.PullTaskIns") + log(INFO, "[Fleet.PullTaskIns] node_id=%s", request.node.node_id) + log(DEBUG, "[Fleet.PullTaskIns] Request: %s", request) return message_handler.pull_task_ins( request=request, state=self.state_factory.state(), @@ -91,7 +97,14 @@ def PushTaskRes( self, request: PushTaskResRequest, context: grpc.ServicerContext ) -> PushTaskResResponse: """Push TaskRes.""" - log(INFO, "FleetServicer.PushTaskRes") + if request.task_res_list: + log( + INFO, + "[Fleet.PushTaskRes] Push results from node_id=%s", + request.task_res_list[0].task.producer.node_id, + ) + else: + log(INFO, "[Fleet.PushTaskRes] No task results to push") return message_handler.push_task_res( request=request, state=self.state_factory.state(), @@ -101,7 +114,7 @@ def GetRun( self, request: GetRunRequest, context: grpc.ServicerContext ) -> GetRunResponse: """Get run information.""" - log(INFO, "FleetServicer.GetRun") + log(INFO, "[Fleet.GetRun] Requesting `Run` for run_id=%s", request.run_id) return message_handler.get_run( request=request, state=self.state_factory.state(), @@ -111,7 +124,7 @@ def GetFab( self, request: GetFabRequest, context: grpc.ServicerContext ) -> GetFabResponse: """Get FAB.""" - log(DEBUG, "DriverServicer.GetFab") + log(INFO, "[Fleet.GetFab] Requesting FAB for fab_hash=%s", request.hash_str) return message_handler.get_fab( request=request, ffs=self.ffs_factory.ffs(), diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py index 
87ac45a4f9c8..2e7623c34241 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor.py @@ -16,8 +16,9 @@ import base64 +from collections.abc import Sequence from logging import INFO, WARNING -from typing import Any, Callable, Optional, Sequence, Tuple, Union +from typing import Any, Callable, Optional, Union import grpc from cryptography.hazmat.primitives.asymmetric import ec @@ -29,6 +30,7 @@ generate_shared_key, verify_hmac, ) +from flwr.proto.fab_pb2 import GetFabRequest, GetFabResponse # pylint: disable=E0611 from flwr.proto.fleet_pb2 import ( # pylint: disable=E0611 CreateNodeRequest, CreateNodeResponse, @@ -43,7 +45,7 @@ ) from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 -from flwr.server.superlink.state import State +from flwr.server.superlink.linkstate import LinkState _PUBLIC_KEY_HEADER = "public-key" _AUTH_TOKEN_HEADER = "auth-token" @@ -55,6 +57,7 @@ PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ] Response = Union[ @@ -64,11 +67,12 @@ PushTaskResResponse, GetRunResponse, PingResponse, + GetFabResponse, ] def _get_value_from_tuples( - key_string: str, tuples: Sequence[Tuple[str, Union[str, bytes]]] + key_string: str, tuples: Sequence[tuple[str, Union[str, bytes]]] ) -> bytes: value = next((value for key, value in tuples if key == key_string), "") if isinstance(value, str): @@ -78,13 +82,13 @@ def _get_value_from_tuples( class AuthenticateServerInterceptor(grpc.ServerInterceptor): # type: ignore - """Server interceptor for client authentication.""" + """Server interceptor for node authentication.""" - def __init__(self, state: State): + def __init__(self, state: LinkState): self.state = state - self.client_public_keys = state.get_client_public_keys() - if len(self.client_public_keys) == 0: + self.node_public_keys = state.get_node_public_keys() + if 
len(self.node_public_keys) == 0: log(WARNING, "Authentication enabled, but no known public keys configured") private_key = self.state.get_server_private_key() @@ -103,9 +107,9 @@ def intercept_service( ) -> grpc.RpcMethodHandler: """Flower server interceptor authentication logic. - Intercept all unary calls from clients and authenticate clients by validating - auth metadata sent by the client. Continue RPC call if client is authenticated, - else, terminate RPC call by setting context to abort. + Intercept all unary calls from nodes and authenticate nodes by validating auth + metadata sent by the node. Continue RPC call if node is authenticated, else, + terminate RPC call by setting context to abort. """ # One of the method handlers in # `flwr.server.superlink.fleet.grpc_rere.fleet_server.FleetServicer` @@ -119,17 +123,17 @@ def _generic_method_handler( request: Request, context: grpc.ServicerContext, ) -> Response: - client_public_key_bytes = base64.urlsafe_b64decode( + node_public_key_bytes = base64.urlsafe_b64decode( _get_value_from_tuples( _PUBLIC_KEY_HEADER, context.invocation_metadata() ) ) - if client_public_key_bytes not in self.client_public_keys: + if node_public_key_bytes not in self.node_public_keys: context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") if isinstance(request, CreateNodeRequest): response = self._create_authenticated_node( - client_public_key_bytes, request, context + node_public_key_bytes, request, context ) log( INFO, @@ -144,13 +148,13 @@ def _generic_method_handler( _AUTH_TOKEN_HEADER, context.invocation_metadata() ) ) - public_key = bytes_to_public_key(client_public_key_bytes) + public_key = bytes_to_public_key(node_public_key_bytes) if not self._verify_hmac(public_key, request, hmac_value): context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") # Verify node_id - node_id = self.state.get_node_id(client_public_key_bytes) + node_id = self.state.get_node_id(node_public_key_bytes) if not self._verify_node_id(node_id, 
request): context.abort(grpc.StatusCode.UNAUTHENTICATED, "Access denied") @@ -172,6 +176,7 @@ def _verify_node_id( PushTaskResRequest, GetRunRequest, PingRequest, + GetFabRequest, ], ) -> bool: if node_id is None: @@ -188,7 +193,8 @@ def _verify_hmac( self, public_key: ec.EllipticCurvePublicKey, request: Request, hmac_value: bytes ) -> bool: shared_secret = generate_shared_key(self.server_private_key, public_key) - return verify_hmac(shared_secret, request.SerializeToString(True), hmac_value) + message_bytes = request.SerializeToString(deterministic=True) + return verify_hmac(shared_secret, message_bytes, hmac_value) def _create_authenticated_node( self, diff --git a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py index ece443a816cb..d44f4eb7e8f9 100644 --- a/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py +++ b/src/py/flwr/server/superlink/fleet/grpc_rere/server_interceptor_test.py @@ -20,6 +20,7 @@ import grpc +from flwr.common.constant import FLEET_API_GRPC_RERE_DEFAULT_ADDRESS from flwr.common.secure_aggregation.crypto.symmetric_encryption import ( compute_hmac, generate_key_pairs, @@ -42,9 +43,9 @@ from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskRes # pylint: disable=E0611 -from flwr.server.app import ADDRESS_FLEET_API_GRPC_RERE, _run_fleet_api_grpc_rere +from flwr.server.app import _run_fleet_api_grpc_rere from flwr.server.superlink.ffs.ffs_factory import FfsFactory -from flwr.server.superlink.state.state_factory import StateFactory +from flwr.server.superlink.linkstate.linkstate_factory import LinkStateFactory from .server_interceptor import ( _AUTH_TOKEN_HEADER, @@ -58,10 +59,10 @@ class TestServerInterceptor(unittest.TestCase): # pylint: disable=R0902 def setUp(self) -> None: """Initialize mock stub 
and server interceptor.""" - self._client_private_key, self._client_public_key = generate_key_pairs() + self._node_private_key, self._node_public_key = generate_key_pairs() self._server_private_key, self._server_public_key = generate_key_pairs() - state_factory = StateFactory(":flwr-in-memory-state:") + state_factory = LinkStateFactory(":flwr-in-memory-state:") self.state = state_factory.state() ffs_factory = FfsFactory(".") self.ffs = ffs_factory.ffs() @@ -69,13 +70,11 @@ def setUp(self) -> None: private_key_to_bytes(self._server_private_key), public_key_to_bytes(self._server_public_key), ) - self.state.store_client_public_keys( - {public_key_to_bytes(self._client_public_key)} - ) + self.state.store_node_public_keys({public_key_to_bytes(self._node_public_key)}) self._server_interceptor = AuthenticateServerInterceptor(self.state) self._server: grpc.Server = _run_fleet_api_grpc_rere( - ADDRESS_FLEET_API_GRPC_RERE, + FLEET_API_GRPC_RERE_DEFAULT_ADDRESS, state_factory, ffs_factory, None, @@ -122,7 +121,7 @@ def test_successful_create_node_with_metadata(self) -> None: """Test server interceptor for creating node.""" # Prepare public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -145,9 +144,9 @@ def test_successful_create_node_with_metadata(self) -> None: def test_unsuccessful_create_node_with_metadata(self) -> None: """Test server interceptor for creating node unsuccessfully.""" # Prepare - _, client_public_key = generate_key_pairs() + _, node_public_key = generate_key_pairs() public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(client_public_key) + public_key_to_bytes(node_public_key) ) # Execute & Assert @@ -161,17 +160,17 @@ def test_successful_delete_node_with_metadata(self) -> None: """Test server interceptor for deleting node.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + 
ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = DeleteNodeRequest(node=Node(node_id=node_id)) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -191,16 +190,16 @@ def test_unsuccessful_delete_node_with_metadata(self) -> None: """Test server interceptor for deleting node unsuccessfully.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = DeleteNodeRequest(node=Node(node_id=node_id)) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -217,17 +216,17 @@ def test_successful_pull_task_ins_with_metadata(self) -> None: """Test server interceptor for pull task ins.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PullTaskInsRequest(node=Node(node_id=node_id)) shared_secret = 
generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -247,16 +246,16 @@ def test_unsuccessful_pull_task_ins_with_metadata(self) -> None: """Test server interceptor for pull task ins unsuccessfully.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PullTaskInsRequest(node=Node(node_id=node_id)) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -273,19 +272,19 @@ def test_successful_push_task_res_with_metadata(self) -> None: """Test server interceptor for push task res.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PushTaskResRequest( task_res_list=[TaskRes(task=Task(producer=Node(node_id=node_id)))] ) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, 
self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -305,18 +304,18 @@ def test_unsuccessful_push_task_res_with_metadata(self) -> None: """Test server interceptor for push task res unsuccessfully.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PushTaskResRequest( task_res_list=[TaskRes(task=Task(producer=Node(node_id=node_id)))] ) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -333,18 +332,18 @@ def test_successful_get_run_with_metadata(self) -> None: """Test server interceptor for pull task ins.""" # Prepare self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) run_id = self.state.create_run("", "", "", {}) request = GetRunRequest(run_id=run_id) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - 
compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -364,17 +363,17 @@ def test_unsuccessful_get_run_with_metadata(self) -> None: """Test server interceptor for pull task ins unsuccessfully.""" # Prepare self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) run_id = self.state.create_run("", "", "", {}) request = GetRunRequest(run_id=run_id) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -391,17 +390,17 @@ def test_successful_ping_with_metadata(self) -> None: """Test server interceptor for pull task ins.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PingRequest(node=Node(node_id=node_id)) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) 
public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute @@ -421,16 +420,16 @@ def test_unsuccessful_ping_with_metadata(self) -> None: """Test server interceptor for pull task ins unsuccessfully.""" # Prepare node_id = self.state.create_node( - ping_interval=30, public_key=public_key_to_bytes(self._client_public_key) + ping_interval=30, public_key=public_key_to_bytes(self._node_public_key) ) request = PingRequest(node=Node(node_id=node_id)) - client_private_key, _ = generate_key_pairs() - shared_secret = generate_shared_key(client_private_key, self._server_public_key) + node_private_key, _ = generate_key_pairs() + shared_secret = generate_shared_key(node_private_key, self._server_public_key) hmac_value = base64.urlsafe_b64encode( - compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) # Execute & Assert @@ -446,7 +445,7 @@ def test_unsuccessful_ping_with_metadata(self) -> None: def test_successful_restore_node(self) -> None: """Test server interceptor for restoring node.""" public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) response, call = self._create_node.with_call( request=CreateNodeRequest(), @@ -461,20 +460,20 @@ def test_successful_restore_node(self) -> None: ) node = response.node - client_node_id = node.node_id + node_node_id = node.node_id assert call.initial_metadata()[0] == expected_metadata assert isinstance(response, CreateNodeResponse) request = DeleteNodeRequest(node=node) shared_secret = generate_shared_key( - self._client_private_key, self._server_public_key + self._node_private_key, self._server_public_key ) hmac_value = base64.urlsafe_b64encode( - 
compute_hmac(shared_secret, request.SerializeToString(True)) + compute_hmac(shared_secret, request.SerializeToString(deterministic=True)) ) public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) response, call = self._delete_node.with_call( request=request, @@ -488,7 +487,7 @@ def test_successful_restore_node(self) -> None: assert grpc.StatusCode.OK == call.code() public_key_bytes = base64.urlsafe_b64encode( - public_key_to_bytes(self._client_public_key) + public_key_to_bytes(self._node_public_key) ) response, call = self._create_node.with_call( request=CreateNodeRequest(), @@ -504,4 +503,4 @@ def test_successful_restore_node(self) -> None: assert call.initial_metadata()[0] == expected_metadata assert isinstance(response, CreateNodeResponse) - assert response.node.node_id == client_node_id + assert response.node.node_id == node_node_id diff --git a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py index 64f9ac609998..38df6f441a20 100644 --- a/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py +++ b/src/py/flwr/server/superlink/fleet/message_handler/message_handler.py @@ -16,7 +16,7 @@ import time -from typing import List, Optional +from typing import Optional from uuid import UUID from flwr.common.serde import fab_to_proto, user_config_to_proto @@ -43,12 +43,12 @@ ) from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs import Ffs -from flwr.server.superlink.state import State +from flwr.server.superlink.linkstate import LinkState def create_node( request: CreateNodeRequest, # pylint: disable=unused-argument - state: State, + state: LinkState, ) -> CreateNodeResponse: """.""" # Create node @@ -56,7 +56,7 @@ def create_node( return CreateNodeResponse(node=Node(node_id=node_id, anonymous=False)) -def delete_node(request: 
DeleteNodeRequest, state: State) -> DeleteNodeResponse: +def delete_node(request: DeleteNodeRequest, state: LinkState) -> DeleteNodeResponse: """.""" # Validate node_id if request.node.anonymous or request.node.node_id == 0: @@ -69,21 +69,21 @@ def delete_node(request: DeleteNodeRequest, state: State) -> DeleteNodeResponse: def ping( request: PingRequest, # pylint: disable=unused-argument - state: State, # pylint: disable=unused-argument + state: LinkState, # pylint: disable=unused-argument ) -> PingResponse: """.""" res = state.acknowledge_ping(request.node.node_id, request.ping_interval) return PingResponse(success=res) -def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsResponse: +def pull_task_ins(request: PullTaskInsRequest, state: LinkState) -> PullTaskInsResponse: """Pull TaskIns handler.""" # Get node_id if client node is not anonymous node = request.node # pylint: disable=no-member node_id: Optional[int] = None if node.anonymous else node.node_id # Retrieve TaskIns from State - task_ins_list: List[TaskIns] = state.get_task_ins(node_id=node_id, limit=1) + task_ins_list: list[TaskIns] = state.get_task_ins(node_id=node_id, limit=1) # Build response response = PullTaskInsResponse( @@ -92,7 +92,7 @@ def pull_task_ins(request: PullTaskInsRequest, state: State) -> PullTaskInsRespo return response -def push_task_res(request: PushTaskResRequest, state: State) -> PushTaskResResponse: +def push_task_res(request: PushTaskResRequest, state: LinkState) -> PushTaskResResponse: """Push TaskRes handler.""" # pylint: disable=no-member task_res: TaskRes = request.task_res_list[0] @@ -113,7 +113,7 @@ def push_task_res(request: PushTaskResRequest, state: State) -> PushTaskResRespo def get_run( - request: GetRunRequest, state: State # pylint: disable=W0613 + request: GetRunRequest, state: LinkState # pylint: disable=W0613 ) -> GetRunResponse: """Get run information.""" run = state.get_run(request.run_id) diff --git 
a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py index cf5ad16f7999..d38a2b0a500b 100644 --- a/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py +++ b/src/py/flwr/server/superlink/fleet/rest_rere/rest_api.py @@ -18,7 +18,8 @@ from __future__ import annotations import sys -from typing import Awaitable, Callable, TypeVar +from collections.abc import Awaitable +from typing import Callable, TypeVar from google.protobuf.message import Message as GrpcMessage @@ -39,7 +40,7 @@ from flwr.proto.run_pb2 import GetRunRequest, GetRunResponse # pylint: disable=E0611 from flwr.server.superlink.ffs.ffs import Ffs from flwr.server.superlink.fleet.message_handler import message_handler -from flwr.server.superlink.state import State +from flwr.server.superlink.linkstate import LinkState try: from starlette.applications import Starlette @@ -89,7 +90,7 @@ async def wrapper(request: Request) -> Response: async def create_node(request: CreateNodeRequest) -> CreateNodeResponse: """Create Node.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = app.state.STATE_FACTORY.state() # Handle message return message_handler.create_node(request=request, state=state) @@ -99,7 +100,7 @@ async def create_node(request: CreateNodeRequest) -> CreateNodeResponse: async def delete_node(request: DeleteNodeRequest) -> DeleteNodeResponse: """Delete Node Id.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = app.state.STATE_FACTORY.state() # Handle message return message_handler.delete_node(request=request, state=state) @@ -109,7 +110,7 @@ async def delete_node(request: DeleteNodeRequest) -> DeleteNodeResponse: async def pull_task_ins(request: PullTaskInsRequest) -> PullTaskInsResponse: """Pull TaskIns.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = app.state.STATE_FACTORY.state() # Handle message return 
message_handler.pull_task_ins(request=request, state=state) @@ -120,7 +121,7 @@ async def pull_task_ins(request: PullTaskInsRequest) -> PullTaskInsResponse: async def push_task_res(request: PushTaskResRequest) -> PushTaskResResponse: """Push TaskRes.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = app.state.STATE_FACTORY.state() # Handle message return message_handler.push_task_res(request=request, state=state) @@ -130,7 +131,7 @@ async def push_task_res(request: PushTaskResRequest) -> PushTaskResResponse: async def ping(request: PingRequest) -> PingResponse: """Ping.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = app.state.STATE_FACTORY.state() # Handle message return message_handler.ping(request=request, state=state) @@ -140,7 +141,7 @@ async def ping(request: PingRequest) -> PingResponse: async def get_run(request: GetRunRequest) -> GetRunResponse: """GetRun.""" # Get state from app - state: State = app.state.STATE_FACTORY.state() + state: LinkState = app.state.STATE_FACTORY.state() # Handle message return message_handler.get_run(request=request, state=state) diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py b/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py index a8c671810a51..31129fce1b1b 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/__init__.py @@ -15,17 +15,16 @@ """Simulation Engine Backends.""" import importlib -from typing import Dict, Type from .backend import Backend, BackendConfig is_ray_installed = importlib.util.find_spec("ray") is not None # Mapping of supported backends -supported_backends: Dict[str, Type[Backend]] = {} +supported_backends: dict[str, type[Backend]] = {} # To log backend-specific error message when chosen backend isn't available -error_messages_backends: Dict[str, str] = {} +error_messages_backends: dict[str, str] = {} if 
is_ray_installed: from .raybackend import RayBackend diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/backend.py b/src/py/flwr/server/superlink/fleet/vce/backend/backend.py index 89341c0d238f..38be6032e3a5 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/backend.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/backend.py @@ -16,14 +16,14 @@ from abc import ABC, abstractmethod -from typing import Callable, Dict, Tuple +from typing import Callable from flwr.client.client_app import ClientApp from flwr.common.context import Context from flwr.common.message import Message from flwr.common.typing import ConfigsRecordValues -BackendConfig = Dict[str, Dict[str, ConfigsRecordValues]] +BackendConfig = dict[str, dict[str, ConfigsRecordValues]] class Backend(ABC): @@ -62,5 +62,5 @@ def process_message( self, message: Message, context: Context, - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Submit a job to the backend.""" diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py index acfb248a6366..dd79d2ef7f62 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend.py @@ -16,7 +16,7 @@ import sys from logging import DEBUG, ERROR -from typing import Callable, Dict, Optional, Tuple, Union +from typing import Callable, Optional, Union import ray @@ -31,8 +31,8 @@ from .backend import Backend, BackendConfig -ClientResourcesDict = Dict[str, Union[int, float]] -ActorArgsDict = Dict[str, Union[int, float, Callable[[], None]]] +ClientResourcesDict = dict[str, Union[int, float]] +ActorArgsDict = dict[str, Union[int, float, Callable[[], None]]] class RayBackend(Backend): @@ -52,16 +52,11 @@ def __init__( # Validate client resources self.client_resources_key = "client_resources" - client_resources = self._validate_client_resources(config=backend_config) + self.client_resources = 
self._validate_client_resources(config=backend_config) - # Create actor pool - actor_kwargs = self._validate_actor_arguments(config=backend_config) - - self.pool = BasicActorPool( - actor_type=ClientAppActor, - client_resources=client_resources, - actor_kwargs=actor_kwargs, - ) + # Validate actor resources + self.actor_kwargs = self._validate_actor_arguments(config=backend_config) + self.pool: Optional[BasicActorPool] = None self.app_fn: Optional[Callable[[], ClientApp]] = None @@ -106,7 +101,7 @@ def _validate_actor_arguments(self, config: BackendConfig) -> ActorArgsDict: def init_ray(self, backend_config: BackendConfig) -> None: """Intialises Ray if not already initialised.""" if not ray.is_initialized(): - ray_init_args: Dict[ + ray_init_args: dict[ str, ConfigsRecordValues, ] = {} @@ -122,14 +117,24 @@ def init_ray(self, backend_config: BackendConfig) -> None: @property def num_workers(self) -> int: """Return number of actors in pool.""" - return self.pool.num_actors + return self.pool.num_actors if self.pool else 0 def is_worker_idle(self) -> bool: """Report whether the pool has idle actors.""" - return self.pool.is_actor_available() + return self.pool.is_actor_available() if self.pool else False def build(self, app_fn: Callable[[], ClientApp]) -> None: """Build pool of Ray actors that this backend will submit jobs to.""" + # Create Actor Pool + try: + self.pool = BasicActorPool( + actor_type=ClientAppActor, + client_resources=self.client_resources, + actor_kwargs=self.actor_kwargs, + ) + except Exception as ex: + raise ex + self.pool.add_actors_to_pool(self.pool.actors_capacity) # Set ClientApp callable that ray actors will use self.app_fn = app_fn @@ -139,13 +144,16 @@ def process_message( self, message: Message, context: Context, - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Run ClientApp that process a given message. Return output message and updated context.
""" partition_id = context.node_config[PARTITION_ID_KEY] + if self.pool is None: + raise ValueError("The actor pool is empty, unfit to process messages.") + if self.app_fn is None: raise ValueError( "Unspecified function to load a `ClientApp`. " @@ -179,6 +187,7 @@ def process_message( def terminate(self) -> None: """Terminate all actors in actor pool.""" - self.pool.terminate_all_actors() + if self.pool: + self.pool.terminate_all_actors() ray.shutdown() log(DEBUG, "Terminated %s", self.__class__.__name__) diff --git a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py index cdb11401c29c..753f450e835c 100644 --- a/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/backend/raybackend_test.py @@ -15,14 +15,14 @@ """Test for Ray backend for the Fleet API using the Simulation Engine.""" from math import pi -from typing import Callable, Dict, Optional, Tuple, Union +from typing import Callable, Optional, Union from unittest import TestCase import ray from flwr.client import Client, NumPyClient from flwr.client.client_app import ClientApp -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common import ( DEFAULT_TTL, Config, @@ -47,7 +47,7 @@ class DummyClient(NumPyClient): def __init__(self, state: RecordSet) -> None: self.client_state = state - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return properties by doing a simple calculation.""" result = float(config["factor"]) * pi @@ -69,8 +69,8 @@ def _load_app() -> ClientApp: def backend_build_process_and_termination( backend: RayBackend, app_fn: Callable[[], ClientApp], - process_args: Optional[Tuple[Message, Context]] = None, -) -> Union[Tuple[Message, Context], None]: + process_args: Optional[tuple[Message, Context]] = None, 
+) -> Union[tuple[Message, Context], None]: """Build, process job and terminate RayBackend.""" backend.build(app_fn) to_return = None @@ -83,7 +83,7 @@ def backend_build_process_and_termination( return to_return -def _create_message_and_context() -> Tuple[Message, Context, float]: +def _create_message_and_context() -> tuple[Message, Context, float]: # Construct a Message mult_factor = 2024 @@ -104,8 +104,10 @@ def _create_message_and_context() -> Tuple[Message, Context, float]: ), ) - # Construct NodeState and retrieve context - node_state = NodeState(node_id=run_id, node_config={PARTITION_ID_KEY: str(0)}) + # Construct DeprecatedRunInfoStore and retrieve context + node_state = DeprecatedRunInfoStore( + node_id=run_id, node_config={PARTITION_ID_KEY: str(0)} + ) node_state.register_context(run_id=run_id) context = node_state.retrieve_context(run_id=run_id) diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api.py b/src/py/flwr/server/superlink/fleet/vce/vce_api.py index 165c2de73c21..7a2d28dec4fb 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api.py @@ -24,11 +24,11 @@ from pathlib import Path from queue import Empty, Queue from time import sleep -from typing import Callable, Dict, Optional +from typing import Callable, Optional from flwr.client.client_app import ClientApp, ClientAppException, LoadClientAppError from flwr.client.clientapp.utils import get_load_client_app_fn -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common.constant import ( NUM_PARTITIONS_KEY, PARTITION_ID_KEY, @@ -40,15 +40,15 @@ from flwr.common.serde import message_from_taskins, message_to_taskres from flwr.common.typing import Run from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 -from flwr.server.superlink.state import State, StateFactory +from flwr.server.superlink.linkstate import LinkState, LinkStateFactory from .backend 
import Backend, error_messages_backends, supported_backends -NodeToPartitionMapping = Dict[int, int] +NodeToPartitionMapping = dict[int, int] def _register_nodes( - num_nodes: int, state_factory: StateFactory + num_nodes: int, state_factory: LinkStateFactory ) -> NodeToPartitionMapping: """Register nodes with the StateFactory and create node-id:partition-id mapping.""" nodes_mapping: NodeToPartitionMapping = {} @@ -60,16 +60,16 @@ def _register_nodes( return nodes_mapping -def _register_node_states( +def _register_node_info_stores( nodes_mapping: NodeToPartitionMapping, run: Run, app_dir: Optional[str] = None, -) -> Dict[int, NodeState]: - """Create NodeState objects and pre-register the context for the run.""" - node_states: Dict[int, NodeState] = {} +) -> dict[int, DeprecatedRunInfoStore]: + """Create DeprecatedRunInfoStore objects and register the context for the run.""" + node_info_store: dict[int, DeprecatedRunInfoStore] = {} num_partitions = len(set(nodes_mapping.values())) for node_id, partition_id in nodes_mapping.items(): - node_states[node_id] = NodeState( + node_info_store[node_id] = DeprecatedRunInfoStore( node_id=node_id, node_config={ PARTITION_ID_KEY: partition_id, @@ -78,18 +78,18 @@ def _register_node_states( ) # Pre-register Context objects - node_states[node_id].register_context( + node_info_store[node_id].register_context( run_id=run.run_id, run=run, app_dir=app_dir ) - return node_states + return node_info_store # pylint: disable=too-many-arguments,too-many-locals def worker( taskins_queue: "Queue[TaskIns]", taskres_queue: "Queue[TaskRes]", - node_states: Dict[int, NodeState], + node_info_store: dict[int, DeprecatedRunInfoStore], backend: Backend, f_stop: threading.Event, ) -> None: @@ -103,7 +103,7 @@ def worker( node_id = task_ins.task.consumer.node_id # Retrieve context - context = node_states[node_id].retrieve_context(run_id=task_ins.run_id) + context = node_info_store[node_id].retrieve_context(run_id=task_ins.run_id) # Convert TaskIns to 
Message message = message_from_taskins(task_ins) @@ -112,7 +112,7 @@ def worker( out_mssg, updated_context = backend.process_message(message, context) # Update Context - node_states[node_id].update_context( + node_info_store[node_id].update_context( task_ins.run_id, context=updated_context ) except Empty: @@ -145,7 +145,7 @@ def worker( def add_taskins_to_queue( - state: State, + state: LinkState, queue: "Queue[TaskIns]", nodes_mapping: NodeToPartitionMapping, f_stop: threading.Event, @@ -160,7 +160,7 @@ def add_taskins_to_queue( def put_taskres_into_state( - state: State, queue: "Queue[TaskRes]", f_stop: threading.Event + state: LinkState, queue: "Queue[TaskRes]", f_stop: threading.Event ) -> None: """Put TaskRes into State from a queue.""" while not f_stop.is_set(): @@ -172,12 +172,13 @@ def put_taskres_into_state( pass +# pylint: disable=too-many-positional-arguments def run_api( app_fn: Callable[[], ClientApp], backend_fn: Callable[[], Backend], nodes_mapping: NodeToPartitionMapping, - state_factory: StateFactory, - node_states: Dict[int, NodeState], + state_factory: LinkStateFactory, + node_info_stores: dict[int, DeprecatedRunInfoStore], f_stop: threading.Event, ) -> None: """Run the VCE.""" @@ -222,7 +223,7 @@ def run_api( worker, taskins_queue, taskres_queue, - node_states, + node_info_stores, backend, f_stop, ) @@ -251,7 +252,7 @@ def run_api( # pylint: disable=too-many-arguments,unused-argument,too-many-locals,too-many-branches -# pylint: disable=too-many-statements +# pylint: disable=too-many-statements,too-many-positional-arguments def start_vce( backend_name: str, backend_config_json_stream: str, @@ -263,10 +264,12 @@ def start_vce( client_app: Optional[ClientApp] = None, client_app_attr: Optional[str] = None, num_supernodes: Optional[int] = None, - state_factory: Optional[StateFactory] = None, + state_factory: Optional[LinkStateFactory] = None, existing_nodes_mapping: Optional[NodeToPartitionMapping] = None, ) -> None: """Start Fleet API with the 
Simulation Engine.""" + nodes_mapping = {} + if client_app_attr is not None and client_app is not None: raise ValueError( "Both `client_app_attr` and `client_app` are provided, " @@ -300,7 +303,7 @@ def start_vce( if not state_factory: log(INFO, "A StateFactory was not supplied to the SimulationEngine.") # Create an empty in-memory state factory - state_factory = StateFactory(":flwr-in-memory-state:") + state_factory = LinkStateFactory(":flwr-in-memory-state:") log(INFO, "Created new %s.", state_factory.__class__.__name__) if num_supernodes: @@ -309,8 +312,8 @@ def start_vce( num_nodes=num_supernodes, state_factory=state_factory ) - # Construct mapping of NodeStates - node_states = _register_node_states( + # Construct mapping of DeprecatedRunInfoStore + node_info_stores = _register_node_info_stores( nodes_mapping=nodes_mapping, run=run, app_dir=app_dir if is_app else None ) @@ -340,17 +343,17 @@ def backend_fn() -> Backend: # Load ClientApp if needed def _load() -> ClientApp: + if client_app: + return client_app if client_app_attr: - app = get_load_client_app_fn( + return get_load_client_app_fn( default_app_ref=client_app_attr, app_path=app_dir, flwr_dir=flwr_dir, multi_app=False, - )(run.fab_id, run.fab_version) + )(run.fab_id, run.fab_version, run.fab_hash) - if client_app: - app = client_app - return app + raise ValueError("Either `client_app_attr` or `client_app` must be provided") app_fn = _load @@ -373,7 +376,7 @@ def _load_client_app() -> ClientApp: backend_fn, nodes_mapping, state_factory, - node_states, + node_info_stores, f_stop, ) except LoadClientAppError as loadapp_ex: diff --git a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py index 76e8ac9156d2..d14ce86c58c4 100644 --- a/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py +++ b/src/py/flwr/server/superlink/fleet/vce/vce_api_test.py @@ -22,7 +22,7 @@ from math import pi from pathlib import Path from time import sleep -from typing 
import Dict, Optional, Set, Tuple +from typing import Optional from unittest import TestCase from uuid import UUID @@ -48,7 +48,7 @@ _register_nodes, start_vce, ) -from flwr.server.superlink.state import InMemoryState, StateFactory +from flwr.server.superlink.linkstate import InMemoryLinkState, LinkStateFactory class DummyClient(NumPyClient): @@ -57,7 +57,7 @@ class DummyClient(NumPyClient): def __init__(self, state: RecordSet) -> None: self.client_state = state - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return properties by doing a simple calculation.""" result = float(config["factor"]) * pi @@ -86,11 +86,11 @@ def terminate_simulation(f_stop: threading.Event, sleep_duration: int) -> None: def init_state_factory_nodes_mapping( num_nodes: int, num_messages: int, -) -> Tuple[StateFactory, NodeToPartitionMapping, Dict[UUID, float]]: +) -> tuple[LinkStateFactory, NodeToPartitionMapping, dict[UUID, float]]: """Instatiate StateFactory, register nodes and pre-insert messages in the state.""" # Register a state and a run_id in it run_id = 1234 - state_factory = StateFactory(":flwr-in-memory-state:") + state_factory = LinkStateFactory(":flwr-in-memory-state:") # Register a few nodes nodes_mapping = _register_nodes(num_nodes=num_nodes, state_factory=state_factory) @@ -106,13 +106,13 @@ def init_state_factory_nodes_mapping( # pylint: disable=too-many-locals def register_messages_into_state( - state_factory: StateFactory, + state_factory: LinkStateFactory, nodes_mapping: NodeToPartitionMapping, run_id: int, num_messages: int, -) -> Dict[UUID, float]: +) -> dict[UUID, float]: """Register `num_messages` into the state factory.""" - state: InMemoryState = state_factory.state() # type: ignore + state: InMemoryLinkState = state_factory.state() # type: ignore state.run_ids[run_id] = Run( run_id=run_id, fab_id="Mock/mock", @@ -123,7 +123,7 @@ def register_messages_into_state( # Artificially add 
TaskIns to state so they can be processed # by the Simulation Engine logic nodes_cycle = cycle(nodes_mapping.keys()) # we have more messages than supernodes - task_ids: Set[UUID] = set() # so we can retrieve them later + task_ids: set[UUID] = set() # so we can retrieve them later expected_results = {} for i in range(num_messages): dst_node_id = next(nodes_cycle) @@ -170,13 +170,13 @@ def _autoresolve_app_dir(rel_client_app_dir: str = "backend") -> str: return str(rel_app_dir.parent / rel_client_app_dir) -# pylint: disable=too-many-arguments +# pylint: disable=too-many-arguments,too-many-positional-arguments def start_and_shutdown( backend: str = "ray", client_app_attr: Optional[str] = None, app_dir: str = "", num_supernodes: Optional[int] = None, - state_factory: Optional[StateFactory] = None, + state_factory: Optional[LinkStateFactory] = None, nodes_mapping: Optional[NodeToPartitionMapping] = None, duration: int = 0, backend_config: str = "{}", @@ -304,7 +304,7 @@ def test_start_and_shutdown_with_tasks_in_state(self) -> None: # Get all TaskRes state = state_factory.state() task_ids = set(expected_results.keys()) - task_res_list = state.get_task_res(task_ids=task_ids, limit=len(task_ids)) + task_res_list = state.get_task_res(task_ids=task_ids) # Check results by first converting to Message for task_res in task_res_list: diff --git a/src/py/flwr/server/superlink/linkstate/__init__.py b/src/py/flwr/server/superlink/linkstate/__init__.py new file mode 100644 index 000000000000..471cfbd2b5ec --- /dev/null +++ b/src/py/flwr/server/superlink/linkstate/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower LinkState.""" + + +from .in_memory_linkstate import InMemoryLinkState as InMemoryLinkState +from .linkstate import LinkState as LinkState +from .linkstate_factory import LinkStateFactory as LinkStateFactory +from .sqlite_linkstate import SqliteLinkState as SqliteLinkState + +__all__ = [ + "InMemoryLinkState", + "LinkState", + "LinkStateFactory", + "SqliteLinkState", +] diff --git a/src/py/flwr/server/superlink/state/in_memory_state.py b/src/py/flwr/server/superlink/linkstate/in_memory_linkstate.py similarity index 69% rename from src/py/flwr/server/superlink/state/in_memory_state.py rename to src/py/flwr/server/superlink/linkstate/in_memory_linkstate.py index fde8fe41912f..8fdb5a1ed9ec 100644 --- a/src/py/flwr/server/superlink/state/in_memory_state.py +++ b/src/py/flwr/server/superlink/linkstate/in_memory_linkstate.py @@ -12,40 +12,44 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""In-memory State implementation.""" +"""In-memory LinkState implementation.""" import threading import time -from logging import ERROR -from typing import Dict, List, Optional, Set, Tuple +from logging import ERROR, WARNING +from typing import Optional from uuid import UUID, uuid4 from flwr.common import log, now -from flwr.common.constant import NODE_ID_NUM_BYTES, RUN_ID_NUM_BYTES +from flwr.common.constant import ( + MESSAGE_TTL_TOLERANCE, + NODE_ID_NUM_BYTES, + RUN_ID_NUM_BYTES, +) from flwr.common.typing import Run, UserConfig from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 -from flwr.server.superlink.state.state import State +from flwr.server.superlink.linkstate.linkstate import LinkState from flwr.server.utils import validate_task_ins_or_res from .utils import generate_rand_int_from_bytes, make_node_unavailable_taskres -class InMemoryState(State): # pylint: disable=R0902,R0904 - """In-memory State implementation.""" +class InMemoryLinkState(LinkState): # pylint: disable=R0902,R0904 + """In-memory LinkState implementation.""" def __init__(self) -> None: # Map node_id to (online_until, ping_interval) - self.node_ids: Dict[int, Tuple[float, float]] = {} - self.public_key_to_node_id: Dict[bytes, int] = {} + self.node_ids: dict[int, tuple[float, float]] = {} + self.public_key_to_node_id: dict[bytes, int] = {} # Map run_id to (fab_id, fab_version) - self.run_ids: Dict[int, Run] = {} - self.task_ins_store: Dict[UUID, TaskIns] = {} - self.task_res_store: Dict[UUID, TaskRes] = {} + self.run_ids: dict[int, Run] = {} + self.task_ins_store: dict[UUID, TaskIns] = {} + self.task_res_store: dict[UUID, TaskRes] = {} - self.client_public_keys: Set[bytes] = set() + self.node_public_keys: set[bytes] = set() self.server_public_key: Optional[bytes] = None self.server_private_key: Optional[bytes] = None @@ -76,13 +80,14 @@ def store_task_ins(self, task_ins: TaskIns) -> 
Optional[UUID]: def get_task_ins( self, node_id: Optional[int], limit: Optional[int] - ) -> List[TaskIns]: + ) -> list[TaskIns]: """Get all TaskIns that have not been delivered yet.""" if limit is not None and limit < 1: raise AssertionError("`limit` must be >= 1") # Find TaskIns for node_id that were not delivered yet - task_ins_list: List[TaskIns] = [] + task_ins_list: list[TaskIns] = [] + current_time = time.time() with self.lock: for _, task_ins in self.task_ins_store.items(): # pylint: disable=too-many-boolean-expressions @@ -91,11 +96,13 @@ def get_task_ins( and task_ins.task.consumer.anonymous is False and task_ins.task.consumer.node_id == node_id and task_ins.task.delivered_at == "" + and task_ins.task.created_at + task_ins.task.ttl > current_time ) or ( node_id is None # Anonymous and task_ins.task.consumer.anonymous is True and task_ins.task.consumer.node_id == 0 and task_ins.task.delivered_at == "" + and task_ins.task.created_at + task_ins.task.ttl > current_time ): task_ins_list.append(task_ins) if limit and len(task_ins_list) == limit: @@ -109,6 +116,7 @@ def get_task_ins( # Return TaskIns return task_ins_list + # pylint: disable=R0911 def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: """Store one TaskRes.""" # Validate task @@ -117,6 +125,55 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: log(ERROR, errors) return None + with self.lock: + # Check if the TaskIns it is replying to exists and is valid + task_ins_id = task_res.task.ancestry[0] + task_ins = self.task_ins_store.get(UUID(task_ins_id)) + + # Ensure that the consumer_id of taskIns matches the producer_id of taskRes. 
+ if ( + task_ins + and task_res + and not ( + task_ins.task.consumer.anonymous or task_res.task.producer.anonymous + ) + and task_ins.task.consumer.node_id != task_res.task.producer.node_id + ): + return None + + if task_ins is None: + log(ERROR, "TaskIns with task_id %s does not exist.", task_ins_id) + return None + + if task_ins.task.created_at + task_ins.task.ttl <= time.time(): + log( + ERROR, + "Failed to store TaskRes: TaskIns with task_id %s has expired.", + task_ins_id, + ) + return None + + # Fail if the TaskRes TTL exceeds the + # expiration time of the TaskIns it replies to. + # Condition: TaskIns.created_at + TaskIns.ttl ≥ + # TaskRes.created_at + TaskRes.ttl + # A small tolerance is introduced to account + # for floating-point precision issues. + max_allowed_ttl = ( + task_ins.task.created_at + task_ins.task.ttl - task_res.task.created_at + ) + if task_res.task.ttl and ( + task_res.task.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE + ): + log( + WARNING, + "Received TaskRes with TTL %.2f " + "exceeding the allowed maximum TTL %.2f.", + task_res.task.ttl, + max_allowed_ttl, + ) + return None + # Validate run_id if task_res.run_id not in self.run_ids: log(ERROR, "`run_id` is invalid") @@ -133,27 +190,33 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: # Return the new task_id return task_id - def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: + def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]: """Get all TaskRes that have not been delivered yet.""" - if limit is not None and limit < 1: - raise AssertionError("`limit` must be >= 1") - with self.lock: # Find TaskRes that were not delivered yet - task_res_list: List[TaskRes] = [] - replied_task_ids: Set[UUID] = set() + task_res_list: list[TaskRes] = [] + replied_task_ids: set[UUID] = set() for _, task_res in self.task_res_store.items(): reply_to = UUID(task_res.task.ancestry[0]) + + # Check if corresponding TaskIns exists and is not expired + 
task_ins = self.task_ins_store.get(reply_to) + if task_ins is None: + log(WARNING, "TaskIns with task_id %s does not exist.", reply_to) + task_ids.remove(reply_to) + continue + + if task_ins.task.created_at + task_ins.task.ttl <= time.time(): + log(WARNING, "TaskIns with task_id %s is expired.", reply_to) + task_ids.remove(reply_to) + continue + if reply_to in task_ids and task_res.task.delivered_at == "": task_res_list.append(task_res) replied_task_ids.add(reply_to) - if limit and len(task_res_list) == limit: - break # Check if the node is offline for task_id in task_ids - replied_task_ids: - if limit and len(task_res_list) == limit: - break task_ins = self.task_ins_store.get(task_id) if task_ins is None: continue @@ -175,10 +238,10 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe # Return TaskRes return task_res_list - def delete_tasks(self, task_ids: Set[UUID]) -> None: + def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" - task_ins_to_be_deleted: Set[UUID] = set() - task_res_to_be_deleted: Set[UUID] = set() + task_ins_to_be_deleted: set[UUID] = set() + task_res_to_be_deleted: set[UUID] = set() with self.lock: for task_ins_id in task_ids: @@ -214,7 +277,7 @@ def num_task_res(self) -> int: def create_node( self, ping_interval: float, public_key: Optional[bytes] = None ) -> int: - """Create, store in state, and return `node_id`.""" + """Create, store in the link state, and return `node_id`.""" # Sample a random int64 as node_id node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) @@ -237,7 +300,7 @@ def create_node( return node_id def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: - """Delete a client node.""" + """Delete a node.""" with self.lock: if node_id not in self.node_ids: raise ValueError(f"Node {node_id} not found") @@ -253,8 +316,8 @@ def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: del 
self.node_ids[node_id] - def get_nodes(self, run_id: int) -> Set[int]: - """Return all available client nodes. + def get_nodes(self, run_id: int) -> set[int]: + """Return all available nodes. Constraints ----------- @@ -271,9 +334,9 @@ def get_nodes(self, run_id: int) -> Set[int]: if online_until > current_time } - def get_node_id(self, client_public_key: bytes) -> Optional[int]: - """Retrieve stored `node_id` filtered by `client_public_keys`.""" - return self.public_key_to_node_id.get(client_public_key) + def get_node_id(self, node_public_key: bytes) -> Optional[int]: + """Retrieve stored `node_id` filtered by `node_public_keys`.""" + return self.public_key_to_node_id.get(node_public_key) def create_run( self, @@ -302,7 +365,7 @@ def create_run( def store_server_private_public_key( self, private_key: bytes, public_key: bytes ) -> None: - """Store `server_private_key` and `server_public_key` in state.""" + """Store `server_private_key` and `server_public_key` in the link state.""" with self.lock: if self.server_private_key is None and self.server_public_key is None: self.server_private_key = private_key @@ -318,19 +381,19 @@ def get_server_public_key(self) -> Optional[bytes]: """Retrieve `server_public_key` in urlsafe bytes.""" return self.server_public_key - def store_client_public_keys(self, public_keys: Set[bytes]) -> None: - """Store a set of `client_public_keys` in state.""" + def store_node_public_keys(self, public_keys: set[bytes]) -> None: + """Store a set of `node_public_keys` in the link state.""" with self.lock: - self.client_public_keys = public_keys + self.node_public_keys = public_keys - def store_client_public_key(self, public_key: bytes) -> None: - """Store a `client_public_key` in state.""" + def store_node_public_key(self, public_key: bytes) -> None: + """Store a `node_public_key` in the link state.""" with self.lock: - self.client_public_keys.add(public_key) + self.node_public_keys.add(public_key) - def get_client_public_keys(self) -> Set[bytes]: 
- """Retrieve all currently stored `client_public_keys` as a set.""" - return self.client_public_keys + def get_node_public_keys(self) -> set[bytes]: + """Retrieve all currently stored `node_public_keys` as a set.""" + return self.node_public_keys def get_run(self, run_id: int) -> Optional[Run]: """Retrieve information about the run with the specified `run_id`.""" diff --git a/src/py/flwr/server/superlink/state/state.py b/src/py/flwr/server/superlink/linkstate/linkstate.py similarity index 82% rename from src/py/flwr/server/superlink/state/state.py rename to src/py/flwr/server/superlink/linkstate/linkstate.py index 80d3b799bce3..e8e254873957 100644 --- a/src/py/flwr/server/superlink/state/state.py +++ b/src/py/flwr/server/superlink/linkstate/linkstate.py @@ -12,19 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Abstract base class State.""" +"""Abstract base class LinkState.""" import abc -from typing import List, Optional, Set +from typing import Optional from uuid import UUID from flwr.common.typing import Run, UserConfig from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 -class State(abc.ABC): # pylint: disable=R0904 - """Abstract State.""" +class LinkState(abc.ABC): # pylint: disable=R0904 + """Abstract LinkState.""" @abc.abstractmethod def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: @@ -32,8 +32,8 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: Usually, the Driver API calls this to schedule instructions. - Stores the value of the `task_ins` in the state and, if successful, returns the - `task_id` (UUID) of the `task_ins`. If, for any reason, + Stores the value of the `task_ins` in the link state and, if successful, + returns the `task_id` (UUID) of the `task_ins`. If, for any reason, storing the `task_ins` fails, `None` is returned. 
Constraints @@ -51,7 +51,7 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: @abc.abstractmethod def get_task_ins( self, node_id: Optional[int], limit: Optional[int] - ) -> List[TaskIns]: + ) -> list[TaskIns]: """Get TaskIns optionally filtered by node_id. Usually, the Fleet API calls this for Nodes planning to work on one or more @@ -98,7 +98,7 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: """ @abc.abstractmethod - def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: + def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]: """Get TaskRes for task_ids. Usually, the Driver API calls this method to get results for instructions it has @@ -106,12 +106,6 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe Retrieves all TaskRes for the given `task_ids` and returns and empty list of none could be found. - - Constraints - ----------- - If `limit` is not `None`, return, at most, `limit` number of TaskRes. The limit - will only take effect if enough task_ids are in the set AND are currently - available. If `limit` is set, it has to be greater zero. 
""" @abc.abstractmethod @@ -129,21 +123,21 @@ def num_task_res(self) -> int: """ @abc.abstractmethod - def delete_tasks(self, task_ids: Set[UUID]) -> None: + def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" @abc.abstractmethod def create_node( self, ping_interval: float, public_key: Optional[bytes] = None ) -> int: - """Create, store in state, and return `node_id`.""" + """Create, store in the link state, and return `node_id`.""" @abc.abstractmethod def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: - """Remove `node_id` from state.""" + """Remove `node_id` from the link state.""" @abc.abstractmethod - def get_nodes(self, run_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> set[int]: """Retrieve all currently stored node IDs as a set. Constraints @@ -153,8 +147,8 @@ def get_nodes(self, run_id: int) -> Set[int]: """ @abc.abstractmethod - def get_node_id(self, client_public_key: bytes) -> Optional[int]: - """Retrieve stored `node_id` filtered by `client_public_keys`.""" + def get_node_id(self, node_public_key: bytes) -> Optional[int]: + """Retrieve stored `node_id` filtered by `node_public_keys`.""" @abc.abstractmethod def create_run( @@ -188,7 +182,7 @@ def get_run(self, run_id: int) -> Optional[Run]: def store_server_private_public_key( self, private_key: bytes, public_key: bytes ) -> None: - """Store `server_private_key` and `server_public_key` in state.""" + """Store `server_private_key` and `server_public_key` in the link state.""" @abc.abstractmethod def get_server_private_key(self) -> Optional[bytes]: @@ -199,16 +193,16 @@ def get_server_public_key(self) -> Optional[bytes]: """Retrieve `server_public_key` in urlsafe bytes.""" @abc.abstractmethod - def store_client_public_keys(self, public_keys: Set[bytes]) -> None: - """Store a set of `client_public_keys` in state.""" + def store_node_public_keys(self, public_keys: set[bytes]) -> None: + """Store a set of 
`node_public_keys` in the link state.""" @abc.abstractmethod - def store_client_public_key(self, public_key: bytes) -> None: - """Store a `client_public_key` in state.""" + def store_node_public_key(self, public_key: bytes) -> None: + """Store a `node_public_key` in the link state.""" @abc.abstractmethod - def get_client_public_keys(self) -> Set[bytes]: - """Retrieve all currently stored `client_public_keys` as a set.""" + def get_node_public_keys(self) -> set[bytes]: + """Retrieve all currently stored `node_public_keys` as a set.""" @abc.abstractmethod def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: diff --git a/src/py/flwr/server/superlink/state/state_factory.py b/src/py/flwr/server/superlink/linkstate/linkstate_factory.py similarity index 80% rename from src/py/flwr/server/superlink/state/state_factory.py rename to src/py/flwr/server/superlink/linkstate/linkstate_factory.py index 96c8d445c16e..403b9bf5b4cc 100644 --- a/src/py/flwr/server/superlink/state/state_factory.py +++ b/src/py/flwr/server/superlink/linkstate/linkstate_factory.py @@ -20,13 +20,13 @@ from flwr.common.logger import log -from .in_memory_state import InMemoryState -from .sqlite_state import SqliteState -from .state import State +from .in_memory_linkstate import InMemoryLinkState +from .linkstate import LinkState +from .sqlite_linkstate import SqliteLinkState -class StateFactory: - """Factory class that creates State instances. +class LinkStateFactory: + """Factory class that creates LinkState instances. 
Parameters ---------- @@ -39,19 +39,19 @@ class StateFactory: def __init__(self, database: str) -> None: self.database = database - self.state_instance: Optional[State] = None + self.state_instance: Optional[LinkState] = None - def state(self) -> State: + def state(self) -> LinkState: """Return a State instance and create it, if necessary.""" # InMemoryState if self.database == ":flwr-in-memory-state:": if self.state_instance is None: - self.state_instance = InMemoryState() + self.state_instance = InMemoryLinkState() log(DEBUG, "Using InMemoryState") return self.state_instance # SqliteState - state = SqliteState(self.database) + state = SqliteLinkState(self.database) state.initialize() log(DEBUG, "Using SqliteState") return state diff --git a/src/py/flwr/server/superlink/state/state_test.py b/src/py/flwr/server/superlink/linkstate/linkstate_test.py similarity index 68% rename from src/py/flwr/server/superlink/state/state_test.py rename to src/py/flwr/server/superlink/linkstate/linkstate_test.py index 3efce9ca0c88..dec0a3b705e7 100644 --- a/src/py/flwr/server/superlink/state/state_test.py +++ b/src/py/flwr/server/superlink/linkstate/linkstate_test.py @@ -12,17 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Tests all state implemenations have to conform to.""" -# pylint: disable=invalid-name, disable=R0904 +"""Tests all LinkState implementations have to conform to.""" +# pylint: disable=invalid-name, too-many-lines, R0904, R0913 import tempfile import time import unittest from abc import abstractmethod from datetime import datetime, timezone -from typing import List from unittest.mock import patch -from uuid import uuid4 +from uuid import UUID from flwr.common import DEFAULT_TTL from flwr.common.constant import ErrorCode @@ -34,7 +33,11 @@ from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 -from flwr.server.superlink.state import InMemoryState, SqliteState, State +from flwr.server.superlink.linkstate import ( + InMemoryLinkState, + LinkState, + SqliteLinkState, +) class StateTest(unittest.TestCase): @@ -44,14 +47,14 @@ class StateTest(unittest.TestCase): __test__ = False @abstractmethod - def state_factory(self) -> State: + def state_factory(self) -> LinkState: """Provide state implementation to test.""" raise NotImplementedError() def test_create_and_get_run(self) -> None: """Test if create_run and get_run work correctly.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {"test_key": "test_value"}) # Execute @@ -150,18 +153,18 @@ def test_store_and_delete_tasks(self) -> None: # Insert one TaskRes and retrive it to mark it as delivered task_res_0 = create_task_res( - producer_node_id=100, + producer_node_id=consumer_node_id, anonymous=False, ancestry=[str(task_id_0)], run_id=run_id, ) _ = state.store_task_res(task_res=task_res_0) - _ = state.get_task_res(task_ids={task_id_0}, limit=None) + _ = state.get_task_res(task_ids={task_id_0}) # 
Insert one TaskRes, but don't retrive it task_res_1: TaskRes = create_task_res( - producer_node_id=100, + producer_node_id=consumer_node_id, anonymous=False, ancestry=[str(task_id_1)], run_id=run_id, @@ -189,7 +192,7 @@ def test_init_state(self) -> None: state = self.state_factory() # Assert - assert isinstance(state, State) + assert isinstance(state, LinkState) # TaskIns tests def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: @@ -198,7 +201,7 @@ def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: Create anonymous task and retrieve it. """ # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) @@ -213,7 +216,7 @@ def test_task_ins_store_anonymous_and_retrieve_anonymous(self) -> None: def test_task_ins_store_anonymous_and_fail_retrieving_identitiy(self) -> None: """Store anonymous TaskIns and fail to retrieve it.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) @@ -227,7 +230,7 @@ def test_task_ins_store_anonymous_and_fail_retrieving_identitiy(self) -> None: def test_task_ins_store_identity_and_fail_retrieving_anonymous(self) -> None: """Store identity TaskIns and fail retrieving it as anonymous.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) @@ -241,7 +244,7 @@ def test_task_ins_store_identity_and_fail_retrieving_anonymous(self) -> None: def test_task_ins_store_identity_and_retrieve_identity(self) -> None: """Store identity TaskIns and retrieve it.""" # Prepare - state: State = self.state_factory() + state: 
LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) @@ -258,7 +261,7 @@ def test_task_ins_store_identity_and_retrieve_identity(self) -> None: def test_task_ins_store_delivered_and_fail_retrieving(self) -> None: """Fail retrieving delivered task.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) task_ins = create_task_ins(consumer_node_id=1, anonymous=False, run_id=run_id) @@ -279,7 +282,7 @@ def test_task_ins_store_delivered_and_fail_retrieving(self) -> None: def test_get_task_ins_limit_throws_for_limit_zero(self) -> None: """Fail call with limit=0.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() # Execute & Assert with self.assertRaises(AssertionError): @@ -288,7 +291,7 @@ def test_get_task_ins_limit_throws_for_limit_zero(self) -> None: def test_task_ins_store_invalid_run_id_and_fail(self) -> None: """Store TaskIns with invalid run_id and fail.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=61016) # Execute @@ -301,9 +304,12 @@ def test_task_ins_store_invalid_run_id_and_fail(self) -> None: def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: """Store TaskRes retrieve it by task_ins_id.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) - task_ins_id = uuid4() + + task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins_id = state.store_task_ins(task_ins) + task_res = create_task_res( producer_node_id=0, anonymous=True, @@ -313,7 +319,9 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: # Execute task_res_uuid = state.store_task_res(task_res) - 
task_res_list = state.get_task_res(task_ids={task_ins_id}, limit=None) + + assert task_ins_id + task_res_list = state.get_task_res(task_ids={task_ins_id}) # Assert retrieved_task_res = task_res_list[0] @@ -322,7 +330,7 @@ def test_task_res_store_and_retrieve_by_task_ins_id(self) -> None: def test_node_ids_initial_state(self) -> None: """Test retrieving all node_ids and empty initial state.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) # Execute @@ -334,7 +342,7 @@ def test_node_ids_initial_state(self) -> None: def test_create_node_and_get_nodes(self) -> None: """Test creating a client node.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) node_ids = [] @@ -350,7 +358,7 @@ def test_create_node_and_get_nodes(self) -> None: def test_create_node_public_key(self) -> None: """Test creating a client node with public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" run_id = state.create_run(None, None, "9f86d08", {}) @@ -366,7 +374,7 @@ def test_create_node_public_key(self) -> None: def test_create_node_public_key_twice(self) -> None: """Test creating a client node with same public key twice.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" run_id = state.create_run(None, None, "9f86d08", {}) node_id = state.create_node(ping_interval=10, public_key=public_key) @@ -382,14 +390,14 @@ def test_create_node_public_key_twice(self) -> None: assert retrieved_node_id == node_id # Assert node_ids and public_key_to_node_id are synced - if isinstance(state, InMemoryState): + if isinstance(state, InMemoryLinkState): assert len(state.node_ids) == 1 assert len(state.public_key_to_node_id) == 1 def test_delete_node(self) -> None: """Test deleting a client 
node.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) node_id = state.create_node(ping_interval=10) @@ -403,7 +411,7 @@ def test_delete_node(self) -> None: def test_delete_node_public_key(self) -> None: """Test deleting a client node with public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" run_id = state.create_run(None, None, "9f86d08", {}) node_id = state.create_node(ping_interval=10, public_key=public_key) @@ -420,7 +428,7 @@ def test_delete_node_public_key(self) -> None: def test_delete_node_public_key_none(self) -> None: """Test deleting a client node with public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" run_id = state.create_run(None, None, "9f86d08", {}) node_id = 0 @@ -438,7 +446,7 @@ def test_delete_node_public_key_none(self) -> None: def test_delete_node_wrong_public_key(self) -> None: """Test deleting a client node with wrong public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" wrong_public_key = b"mock_mock" run_id = state.create_run(None, None, "9f86d08", {}) @@ -457,7 +465,7 @@ def test_delete_node_wrong_public_key(self) -> None: def test_get_node_id_wrong_public_key(self) -> None: """Test retrieving a client node with wrong public key.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() public_key = b"mock" wrong_public_key = b"mock_mock" run_id = state.create_run(None, None, "9f86d08", {}) @@ -474,7 +482,7 @@ def test_get_node_id_wrong_public_key(self) -> None: def test_get_nodes_invalid_run_id(self) -> None: """Test retrieving all node_ids with invalid run_id.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() state.create_run(None, None, "9f86d08", 
{}) invalid_run_id = 61016 state.create_node(ping_interval=10) @@ -488,7 +496,7 @@ def test_get_nodes_invalid_run_id(self) -> None: def test_num_task_ins(self) -> None: """Test if num_tasks returns correct number of not delivered task_ins.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) task_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) task_1 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) @@ -506,13 +514,25 @@ def test_num_task_ins(self) -> None: def test_num_task_res(self) -> None: """Test if num_tasks returns correct number of not delivered task_res.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) + + task_ins_0 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins_1 = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins_id_0 = state.store_task_ins(task_ins_0) + task_ins_id_1 = state.store_task_ins(task_ins_1) + task_0 = create_task_res( - producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id_0)], + run_id=run_id, ) task_1 = create_task_res( - producer_node_id=0, anonymous=True, ancestry=["1"], run_id=run_id + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id_1)], + run_id=run_id, ) # Store two tasks @@ -528,7 +548,7 @@ def test_num_task_res(self) -> None: def test_server_private_public_key(self) -> None: """Test get server private and public key after inserting.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() private_key, public_key = generate_key_pairs() private_key_bytes = private_key_to_bytes(private_key) public_key_bytes = public_key_to_bytes(public_key) @@ -545,7 +565,7 @@ def test_server_private_public_key(self) -> None: def 
test_server_private_public_key_none(self) -> None: """Test get server private and public key without inserting.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() # Execute server_private_key = state.get_server_private_key() @@ -558,7 +578,7 @@ def test_server_private_public_key_none(self) -> None: def test_store_server_private_public_key_twice(self) -> None: """Test inserting private and public key twice.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() private_key, public_key = generate_key_pairs() private_key_bytes = private_key_to_bytes(private_key) public_key_bytes = public_key_to_bytes(public_key) @@ -575,39 +595,39 @@ def test_store_server_private_public_key_twice(self) -> None: new_private_key_bytes, new_public_key_bytes ) - def test_client_public_keys(self) -> None: - """Test store_client_public_keys and get_client_public_keys from state.""" + def test_node_public_keys(self) -> None: + """Test store_node_public_keys and get_node_public_keys from state.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() key_pairs = [generate_key_pairs() for _ in range(3)] public_keys = {public_key_to_bytes(pair[1]) for pair in key_pairs} # Execute - state.store_client_public_keys(public_keys) - client_public_keys = state.get_client_public_keys() + state.store_node_public_keys(public_keys) + node_public_keys = state.get_node_public_keys() # Assert - assert client_public_keys == public_keys + assert node_public_keys == public_keys - def test_client_public_key(self) -> None: - """Test store_client_public_key and get_client_public_keys from state.""" + def test_node_public_key(self) -> None: + """Test store_node_public_key and get_node_public_keys from state.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() key_pairs = [generate_key_pairs() for _ in range(3)] public_keys = {public_key_to_bytes(pair[1]) for 
pair in key_pairs} # Execute for public_key in public_keys: - state.store_client_public_key(public_key) - client_public_keys = state.get_client_public_keys() + state.store_node_public_key(public_key) + node_public_keys = state.get_node_public_keys() # Assert - assert client_public_keys == public_keys + assert node_public_keys == public_keys def test_acknowledge_ping(self) -> None: """Test if acknowledge_ping works and if get_nodes return online nodes.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) node_ids = [state.create_node(ping_interval=10) for _ in range(100)] for node_id in node_ids[:70]: @@ -626,7 +646,7 @@ def test_acknowledge_ping(self) -> None: def test_node_unavailable_error(self) -> None: """Test if get_task_res return TaskRes containing node unavailable error.""" # Prepare - state: State = self.state_factory() + state: LinkState = self.state_factory() run_id = state.create_run(None, None, "9f86d08", {}) node_id_0 = state.create_node(ping_interval=90) node_id_1 = state.create_node(ping_interval=30) @@ -646,7 +666,7 @@ def test_node_unavailable_error(self) -> None: # Create and store TaskRes task_res_0 = create_task_res( - producer_node_id=100, + producer_node_id=node_id_0, anonymous=False, ancestry=[str(task_id_0)], run_id=run_id, @@ -655,9 +675,9 @@ def test_node_unavailable_error(self) -> None: # Execute current_time = time.time() - task_res_list: List[TaskRes] = [] + task_res_list: list[TaskRes] = [] with patch("time.time", side_effect=lambda: current_time + 50): - task_res_list = state.get_task_res({task_id_0, task_id_1}, limit=None) + task_res_list = state.get_task_res({task_id_0, task_id_1}) # Assert assert len(task_res_list) == 2 @@ -665,6 +685,222 @@ def test_node_unavailable_error(self) -> None: assert err_taskres.task.HasField("error") assert err_taskres.task.error.code == ErrorCode.NODE_UNAVAILABLE + def 
test_store_task_res_task_ins_expired(self) -> None: + """Test behavior of store_task_res when the TaskIns it references is expired.""" + # Prepare + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + + task_ins = create_task_ins(consumer_node_id=0, anonymous=True, run_id=run_id) + task_ins.task.created_at = time.time() - task_ins.task.ttl + 0.5 + task_ins_id = state.store_task_ins(task_ins) + + with patch( + "time.time", + side_effect=lambda: task_ins.task.created_at + task_ins.task.ttl + 0.1, + ): # Expired by 0.1 seconds + task = create_task_res( + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id)], + run_id=run_id, + ) + + # Execute + result = state.store_task_res(task) + + # Assert + assert result is None + + def test_store_task_res_limit_ttl(self) -> None: + """Test the behavior of store_task_res regarding the TTL limit of TaskRes.""" + current_time = time.time() + + test_cases = [ + ( + current_time - 5, + 10, + current_time - 2, + 6, + True, + ), # TaskRes within allowed TTL + ( + current_time - 5, + 10, + current_time - 2, + 15, + False, + ), # TaskRes TTL exceeds max allowed TTL + ] + + for ( + task_ins_created_at, + task_ins_ttl, + task_res_created_at, + task_res_ttl, + expected_store_result, + ) in test_cases: + + # Prepare + state: LinkState = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + + task_ins = create_task_ins( + consumer_node_id=0, anonymous=True, run_id=run_id + ) + task_ins.task.created_at = task_ins_created_at + task_ins.task.ttl = task_ins_ttl + task_ins_id = state.store_task_ins(task_ins) + + task_res = create_task_res( + producer_node_id=0, + anonymous=True, + ancestry=[str(task_ins_id)], + run_id=run_id, + ) + task_res.task.created_at = task_res_created_at + task_res.task.ttl = task_res_ttl + + # Execute + res = state.store_task_res(task_res) + + # Assert + if expected_store_result: + assert res is not None + else: + assert res is None + + def 
test_get_task_ins_not_return_expired(self) -> None: + """Test get_task_ins not to return expired tasks.""" + # Prepare + consumer_node_id = 1 + state = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + task_ins = create_task_ins( + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + ) + task_ins.task.created_at = time.time() - 5 + task_ins.task.ttl = 5.0 + + # Execute + state.store_task_ins(task_ins=task_ins) + + # Assert + with patch("time.time", side_effect=lambda: task_ins.task.created_at + 6.1): + task_ins_list = state.get_task_ins(node_id=1, limit=None) + assert len(task_ins_list) == 0 + + def test_get_task_res_not_return_expired(self) -> None: + """Test get_task_res not to return TaskRes if its TaskIns is expired.""" + # Prepare + consumer_node_id = 1 + state = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + task_ins = create_task_ins( + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + ) + task_ins.task.created_at = time.time() - 5 + task_ins.task.ttl = 5.1 + + task_id = state.store_task_ins(task_ins=task_ins) + + task_res = create_task_res( + producer_node_id=1, + anonymous=False, + ancestry=[str(task_id)], + run_id=run_id, + ) + task_res.task.ttl = 0.1 + _ = state.store_task_res(task_res=task_res) + + with patch("time.time", side_effect=lambda: task_ins.task.created_at + 6.1): + # Execute + assert task_id is not None + task_res_list = state.get_task_res(task_ids={task_id}) + + # Assert + assert len(task_res_list) == 0 + + def test_get_task_res_returns_empty_for_missing_taskins(self) -> None: + """Test that get_task_res returns an empty result when the corresponding TaskIns + does not exist.""" + # Prepare + state = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + task_ins_id = "5b0a3fc2-edba-4525-a89a-04b83420b7c8" + + task_res = create_task_res( + producer_node_id=1, + anonymous=False, + ancestry=[str(task_ins_id)], + run_id=run_id, + ) 
+ _ = state.store_task_res(task_res=task_res) + + # Execute + task_res_list = state.get_task_res(task_ids={UUID(task_ins_id)}) + + # Assert + assert len(task_res_list) == 0 + + def test_get_task_res_return_if_not_expired(self) -> None: + """Test get_task_res to return TaskRes if its TaskIns exists and is not + expired.""" + # Prepare + consumer_node_id = 1 + state = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + task_ins = create_task_ins( + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + ) + task_ins.task.created_at = time.time() - 5 + task_ins.task.ttl = 7.1 + + task_id = state.store_task_ins(task_ins=task_ins) + + task_res = create_task_res( + producer_node_id=1, + anonymous=False, + ancestry=[str(task_id)], + run_id=run_id, + ) + task_res.task.ttl = 0.1 + _ = state.store_task_res(task_res=task_res) + + with patch("time.time", side_effect=lambda: task_ins.task.created_at + 6.1): + # Execute + assert task_id is not None + task_res_list = state.get_task_res(task_ids={task_id}) + + # Assert + assert len(task_res_list) != 0 + + def test_store_task_res_fail_if_consumer_producer_id_mismatch(self) -> None: + """Test store_task_res to fail if there is a mismatch between the + consumer_node_id of taskIns and the producer_node_id of taskRes.""" + # Prepare + consumer_node_id = 1 + state = self.state_factory() + run_id = state.create_run(None, None, "9f86d08", {}) + task_ins = create_task_ins( + consumer_node_id=consumer_node_id, anonymous=False, run_id=run_id + ) + + task_id = state.store_task_ins(task_ins=task_ins) + + task_res = create_task_res( + producer_node_id=100, # different than consumer_node_id + anonymous=False, + ancestry=[str(task_id)], + run_id=run_id, + ) + + # Execute + task_res_uuid = state.store_task_res(task_res=task_res) + + # Assert + assert task_res_uuid is None + def create_task_ins( consumer_node_id: int, @@ -698,7 +934,7 @@ def create_task_ins( def create_task_res( producer_node_id: int, 
anonymous: bool, - ancestry: List[str], + ancestry: list[str], run_id: int, ) -> TaskRes: """Create a TaskRes for testing.""" @@ -725,9 +961,9 @@ class InMemoryStateTest(StateTest): __test__ = True - def state_factory(self) -> State: + def state_factory(self) -> LinkState: """Return InMemoryState.""" - return InMemoryState() + return InMemoryLinkState() class SqliteInMemoryStateTest(StateTest, unittest.TestCase): @@ -735,9 +971,9 @@ class SqliteInMemoryStateTest(StateTest, unittest.TestCase): __test__ = True - def state_factory(self) -> SqliteState: + def state_factory(self) -> SqliteLinkState: """Return SqliteState with in-memory database.""" - state = SqliteState(":memory:") + state = SqliteLinkState(":memory:") state.initialize() return state @@ -758,11 +994,11 @@ class SqliteFileBasedTest(StateTest, unittest.TestCase): __test__ = True - def state_factory(self) -> SqliteState: + def state_factory(self) -> SqliteLinkState: """Return SqliteState with file-based database.""" # pylint: disable-next=consider-using-with,attribute-defined-outside-init self.tmp_file = tempfile.NamedTemporaryFile() - state = SqliteState(database_path=self.tmp_file.name) + state = SqliteLinkState(database_path=self.tmp_file.name) state.initialize() return state diff --git a/src/py/flwr/server/superlink/state/sqlite_state.py b/src/py/flwr/server/superlink/linkstate/sqlite_linkstate.py similarity index 71% rename from src/py/flwr/server/superlink/state/sqlite_state.py rename to src/py/flwr/server/superlink/linkstate/sqlite_linkstate.py index 93b3cd63ca7f..4344ce8b062d 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state.py +++ b/src/py/flwr/server/superlink/linkstate/sqlite_linkstate.py @@ -12,27 +12,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""SQLite based implemenation of server state.""" +"""SQLite based implemenation of the link state.""" +# pylint: disable=too-many-lines import json import re import sqlite3 import time -from logging import DEBUG, ERROR -from typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union, cast +from collections.abc import Sequence +from logging import DEBUG, ERROR, WARNING +from typing import Any, Optional, Union, cast from uuid import UUID, uuid4 from flwr.common import log, now -from flwr.common.constant import NODE_ID_NUM_BYTES, RUN_ID_NUM_BYTES +from flwr.common.constant import ( + MESSAGE_TTL_TOLERANCE, + NODE_ID_NUM_BYTES, + RUN_ID_NUM_BYTES, +) from flwr.common.typing import Run, UserConfig from flwr.proto.node_pb2 import Node # pylint: disable=E0611 from flwr.proto.recordset_pb2 import RecordSet # pylint: disable=E0611 from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 from flwr.server.utils.validator import validate_task_ins_or_res -from .state import State -from .utils import generate_rand_int_from_bytes, make_node_unavailable_taskres +from .linkstate import LinkState +from .utils import ( + convert_sint64_to_uint64, + convert_sint64_values_in_dict_to_uint64, + convert_uint64_to_sint64, + convert_uint64_values_in_dict_to_sint64, + generate_rand_int_from_bytes, + make_node_unavailable_taskres, +) SQL_CREATE_TABLE_NODE = """ CREATE TABLE IF NOT EXISTS node( @@ -110,11 +123,11 @@ ); """ -DictOrTuple = Union[Tuple[Any, ...], Dict[str, Any]] +DictOrTuple = Union[tuple[Any, ...], dict[str, Any]] -class SqliteState(State): # pylint: disable=R0904 - """SQLite-based state implementation.""" +class SqliteLinkState(LinkState): # pylint: disable=R0904 + """SQLite-based LinkState implementation.""" def __init__( self, @@ -131,13 +144,18 @@ def __init__( self.database_path = database_path self.conn: Optional[sqlite3.Connection] = None - def 
initialize(self, log_queries: bool = False) -> List[Tuple[str]]: + def initialize(self, log_queries: bool = False) -> list[tuple[str]]: """Create tables if they don't exist yet. Parameters ---------- log_queries : bool Log each query which is executed. + + Returns + ------- + list[tuple[str]] + The list of all tables in the DB. """ self.conn = sqlite3.connect(self.database_path) self.conn.execute("PRAGMA foreign_keys = ON;") @@ -162,10 +180,10 @@ def query( self, query: str, data: Optional[Union[Sequence[DictOrTuple], DictOrTuple]] = None, - ) -> List[Dict[str, Any]]: + ) -> list[dict[str, Any]]: """Execute a SQL query.""" if self.conn is None: - raise AttributeError("State is not initialized.") + raise AttributeError("LinkState is not initialized.") if data is None: data = [] @@ -198,9 +216,9 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: Usually, the Driver API calls this to schedule instructions. - Stores the value of the task_ins in the state and, if successful, returns the - task_id (UUID) of the task_ins. If, for any reason, storing the task_ins fails, - `None` is returned. + Stores the value of the task_ins in the link state and, if successful, + returns the task_id (UUID) of the task_ins. If, for any reason, storing + the task_ins fails, `None` is returned. 
Constraints ----------- @@ -222,6 +240,12 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: # Store TaskIns task_ins.task_id = str(task_id) data = (task_ins_to_dict(task_ins),) + + # Convert values from uint64 to sint64 for SQLite + convert_uint64_values_in_dict_to_sint64( + data[0], ["run_id", "producer_node_id", "consumer_node_id"] + ) + columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_ins VALUES({columns});" @@ -237,7 +261,7 @@ def store_task_ins(self, task_ins: TaskIns) -> Optional[UUID]: def get_task_ins( self, node_id: Optional[int], limit: Optional[int] - ) -> List[TaskIns]: + ) -> list[TaskIns]: """Get undelivered TaskIns for one node (either anonymous or with ID). Usually, the Fleet API calls this for Nodes planning to work on one or more @@ -271,7 +295,7 @@ def get_task_ins( ) raise AssertionError(msg) - data: Dict[str, Union[str, int]] = {} + data: dict[str, Union[str, int]] = {} if node_id is None: # Retrieve all anonymous Tasks @@ -281,8 +305,12 @@ def get_task_ins( WHERE consumer_anonymous == 1 AND consumer_node_id == 0 AND delivered_at = "" + AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL) """ else: + # Convert the uint64 value to sint64 for SQLite + data["node_id"] = convert_uint64_to_sint64(node_id) + # Retrieve all TaskIns for node_id query = """ SELECT task_id @@ -290,8 +318,8 @@ def get_task_ins( WHERE consumer_anonymous == 0 AND consumer_node_id == :node_id AND delivered_at = "" + AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL) """ - data["node_id"] = node_id if limit is not None: query += " LIMIT :limit" @@ -321,6 +349,12 @@ def get_task_ins( # Run query rows = self.query(query, data) + for row in rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + result = [dict_to_task_ins(row) for row in rows] return result @@ -350,9 +384,57 @@ def store_task_res(self, task_res: 
TaskRes) -> Optional[UUID]: # Create task_id task_id = uuid4() - # Store TaskIns + task_ins_id = task_res.task.ancestry[0] + task_ins = self.get_valid_task_ins(task_ins_id) + if task_ins is None: + log( + ERROR, + "Failed to store TaskRes: " + "TaskIns with task_id %s does not exist or has expired.", + task_ins_id, + ) + return None + + # Ensure that the consumer_id of taskIns matches the producer_id of taskRes. + if ( + task_ins + and task_res + and not (task_ins["consumer_anonymous"] or task_res.task.producer.anonymous) + and convert_sint64_to_uint64(task_ins["consumer_node_id"]) + != task_res.task.producer.node_id + ): + return None + + # Fail if the TaskRes TTL exceeds the + # expiration time of the TaskIns it replies to. + # Condition: TaskIns.created_at + TaskIns.ttl ≥ + # TaskRes.created_at + TaskRes.ttl + # A small tolerance is introduced to account + # for floating-point precision issues. + max_allowed_ttl = ( + task_ins["created_at"] + task_ins["ttl"] - task_res.task.created_at + ) + if task_res.task.ttl and ( + task_res.task.ttl - max_allowed_ttl > MESSAGE_TTL_TOLERANCE + ): + log( + WARNING, + "Received TaskRes with TTL %.2f " + "exceeding the allowed maximum TTL %.2f.", + task_res.task.ttl, + max_allowed_ttl, + ) + return None + + # Store TaskRes task_res.task_id = str(task_id) data = (task_res_to_dict(task_res),) + + # Convert values from uint64 to sint64 for SQLite + convert_uint64_values_in_dict_to_sint64( + data[0], ["run_id", "producer_node_id", "consumer_node_id"] + ) + columns = ", ".join([f":{key}" for key in data[0]]) query = f"INSERT INTO task_res VALUES({columns});" @@ -366,8 +448,8 @@ def store_task_res(self, task_res: TaskRes) -> Optional[UUID]: return task_id - # pylint: disable-next=R0914 - def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRes]: + # pylint: disable-next=R0912,R0915,R0914 + def get_task_res(self, task_ids: set[UUID]) -> list[TaskRes]: """Get TaskRes for task_ids. 
Usually, the Driver API calls this method to get results for instructions it has @@ -382,8 +464,34 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe will only take effect if enough task_ids are in the set AND are currently available. If `limit` is set, it has to be greater than zero. """ - if limit is not None and limit < 1: - raise AssertionError("`limit` must be >= 1") + # Check if corresponding TaskIns exists and is not expired + task_ids_placeholders = ",".join([f":id_{i}" for i in range(len(task_ids))]) + query = f""" + SELECT * + FROM task_ins + WHERE task_id IN ({task_ids_placeholders}) + AND (created_at + ttl) > CAST(strftime('%s', 'now') AS REAL) + """ + query += ";" + + task_ins_data = {} + for index, task_id in enumerate(task_ids): + task_ins_data[f"id_{index}"] = str(task_id) + + task_ins_rows = self.query(query, task_ins_data) + + if not task_ins_rows: + return [] + + for row in task_ins_rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + task_ins = dict_to_task_ins(row) + if task_ins.task.created_at + task_ins.task.ttl <= time.time(): + log(WARNING, "TaskIns with task_id %s is expired.", task_ins.task_id) + task_ids.remove(UUID(task_ins.task_id)) # Retrieve all anonymous Tasks if len(task_ids) == 0: @@ -397,11 +505,7 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe AND delivered_at = "" """ - data: Dict[str, Union[str, float, int]] = {} - - if limit is not None: - query += " LIMIT :limit" - data["limit"] = limit + data: dict[str, Union[str, float, int]] = {} query += ";" @@ -430,12 +534,18 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe # Run query rows = self.query(query, data) + for row in rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + 
result = [dict_to_task_res(row) for row in rows] # 1. Query: Fetch consumer_node_id of remaining task_ids # Assume the ancestry field only contains one element data.clear() - replied_task_ids: Set[UUID] = {UUID(str(row["ancestry"])) for row in rows} + replied_task_ids: set[UUID] = {UUID(str(row["ancestry"])) for row in rows} remaining_task_ids = task_ids - replied_task_ids placeholders = ",".join([f":id_{i}" for i in range(len(remaining_task_ids))]) query = f""" @@ -471,8 +581,12 @@ def get_task_res(self, task_ids: Set[UUID], limit: Optional[int]) -> List[TaskRe # Make TaskRes containing node unavailabe error for row in task_ins_rows: - if limit and len(result) == limit: - break + for row in rows: + # Convert values from sint64 to uint64 + convert_sint64_values_in_dict_to_uint64( + row, ["run_id", "producer_node_id", "consumer_node_id"] + ) + task_ins = dict_to_task_ins(row) err_taskres = make_node_unavailable_taskres( ref_taskins=task_ins, @@ -499,10 +613,10 @@ def num_task_res(self) -> int: """ query = "SELECT count(*) AS num FROM task_res;" rows = self.query(query) - result: Dict[str, int] = rows[0] + result: dict[str, int] = rows[0] return result["num"] - def delete_tasks(self, task_ids: Set[UUID]) -> None: + def delete_tasks(self, task_ids: set[UUID]) -> None: """Delete all delivered TaskIns/TaskRes pairs.""" ids = list(task_ids) if len(ids) == 0: @@ -531,7 +645,7 @@ def delete_tasks(self, task_ids: Set[UUID]) -> None: """ if self.conn is None: - raise AttributeError("State not intitialized") + raise AttributeError("LinkState not intitialized") with self.conn: self.conn.execute(query_1, data) @@ -542,9 +656,12 @@ def delete_tasks(self, task_ids: Set[UUID]) -> None: def create_node( self, ping_interval: float, public_key: Optional[bytes] = None ) -> int: - """Create, store in state, and return `node_id`.""" - # Sample a random int64 as node_id - node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) + """Create, store in the link state, and return 
`node_id`.""" + # Sample a random uint64 as node_id + uint64_node_id = generate_rand_int_from_bytes(NODE_ID_NUM_BYTES) + + # Convert the uint64 value to sint64 for SQLite + sint64_node_id = convert_uint64_to_sint64(uint64_node_id) query = "SELECT node_id FROM node WHERE public_key = :public_key;" row = self.query(query, {"public_key": public_key}) @@ -561,24 +678,35 @@ def create_node( try: self.query( - query, (node_id, time.time() + ping_interval, ping_interval, public_key) + query, + ( + sint64_node_id, + time.time() + ping_interval, + ping_interval, + public_key, + ), ) except sqlite3.IntegrityError: log(ERROR, "Unexpected node registration failure.") return 0 - return node_id + + # Note: we need to return the uint64 value of the node_id + return uint64_node_id def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: - """Delete a client node.""" + """Delete a node.""" + # Convert the uint64 value to sint64 for SQLite + sint64_node_id = convert_uint64_to_sint64(node_id) + query = "DELETE FROM node WHERE node_id = ?" - params = (node_id,) + params = (sint64_node_id,) if public_key is not None: query += " AND public_key = ?" params += (public_key,) # type: ignore if self.conn is None: - raise AttributeError("State is not initialized.") + raise AttributeError("LinkState is not initialized.") try: with self.conn: @@ -588,7 +716,7 @@ def delete_node(self, node_id: int, public_key: Optional[bytes] = None) -> None: except KeyError as exc: log(ERROR, {"query": query, "data": params, "exception": exc}) - def get_nodes(self, run_id: int) -> Set[int]: + def get_nodes(self, run_id: int) -> set[int]: """Retrieve all currently stored node IDs as a set. Constraints @@ -596,24 +724,33 @@ def get_nodes(self, run_id: int) -> Set[int]: If the provided `run_id` does not exist or has no matching nodes, an empty `Set` MUST be returned. 
""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) + # Validate run ID query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" - if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: + if self.query(query, (sint64_run_id,))[0]["COUNT(*)"] == 0: return set() # Get nodes query = "SELECT node_id FROM node WHERE online_until > ?;" rows = self.query(query, (time.time(),)) - result: Set[int] = {row["node_id"] for row in rows} + + # Convert sint64 node_ids to uint64 + result: set[int] = {convert_sint64_to_uint64(row["node_id"]) for row in rows} return result - def get_node_id(self, client_public_key: bytes) -> Optional[int]: - """Retrieve stored `node_id` filtered by `client_public_keys`.""" + def get_node_id(self, node_public_key: bytes) -> Optional[int]: + """Retrieve stored `node_id` filtered by `node_public_keys`.""" query = "SELECT node_id FROM node WHERE public_key = :public_key;" - row = self.query(query, {"public_key": client_public_key}) + row = self.query(query, {"public_key": node_public_key}) if len(row) > 0: node_id: int = row[0]["node_id"] - return node_id + + # Convert the sint64 value to uint64 after reading from SQLite + uint64_node_id = convert_sint64_to_uint64(node_id) + + return uint64_node_id return None def create_run( @@ -625,12 +762,15 @@ def create_run( ) -> int: """Create a new run for the specified `fab_id` and `fab_version`.""" # Sample a random int64 as run_id - run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) + uint64_run_id = generate_rand_int_from_bytes(RUN_ID_NUM_BYTES) + + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(uint64_run_id) # Check conflicts query = "SELECT COUNT(*) FROM run WHERE run_id = ?;" - # If run_id does not exist - if self.query(query, (run_id,))[0]["COUNT(*)"] == 0: + # If sint64_run_id does not exist + if self.query(query, (sint64_run_id,))[0]["COUNT(*)"] == 0: query = ( "INSERT INTO run " "(run_id, fab_id, 
fab_version, fab_hash, override_config)" @@ -638,21 +778,29 @@ def create_run( ) if fab_hash: self.query( - query, (run_id, "", "", fab_hash, json.dumps(override_config)) + query, + (sint64_run_id, "", "", fab_hash, json.dumps(override_config)), ) else: self.query( query, - (run_id, fab_id, fab_version, "", json.dumps(override_config)), + ( + sint64_run_id, + fab_id, + fab_version, + "", + json.dumps(override_config), + ), ) - return run_id + # Note: we need to return the uint64 value of the run_id + return uint64_run_id log(ERROR, "Unexpected run creation failure.") return 0 def store_server_private_public_key( self, private_key: bytes, public_key: bytes ) -> None: - """Store `server_private_key` and `server_public_key` in state.""" + """Store `server_private_key` and `server_public_key` in the link state.""" query = "SELECT COUNT(*) FROM credential" count = self.query(query)[0]["COUNT(*)"] if count < 1: @@ -684,56 +832,89 @@ def get_server_public_key(self) -> Optional[bytes]: public_key = None return public_key - def store_client_public_keys(self, public_keys: Set[bytes]) -> None: - """Store a set of `client_public_keys` in state.""" + def store_node_public_keys(self, public_keys: set[bytes]) -> None: + """Store a set of `node_public_keys` in the link state.""" query = "INSERT INTO public_key (public_key) VALUES (?)" data = [(key,) for key in public_keys] self.query(query, data) - def store_client_public_key(self, public_key: bytes) -> None: - """Store a `client_public_key` in state.""" + def store_node_public_key(self, public_key: bytes) -> None: + """Store a `node_public_key` in the link state.""" query = "INSERT INTO public_key (public_key) VALUES (:public_key)" self.query(query, {"public_key": public_key}) - def get_client_public_keys(self) -> Set[bytes]: - """Retrieve all currently stored `client_public_keys` as a set.""" + def get_node_public_keys(self) -> set[bytes]: + """Retrieve all currently stored `node_public_keys` as a set.""" query = "SELECT 
public_key FROM public_key" rows = self.query(query) - result: Set[bytes] = {row["public_key"] for row in rows} + result: set[bytes] = {row["public_key"] for row in rows} return result def get_run(self, run_id: int) -> Optional[Run]: """Retrieve information about the run with the specified `run_id`.""" + # Convert the uint64 value to sint64 for SQLite + sint64_run_id = convert_uint64_to_sint64(run_id) query = "SELECT * FROM run WHERE run_id = ?;" - try: - row = self.query(query, (run_id,))[0] + rows = self.query(query, (sint64_run_id,)) + if rows: + row = rows[0] return Run( - run_id=run_id, + run_id=convert_sint64_to_uint64(row["run_id"]), fab_id=row["fab_id"], fab_version=row["fab_version"], fab_hash=row["fab_hash"], override_config=json.loads(row["override_config"]), ) - except sqlite3.IntegrityError: - log(ERROR, "`run_id` does not exist.") - return None + log(ERROR, "`run_id` does not exist.") + return None def acknowledge_ping(self, node_id: int, ping_interval: float) -> bool: """Acknowledge a ping received from a node, serving as a heartbeat.""" + sint64_node_id = convert_uint64_to_sint64(node_id) + # Update `online_until` and `ping_interval` for the given `node_id` query = "UPDATE node SET online_until = ?, ping_interval = ? WHERE node_id = ?;" try: - self.query(query, (time.time() + ping_interval, ping_interval, node_id)) + self.query( + query, (time.time() + ping_interval, ping_interval, sint64_node_id) + ) return True except sqlite3.IntegrityError: log(ERROR, "`node_id` does not exist.") return False + def get_valid_task_ins(self, task_id: str) -> Optional[dict[str, Any]]: + """Check if the TaskIns exists and is valid (not expired). + + Return TaskIns if valid. 
+ """ + query = """ + SELECT * + FROM task_ins + WHERE task_id = :task_id + """ + data = {"task_id": task_id} + rows = self.query(query, data) + if not rows: + # TaskIns does not exist + return None + + task_ins = rows[0] + created_at = task_ins["created_at"] + ttl = task_ins["ttl"] + current_time = time.time() + + # Check if TaskIns is expired + if ttl is not None and created_at + ttl <= current_time: + return None + + return task_ins + def dict_factory( cursor: sqlite3.Cursor, row: sqlite3.Row, -) -> Dict[str, Any]: +) -> dict[str, Any]: """Turn SQLite results into dicts. Less efficent for retrival of large amounts of data but easier to use. @@ -742,7 +923,7 @@ def dict_factory( return dict(zip(fields, row)) -def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: +def task_ins_to_dict(task_msg: TaskIns) -> dict[str, Any]: """Transform TaskIns to dict.""" result = { "task_id": task_msg.task_id, @@ -763,7 +944,7 @@ def task_ins_to_dict(task_msg: TaskIns) -> Dict[str, Any]: return result -def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: +def task_res_to_dict(task_msg: TaskRes) -> dict[str, Any]: """Transform TaskRes to dict.""" result = { "task_id": task_msg.task_id, @@ -784,7 +965,7 @@ def task_res_to_dict(task_msg: TaskRes) -> Dict[str, Any]: return result -def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: +def dict_to_task_ins(task_dict: dict[str, Any]) -> TaskIns: """Turn task_dict into protobuf message.""" recordset = RecordSet() recordset.ParseFromString(task_dict["recordset"]) @@ -814,7 +995,7 @@ def dict_to_task_ins(task_dict: Dict[str, Any]) -> TaskIns: return result -def dict_to_task_res(task_dict: Dict[str, Any]) -> TaskRes: +def dict_to_task_res(task_dict: dict[str, Any]) -> TaskRes: """Turn task_dict into protobuf message.""" recordset = RecordSet() recordset.ParseFromString(task_dict["recordset"]) diff --git a/src/py/flwr/server/superlink/state/sqlite_state_test.py 
b/src/py/flwr/server/superlink/linkstate/sqlite_linkstate_test.py similarity index 91% rename from src/py/flwr/server/superlink/state/sqlite_state_test.py rename to src/py/flwr/server/superlink/linkstate/sqlite_linkstate_test.py index 10e12da96bd5..ed2960ef76fa 100644 --- a/src/py/flwr/server/superlink/state/sqlite_state_test.py +++ b/src/py/flwr/server/superlink/linkstate/sqlite_linkstate_test.py @@ -17,8 +17,8 @@ import unittest -from flwr.server.superlink.state.sqlite_state import task_ins_to_dict -from flwr.server.superlink.state.state_test import create_task_ins +from flwr.server.superlink.linkstate.linkstate_test import create_task_ins +from flwr.server.superlink.linkstate.sqlite_linkstate import task_ins_to_dict class SqliteStateTest(unittest.TestCase): diff --git a/src/py/flwr/server/superlink/linkstate/utils.py b/src/py/flwr/server/superlink/linkstate/utils.py new file mode 100644 index 000000000000..db44719c6a8a --- /dev/null +++ b/src/py/flwr/server/superlink/linkstate/utils.py @@ -0,0 +1,148 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utility functions for State.""" + + +import time +from logging import ERROR +from os import urandom +from uuid import uuid4 + +from flwr.common import log +from flwr.common.constant import ErrorCode +from flwr.proto.error_pb2 import Error # pylint: disable=E0611 +from flwr.proto.node_pb2 import Node # pylint: disable=E0611 +from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 + +NODE_UNAVAILABLE_ERROR_REASON = ( + "Error: Node Unavailable - The destination node is currently unavailable. " + "It exceeds the time limit specified in its last ping." +) + + +def generate_rand_int_from_bytes(num_bytes: int) -> int: + """Generate a random unsigned integer from `num_bytes` bytes.""" + return int.from_bytes(urandom(num_bytes), "little", signed=False) + + +def convert_uint64_to_sint64(u: int) -> int: + """Convert a uint64 value to a sint64 value with the same bit sequence. + + Parameters + ---------- + u : int + The unsigned 64-bit integer to convert. + + Returns + ------- + int + The signed 64-bit integer equivalent. + + The signed 64-bit integer will have the same bit pattern as the + unsigned 64-bit integer but may have a different decimal value. + + For numbers within the range [0, `sint64` max value], the decimal + value remains the same. However, for numbers greater than the `sint64` + max value, the decimal value will differ due to the wraparound caused + by the sign bit. + """ + if u >= (1 << 63): + return u - (1 << 64) + return u + + +def convert_sint64_to_uint64(s: int) -> int: + """Convert a sint64 value to a uint64 value with the same bit sequence. + + Parameters + ---------- + s : int + The signed 64-bit integer to convert. + + Returns + ------- + int + The unsigned 64-bit integer equivalent. + + The unsigned 64-bit integer will have the same bit pattern as the + signed 64-bit integer but may have a different decimal value. 
+ + For negative `sint64` values, the conversion adds 2^64 to the + signed value to obtain the equivalent `uint64` value. For non-negative + `sint64` values, the decimal value remains unchanged in the `uint64` + representation. + """ + if s < 0: + return s + (1 << 64) + return s + + +def convert_uint64_values_in_dict_to_sint64( + data_dict: dict[str, int], keys: list[str] +) -> None: + """Convert uint64 values to sint64 in the given dictionary. + + Parameters + ---------- + data_dict : dict[str, int] + A dictionary where the values are integers to be converted. + keys : list[str] + A list of keys in the dictionary whose values need to be converted. + """ + for key in keys: + if key in data_dict: + data_dict[key] = convert_uint64_to_sint64(data_dict[key]) + + +def convert_sint64_values_in_dict_to_uint64( + data_dict: dict[str, int], keys: list[str] +) -> None: + """Convert sint64 values to uint64 in the given dictionary. + + Parameters + ---------- + data_dict : dict[str, int] + A dictionary where the values are integers to be converted. + keys : list[str] + A list of keys in the dictionary whose values need to be converted. 
+ """ + for key in keys: + if key in data_dict: + data_dict[key] = convert_sint64_to_uint64(data_dict[key]) + + +def make_node_unavailable_taskres(ref_taskins: TaskIns) -> TaskRes: + """Generate a TaskRes with a node unavailable error from a TaskIns.""" + current_time = time.time() + ttl = ref_taskins.task.ttl - (current_time - ref_taskins.task.created_at) + if ttl < 0: + log(ERROR, "Creating TaskRes for TaskIns that exceeds its TTL.") + ttl = 0 + return TaskRes( + task_id=str(uuid4()), + group_id=ref_taskins.group_id, + run_id=ref_taskins.run_id, + task=Task( + producer=Node(node_id=ref_taskins.task.consumer.node_id, anonymous=False), + consumer=Node(node_id=ref_taskins.task.producer.node_id, anonymous=False), + created_at=current_time, + ttl=ttl, + ancestry=[ref_taskins.task_id], + task_type=ref_taskins.task.task_type, + error=Error( + code=ErrorCode.NODE_UNAVAILABLE, reason=NODE_UNAVAILABLE_ERROR_REASON + ), + ), + ) diff --git a/src/py/flwr/server/superlink/linkstate/utils_test.py b/src/py/flwr/server/superlink/linkstate/utils_test.py new file mode 100644 index 000000000000..d55e2ffd9aa3 --- /dev/null +++ b/src/py/flwr/server/superlink/linkstate/utils_test.py @@ -0,0 +1,150 @@ +# Copyright 2024 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Utils tests.""" + +import unittest + +from parameterized import parameterized + +from .utils import ( + convert_sint64_to_uint64, + convert_sint64_values_in_dict_to_uint64, + convert_uint64_to_sint64, + convert_uint64_values_in_dict_to_sint64, + generate_rand_int_from_bytes, +) + + +class UtilsTest(unittest.TestCase): + """Test utils code.""" + + @parameterized.expand( # type: ignore + [ + # Test values within the positive range of sint64 (below 2^63) + (0, 0), # Minimum positive value + (1, 1), # 1 remains 1 in both uint64 and sint64 + (2**62, 2**62), # Mid-range positive value + (2**63 - 1, 2**63 - 1), # Maximum positive value for sint64 + # Test values at or above 2^63 (become negative in sint64) + (2**63, -(2**63)), # Minimum negative value for sint64 + (2**63 + 1, -(2**63) + 1), # Slightly above the boundary + (9223372036854775811, -9223372036854775805), # Some value > sint64 max + (2**64 - 1, -1), # Maximum uint64 value becomes -1 in sint64 + ] + ) + def test_convert_uint64_to_sint64(self, before: int, after: int) -> None: + """Test conversion from uint64 to sint64.""" + self.assertEqual(convert_uint64_to_sint64(before), after) + + @parameterized.expand( # type: ignore + [ + # Test values within the negative range of sint64 + (-(2**63), 2**63), # Minimum sint64 value becomes 2^63 in uint64 + (-(2**63) + 1, 2**63 + 1), # Slightly above the minimum + (-9223372036854775805, 9223372036854775811), # Some value > sint64 max + # Test zero-adjacent inputs + (-1, 2**64 - 1), # -1 in sint64 becomes 2^64 - 1 in uint64 + (0, 0), # 0 remains 0 in both sint64 and uint64 + (1, 1), # 1 remains 1 in both sint64 and uint64 + # Test values within the positive range of sint64 + (2**63 - 1, 2**63 - 1), # Maximum positive value in sint64 + # Test boundary and maximum uint64 value + (2**63, 2**63), # Exact boundary value for sint64 + (2**64 - 1, 2**64 - 1), # Maximum uint64 value, stays the same + 
] + ) + def test_sint64_to_uint64(self, before: int, after: int) -> None: + """Test conversion from sint64 to uint64.""" + self.assertEqual(convert_sint64_to_uint64(before), after) + + @parameterized.expand( # type: ignore + [ + (0), + (1), + (2**62), + (2**63 - 1), + (2**63), + (2**63 + 1), + (9223372036854775811), + (2**64 - 1), + ] + ) + def test_uint64_to_sint64_to_uint64(self, expected: int) -> None: + """Test conversion from sint64 to uint64.""" + actual = convert_sint64_to_uint64(convert_uint64_to_sint64(expected)) + self.assertEqual(expected, actual) + + @parameterized.expand( # type: ignore + [ + # Test cases with uint64 values + ( + {"a": 0, "b": 2**63 - 1, "c": 2**63, "d": 2**64 - 1}, + ["a", "b", "c", "d"], + {"a": 0, "b": 2**63 - 1, "c": -(2**63), "d": -1}, + ), + ( + {"a": 1, "b": 2**62, "c": 2**63 + 1}, + ["a", "b", "c"], + {"a": 1, "b": 2**62, "c": -(2**63) + 1}, + ), + # Edge cases with mixed uint64 values and keys + ( + {"a": 2**64 - 1, "b": 12345, "c": 0}, + ["a", "b"], + {"a": -1, "b": 12345, "c": 0}, + ), + ] + ) + def test_convert_uint64_values_in_dict_to_sint64( + self, input_dict: dict[str, int], keys: list[str], expected_dict: dict[str, int] + ) -> None: + """Test uint64 to sint64 conversion in a dictionary.""" + convert_uint64_values_in_dict_to_sint64(input_dict, keys) + self.assertEqual(input_dict, expected_dict) + + @parameterized.expand( # type: ignore + [ + # Test cases with sint64 values + ( + {"a": 0, "b": 2**63 - 1, "c": -(2**63), "d": -1}, + ["a", "b", "c", "d"], + {"a": 0, "b": 2**63 - 1, "c": 2**63, "d": 2**64 - 1}, + ), + ( + {"a": -1, "b": -(2**63) + 1, "c": 12345}, + ["a", "b", "c"], + {"a": 2**64 - 1, "b": 2**63 + 1, "c": 12345}, + ), + # Edge cases with mixed sint64 values and keys + ( + {"a": -1, "b": 12345, "c": 0}, + ["a", "b"], + {"a": 2**64 - 1, "b": 12345, "c": 0}, + ), + ] + ) + def test_convert_sint64_values_in_dict_to_uint64( + self, input_dict: dict[str, int], keys: list[str], expected_dict: dict[str, int] + ) -> 
None: + """Test sint64 to uint64 conversion in a dictionary.""" + convert_sint64_values_in_dict_to_uint64(input_dict, keys) + self.assertEqual(input_dict, expected_dict) + + def test_generate_rand_int_from_bytes_unsigned_int(self) -> None: + """Test that the generated integer is unsigned (non-negative).""" + for num_bytes in range(1, 9): + with self.subTest(num_bytes=num_bytes): + rand_int = generate_rand_int_from_bytes(num_bytes) + self.assertGreaterEqual(rand_int, 0) diff --git a/src/py/flwr/server/superlink/state/utils.py b/src/py/flwr/server/superlink/state/utils.py deleted file mode 100644 index b12a87ac998d..000000000000 --- a/src/py/flwr/server/superlink/state/utils.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2024 Flower Labs GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Utility functions for State.""" - - -import time -from logging import ERROR -from os import urandom -from uuid import uuid4 - -from flwr.common import log -from flwr.common.constant import ErrorCode -from flwr.proto.error_pb2 import Error # pylint: disable=E0611 -from flwr.proto.node_pb2 import Node # pylint: disable=E0611 -from flwr.proto.task_pb2 import Task, TaskIns, TaskRes # pylint: disable=E0611 - -NODE_UNAVAILABLE_ERROR_REASON = ( - "Error: Node Unavailable - The destination node is currently unavailable. " - "It exceeds the time limit specified in its last ping." 
-) - - -def generate_rand_int_from_bytes(num_bytes: int) -> int: - """Generate a random `num_bytes` integer.""" - return int.from_bytes(urandom(num_bytes), "little", signed=True) - - -def make_node_unavailable_taskres(ref_taskins: TaskIns) -> TaskRes: - """Generate a TaskRes with a node unavailable error from a TaskIns.""" - current_time = time.time() - ttl = ref_taskins.task.ttl - (current_time - ref_taskins.task.created_at) - if ttl < 0: - log(ERROR, "Creating TaskRes for TaskIns that exceeds its TTL.") - ttl = 0 - return TaskRes( - task_id=str(uuid4()), - group_id=ref_taskins.group_id, - run_id=ref_taskins.run_id, - task=Task( - producer=Node(node_id=ref_taskins.task.consumer.node_id, anonymous=False), - consumer=Node(node_id=ref_taskins.task.producer.node_id, anonymous=False), - created_at=current_time, - ttl=ttl, - ancestry=[ref_taskins.task_id], - task_type=ref_taskins.task.task_type, - error=Error( - code=ErrorCode.NODE_UNAVAILABLE, reason=NODE_UNAVAILABLE_ERROR_REASON - ), - ), - ) diff --git a/src/py/flwr/server/utils/tensorboard.py b/src/py/flwr/server/utils/tensorboard.py index 5d38fc159657..281e8949c53c 100644 --- a/src/py/flwr/server/utils/tensorboard.py +++ b/src/py/flwr/server/utils/tensorboard.py @@ -18,7 +18,7 @@ import os from datetime import datetime from logging import WARN -from typing import Callable, Dict, List, Optional, Tuple, Union, cast +from typing import Callable, Optional, Union, cast from flwr.common import EvaluateRes, Scalar from flwr.common.logger import log @@ -92,9 +92,9 @@ class TBWrapper(strategy_class): # type: ignore def aggregate_evaluate( self, server_round: int, - results: List[Tuple[ClientProxy, EvaluateRes]], - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], - ) -> Tuple[Optional[float], Dict[str, Scalar]]: + results: list[tuple[ClientProxy, EvaluateRes]], + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> tuple[Optional[float], dict[str, Scalar]]: """Hooks into 
aggregate_evaluate for TensorBoard logging purpose.""" # Execute decorated function and extract results for logging # They will be returned at the end of this function but also diff --git a/src/py/flwr/server/utils/validator.py b/src/py/flwr/server/utils/validator.py index c0b0ec85761c..01f926c4985d 100644 --- a/src/py/flwr/server/utils/validator.py +++ b/src/py/flwr/server/utils/validator.py @@ -15,13 +15,14 @@ """Validators.""" -from typing import List, Union +import time +from typing import Union from flwr.proto.task_pb2 import TaskIns, TaskRes # pylint: disable=E0611 # pylint: disable-next=too-many-branches,too-many-statements -def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str]: +def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> list[str]: """Validate a TaskIns or TaskRes.""" validation_errors = [] @@ -47,6 +48,11 @@ def validate_task_ins_or_res(tasks_ins_res: Union[TaskIns, TaskRes]) -> List[str # unix timestamp of 27 March 2024 00h:00m:00s UTC validation_errors.append("`pushed_at` is not a recent timestamp") + # Verify TTL and created_at time + current_time = time.time() + if tasks_ins_res.task.created_at + tasks_ins_res.task.ttl <= current_time: + validation_errors.append("Task TTL has expired") + # TaskIns specific if isinstance(tasks_ins_res, TaskIns): # Task producer diff --git a/src/py/flwr/server/utils/validator_test.py b/src/py/flwr/server/utils/validator_test.py index 61fe094c23d4..ce8e3636467c 100644 --- a/src/py/flwr/server/utils/validator_test.py +++ b/src/py/flwr/server/utils/validator_test.py @@ -17,7 +17,6 @@ import time import unittest -from typing import List, Tuple from flwr.common import DEFAULT_TTL from flwr.proto.node_pb2 import Node # pylint: disable=E0611 @@ -52,12 +51,12 @@ def test_is_valid_task_res(self) -> None: """Test is_valid task_res.""" # Prepare # (producer_node_id, anonymous, ancestry) - valid_res: List[Tuple[int, bool, List[str]]] = [ + valid_res: list[tuple[int, bool, 
list[str]]] = [ (0, True, ["1"]), (1, False, ["1"]), ] - invalid_res: List[Tuple[int, bool, List[str]]] = [ + invalid_res: list[tuple[int, bool, list[str]]] = [ (0, False, []), (0, False, ["1"]), (0, True, []), @@ -77,6 +76,24 @@ def test_is_valid_task_res(self) -> None: val_errors = validate_task_ins_or_res(msg) self.assertTrue(val_errors, (producer_node_id, anonymous, ancestry)) + def test_task_ttl_expired(self) -> None: + """Test validation for expired Task TTL.""" + # Prepare an expired TaskIns + expired_task_ins = create_task_ins(0, True) + expired_task_ins.task.created_at = time.time() - 10 # 10 seconds ago + expired_task_ins.task.ttl = 6 # 6 seconds TTL + + expired_task_res = create_task_res(0, True, ["1"]) + expired_task_res.task.created_at = time.time() - 10 # 10 seconds ago + expired_task_res.task.ttl = 6 # 6 seconds TTL + + # Execute & Assert + val_errors_ins = validate_task_ins_or_res(expired_task_ins) + self.assertIn("Task TTL has expired", val_errors_ins) + + val_errors_res = validate_task_ins_or_res(expired_task_res) + self.assertIn("Task TTL has expired", val_errors_res) + def create_task_ins( consumer_node_id: int, @@ -110,7 +127,7 @@ def create_task_ins( def create_task_res( producer_node_id: int, anonymous: bool, - ancestry: List[str], + ancestry: list[str], ) -> TaskRes: """Create a TaskRes for testing.""" task_res = TaskRes( diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py index 82d8d5d4ccb6..484a747292d5 100644 --- a/src/py/flwr/server/workflow/default_workflows.py +++ b/src/py/flwr/server/workflow/default_workflows.py @@ -18,7 +18,7 @@ import io import timeit from logging import INFO, WARN -from typing import List, Optional, Tuple, Union, cast +from typing import Optional, Union, cast import flwr.common.recordset_compat as compat from flwr.common import ( @@ -276,8 +276,8 @@ def default_fit_workflow( # pylint: disable=R0914 ) # Aggregate training results - results: 
List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + results: list[tuple[ClientProxy, FitRes]] = [] + failures: list[Union[tuple[ClientProxy, FitRes], BaseException]] = [] for msg in messages: if msg.has_content(): proxy = node_id_to_proxy[msg.metadata.src_node_id] @@ -362,8 +362,8 @@ def default_evaluate_workflow(driver: Driver, context: Context) -> None: ) # Aggregate the evaluation results - results: List[Tuple[ClientProxy, EvaluateRes]] = [] - failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]] = [] + results: list[tuple[ClientProxy, EvaluateRes]] = [] + failures: list[Union[tuple[ClientProxy, EvaluateRes], BaseException]] = [] for msg in messages: if msg.has_content(): proxy = node_id_to_proxy[msg.metadata.src_node_id] diff --git a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py index 322e32ed5019..d84a5496dfe1 100644 --- a/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py +++ b/src/py/flwr/server/workflow/secure_aggregation/secaggplus_workflow.py @@ -18,7 +18,7 @@ import random from dataclasses import dataclass, field from logging import DEBUG, ERROR, INFO, WARN -from typing import Dict, List, Optional, Set, Tuple, Union, cast +from typing import Optional, Union, cast import flwr.common.recordset_compat as compat from flwr.common import ( @@ -65,23 +65,23 @@ class WorkflowState: # pylint: disable=R0902 """The state of the SecAgg+ protocol.""" - nid_to_proxies: Dict[int, ClientProxy] = field(default_factory=dict) - nid_to_fitins: Dict[int, RecordSet] = field(default_factory=dict) - sampled_node_ids: Set[int] = field(default_factory=set) - active_node_ids: Set[int] = field(default_factory=set) + nid_to_proxies: dict[int, ClientProxy] = field(default_factory=dict) + nid_to_fitins: dict[int, RecordSet] = field(default_factory=dict) + sampled_node_ids: set[int] = 
field(default_factory=set) + active_node_ids: set[int] = field(default_factory=set) num_shares: int = 0 threshold: int = 0 clipping_range: float = 0.0 quantization_range: int = 0 mod_range: int = 0 max_weight: float = 0.0 - nid_to_neighbours: Dict[int, Set[int]] = field(default_factory=dict) - nid_to_publickeys: Dict[int, List[bytes]] = field(default_factory=dict) - forward_srcs: Dict[int, List[int]] = field(default_factory=dict) - forward_ciphertexts: Dict[int, List[bytes]] = field(default_factory=dict) + nid_to_neighbours: dict[int, set[int]] = field(default_factory=dict) + nid_to_publickeys: dict[int, list[bytes]] = field(default_factory=dict) + forward_srcs: dict[int, list[int]] = field(default_factory=dict) + forward_ciphertexts: dict[int, list[bytes]] = field(default_factory=dict) aggregate_ndarrays: NDArrays = field(default_factory=list) - legacy_results: List[Tuple[ClientProxy, FitRes]] = field(default_factory=list) - failures: List[Exception] = field(default_factory=list) + legacy_results: list[tuple[ClientProxy, FitRes]] = field(default_factory=list) + failures: list[Exception] = field(default_factory=list) class SecAggPlusWorkflow: @@ -444,13 +444,13 @@ def make(nid: int) -> Message: ) # Build forward packet list dictionary - srcs: List[int] = [] - dsts: List[int] = [] - ciphertexts: List[bytes] = [] - fwd_ciphertexts: Dict[int, List[bytes]] = { + srcs: list[int] = [] + dsts: list[int] = [] + ciphertexts: list[bytes] = [] + fwd_ciphertexts: dict[int, list[bytes]] = { nid: [] for nid in state.active_node_ids } # dest node ID -> list of ciphertexts - fwd_srcs: Dict[int, List[int]] = { + fwd_srcs: dict[int, list[int]] = { nid: [] for nid in state.active_node_ids } # dest node ID -> list of src node IDs for msg in msgs: @@ -459,8 +459,8 @@ def make(nid: int) -> Message: continue node_id = msg.metadata.src_node_id res_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] - dst_lst = cast(List[int], res_dict[Key.DESTINATION_LIST]) - ctxt_lst = 
cast(List[bytes], res_dict[Key.CIPHERTEXT_LIST]) + dst_lst = cast(list[int], res_dict[Key.DESTINATION_LIST]) + ctxt_lst = cast(list[bytes], res_dict[Key.CIPHERTEXT_LIST]) srcs += [node_id] * len(dst_lst) dsts += dst_lst ciphertexts += ctxt_lst @@ -525,7 +525,7 @@ def make(nid: int) -> Message: state.failures.append(Exception(msg.error)) continue res_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] - bytes_list = cast(List[bytes], res_dict[Key.MASKED_PARAMETERS]) + bytes_list = cast(list[bytes], res_dict[Key.MASKED_PARAMETERS]) client_masked_vec = [bytes_to_ndarray(b) for b in bytes_list] if masked_vector is None: masked_vector = client_masked_vec @@ -592,7 +592,7 @@ def make(nid: int) -> Message: ) # Build collected shares dict - collected_shares_dict: Dict[int, List[bytes]] = {} + collected_shares_dict: dict[int, list[bytes]] = {} for nid in state.sampled_node_ids: collected_shares_dict[nid] = [] for msg in msgs: @@ -600,8 +600,8 @@ def make(nid: int) -> Message: state.failures.append(Exception(msg.error)) continue res_dict = msg.content.configs_records[RECORD_KEY_CONFIGS] - nids = cast(List[int], res_dict[Key.NODE_ID_LIST]) - shares = cast(List[bytes], res_dict[Key.SHARE_LIST]) + nids = cast(list[int], res_dict[Key.NODE_ID_LIST]) + shares = cast(list[bytes], res_dict[Key.SHARE_LIST]) for owner_nid, share in zip(nids, shares): collected_shares_dict[owner_nid].append(share) diff --git a/src/py/flwr/simulation/app.py b/src/py/flwr/simulation/app.py index 973a9a89e652..62efc5197d3f 100644 --- a/src/py/flwr/simulation/app.py +++ b/src/py/flwr/simulation/app.py @@ -22,7 +22,7 @@ import traceback import warnings from logging import ERROR, INFO -from typing import Any, Dict, List, Optional, Type, Union +from typing import Any, Optional, Union import ray from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy @@ -36,7 +36,7 @@ from flwr.server.server import Server, init_defaults, run_fl from flwr.server.server_config import ServerConfig from 
flwr.server.strategy import Strategy -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from flwr.simulation.ray_transport.ray_actor import ( ClientAppActor, VirtualClientEngineActor, @@ -72,7 +72,7 @@ """ -NodeToPartitionMapping = Dict[int, int] +NodeToPartitionMapping = dict[int, int] def _create_node_id_to_partition_mapping( @@ -94,16 +94,16 @@ def start_simulation( *, client_fn: ClientFnExt, num_clients: int, - clients_ids: Optional[List[str]] = None, # UNSUPPORTED, WILL BE REMOVED - client_resources: Optional[Dict[str, float]] = None, + clients_ids: Optional[list[str]] = None, # UNSUPPORTED, WILL BE REMOVED + client_resources: Optional[dict[str, float]] = None, server: Optional[Server] = None, config: Optional[ServerConfig] = None, strategy: Optional[Strategy] = None, client_manager: Optional[ClientManager] = None, - ray_init_args: Optional[Dict[str, Any]] = None, + ray_init_args: Optional[dict[str, Any]] = None, keep_initialised: Optional[bool] = False, - actor_type: Type[VirtualClientEngineActor] = ClientAppActor, - actor_kwargs: Optional[Dict[str, Any]] = None, + actor_type: type[VirtualClientEngineActor] = ClientAppActor, + actor_kwargs: Optional[dict[str, Any]] = None, actor_scheduling: Union[str, NodeAffinitySchedulingStrategy] = "DEFAULT", ) -> History: """Start a Ray-based Flower simulation server. @@ -279,7 +279,7 @@ def start_simulation( # An actor factory. This is called N times to add N actors # to the pool. If at some point the pool can accommodate more actors # this will be called again. 
- def create_actor_fn() -> Type[VirtualClientEngineActor]: + def create_actor_fn() -> type[VirtualClientEngineActor]: return actor_type.options( # type: ignore **client_resources, scheduling_strategy=actor_scheduling, diff --git a/src/py/flwr/simulation/ray_transport/ray_actor.py b/src/py/flwr/simulation/ray_transport/ray_actor.py index b1c9d2b9c0c1..4fb48a99b689 100644 --- a/src/py/flwr/simulation/ray_transport/ray_actor.py +++ b/src/py/flwr/simulation/ray_transport/ray_actor.py @@ -17,7 +17,7 @@ import threading from abc import ABC from logging import DEBUG, ERROR, WARNING -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union +from typing import Any, Callable, Optional, Union import ray from ray import ObjectRef @@ -44,7 +44,7 @@ def run( message: Message, cid: str, context: Context, - ) -> Tuple[str, Message, Context]: + ) -> tuple[str, Message, Context]: """Run a client run.""" # Pass message through ClientApp and return a message # return also cid which is needed to ensure results @@ -81,7 +81,7 @@ def __init__(self, on_actor_init_fn: Optional[Callable[[], None]] = None) -> Non on_actor_init_fn() -def pool_size_from_resources(client_resources: Dict[str, Union[int, float]]) -> int: +def pool_size_from_resources(client_resources: dict[str, Union[int, float]]) -> int: """Calculate number of Actors that fit in the cluster. For this we consider the resources available on each node and those required per @@ -124,14 +124,14 @@ def pool_size_from_resources(client_resources: Dict[str, Union[int, float]]) -> WARNING, "The ActorPool is empty. The system (CPUs=%s, GPUs=%s) " "does not meet the criteria to host at least one client with resources:" - " %s. Lowering the `client_resources` could help.", + " %s. Lowering these resources could help.", num_cpus, num_gpus, client_resources, ) raise ValueError( "ActorPool is empty. Stopping Simulation. 
" - "Check 'client_resources' passed to `start_simulation`" + "Check `num_cpus` and/or `num_gpus` passed to the simulation engine" ) return total_num_actors @@ -162,9 +162,9 @@ class VirtualClientEngineActorPool(ActorPool): def __init__( self, - create_actor_fn: Callable[[], Type[VirtualClientEngineActor]], - client_resources: Dict[str, Union[int, float]], - actor_list: Optional[List[Type[VirtualClientEngineActor]]] = None, + create_actor_fn: Callable[[], type[VirtualClientEngineActor]], + client_resources: dict[str, Union[int, float]], + actor_list: Optional[list[type[VirtualClientEngineActor]]] = None, ): self.client_resources = client_resources self.create_actor_fn = create_actor_fn @@ -183,10 +183,10 @@ def __init__( # A dict that maps cid to another dict containing: a reference to the remote job # and its status (i.e. whether it is ready or not) - self._cid_to_future: Dict[ - str, Dict[str, Union[bool, Optional[ObjectRef[Any]]]] + self._cid_to_future: dict[ + str, dict[str, Union[bool, Optional[ObjectRef[Any]]]] ] = {} - self.actor_to_remove: Set[str] = set() # a set + self.actor_to_remove: set[str] = set() # a set self.num_actors = len(actors) self.lock = threading.RLock() @@ -210,7 +210,7 @@ def add_actors_to_pool(self, num_actors: int) -> None: self._idle_actors.extend(new_actors) self.num_actors += num_actors - def submit(self, fn: Any, value: Tuple[ClientAppFn, Message, str, Context]) -> None: + def submit(self, fn: Any, value: tuple[ClientAppFn, Message, str, Context]) -> None: """Take an idle actor and assign it to run a client app and Message. 
Submit a job to an actor by first removing it from the list of idle actors, then @@ -220,7 +220,7 @@ def submit(self, fn: Any, value: Tuple[ClientAppFn, Message, str, Context]) -> N actor = self._idle_actors.pop() if self._check_and_remove_actor_from_pool(actor): future = fn(actor, app_fn, mssg, cid, context) - future_key = tuple(future) if isinstance(future, List) else future + future_key = tuple(future) if isinstance(future, list) else future self._future_to_actor[future_key] = (self._next_task_index, actor, cid) self._next_task_index += 1 @@ -228,7 +228,7 @@ def submit(self, fn: Any, value: Tuple[ClientAppFn, Message, str, Context]) -> N self._cid_to_future[cid]["future"] = future_key def submit_client_job( - self, actor_fn: Any, job: Tuple[ClientAppFn, Message, str, Context] + self, actor_fn: Any, job: tuple[ClientAppFn, Message, str, Context] ) -> None: """Submit a job while tracking client ids.""" _, _, cid, _ = job @@ -268,7 +268,7 @@ def _is_future_ready(self, cid: str) -> bool: return self._cid_to_future[cid]["ready"] # type: ignore - def _fetch_future_result(self, cid: str) -> Tuple[Message, Context]: + def _fetch_future_result(self, cid: str) -> tuple[Message, Context]: """Fetch result and updated context for a VirtualClient from Object Store. The job submitted by the ClientProxy interfacing with client with cid=cid is @@ -382,7 +382,7 @@ def process_unordered_future(self, timeout: Optional[float] = None) -> None: def get_client_result( self, cid: str, timeout: Optional[float] - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Get result from VirtualClient with specific cid.""" # Loop until all jobs submitted to the pool are completed. 
Break early # if the result for the ClientProxy calling this method is ready @@ -403,14 +403,14 @@ class BasicActorPool: def __init__( self, - actor_type: Type[VirtualClientEngineActor], - client_resources: Dict[str, Union[int, float]], - actor_kwargs: Dict[str, Any], + actor_type: type[VirtualClientEngineActor], + client_resources: dict[str, Union[int, float]], + actor_kwargs: dict[str, Any], ): self.client_resources = client_resources # Queue of idle actors - self.pool: List[VirtualClientEngineActor] = [] + self.pool: list[VirtualClientEngineActor] = [] self.num_actors = 0 # Resolve arguments to pass during actor init @@ -424,7 +424,7 @@ def __init__( # Figure out how many actors can be created given the cluster resources # and the resources the user indicates each VirtualClient will need self.actors_capacity = pool_size_from_resources(client_resources) - self._future_to_actor: Dict[Any, VirtualClientEngineActor] = {} + self._future_to_actor: dict[Any, VirtualClientEngineActor] = {} def is_actor_available(self) -> bool: """Return true if there is an idle actor.""" @@ -450,7 +450,7 @@ def terminate_all_actors(self) -> None: log(DEBUG, "Terminated %i actors", num_terminated) def submit( - self, actor_fn: Any, job: Tuple[ClientAppFn, Message, str, Context] + self, actor_fn: Any, job: tuple[ClientAppFn, Message, str, Context] ) -> Any: """On idle actor, submit job and return future.""" # Remove idle actor from pool @@ -470,7 +470,7 @@ def add_actor_back_to_pool(self, future: Any) -> None: def fetch_result_and_return_actor_to_pool( self, future: Any - ) -> Tuple[Message, Context]: + ) -> tuple[Message, Context]: """Pull result given a future and add actor back to pool.""" # Retrieve result for object store # Instead of doing ray.get(future) we await it diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py index 90e932aa8015..a5d4b27d3e5a 100644 --- 
a/src/py/flwr/simulation/ray_transport/ray_client_proxy.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy.py @@ -22,7 +22,7 @@ from flwr import common from flwr.client import ClientFnExt from flwr.client.client_app import ClientApp -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common import DEFAULT_TTL, Message, Metadata, RecordSet from flwr.common.constant import ( NUM_PARTITIONS_KEY, @@ -48,7 +48,7 @@ class RayActorClientProxy(ClientProxy): """Flower client proxy which delegates work using Ray.""" - def __init__( # pylint: disable=too-many-arguments + def __init__( # pylint: disable=too-many-arguments,too-many-positional-arguments self, client_fn: ClientFnExt, node_id: int, @@ -65,7 +65,7 @@ def _load_app() -> ClientApp: self.app_fn = _load_app self.actor_pool = actor_pool - self.proxy_state = NodeState( + self.proxy_state = DeprecatedRunInfoStore( node_id=node_id, node_config={ PARTITION_ID_KEY: str(partition_id), diff --git a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py index 1c2aa455d9cd..780092ecb78e 100644 --- a/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py +++ b/src/py/flwr/simulation/ray_transport/ray_client_proxy_test.py @@ -17,13 +17,12 @@ from math import pi from random import shuffle -from typing import Dict, List, Tuple, Type import ray from flwr.client import Client, NumPyClient from flwr.client.client_app import ClientApp -from flwr.client.node_state import NodeState +from flwr.client.run_info_store import DeprecatedRunInfoStore from flwr.common import ( DEFAULT_TTL, Config, @@ -60,7 +59,7 @@ def __init__(self, node_id: int, state: RecordSet) -> None: self.node_id = node_id self.client_state = state - def get_properties(self, config: Config) -> Dict[str, Scalar]: + def get_properties(self, config: Config) -> dict[str, Scalar]: """Return properties by doing a simple 
calculation.""" result = self.node_id * pi # store something in context @@ -76,14 +75,14 @@ def get_dummy_client(context: Context) -> Client: def prep( - actor_type: Type[VirtualClientEngineActor] = ClientAppActor, -) -> Tuple[ - List[RayActorClientProxy], VirtualClientEngineActorPool, NodeToPartitionMapping + actor_type: type[VirtualClientEngineActor] = ClientAppActor, +) -> tuple[ + list[RayActorClientProxy], VirtualClientEngineActorPool, NodeToPartitionMapping ]: # pragma: no cover """Prepare ClientProxies and pool for tests.""" client_resources = {"num_cpus": 1, "num_gpus": 0.0} - def create_actor_fn() -> Type[VirtualClientEngineActor]: + def create_actor_fn() -> type[VirtualClientEngineActor]: return actor_type.options(**client_resources).remote() # type: ignore # Create actor pool @@ -143,7 +142,7 @@ def test_cid_consistency_all_submit_first_run_consistency() -> None: """Test that ClientProxies get the result of client job they submit. All jobs are submitted at the same time. Then fetched one at a time. This also tests - NodeState (at each Proxy) and RunState basic functionality. + DeprecatedRunInfoStore (at each Proxy) and RunState basic functionality. 
""" proxies, _, _ = prep() run_id = 0 @@ -194,10 +193,10 @@ def test_cid_consistency_without_proxies() -> None: _, pool, mapping = prep() node_ids = list(mapping.keys()) - # register node states - node_states: Dict[int, NodeState] = {} + # register DeprecatedRunInfoStores + node_info_stores: dict[int, DeprecatedRunInfoStore] = {} for node_id, partition_id in mapping.items(): - node_states[node_id] = NodeState( + node_info_stores[node_id] = DeprecatedRunInfoStore( node_id=node_id, node_config={ PARTITION_ID_KEY: str(partition_id), @@ -229,8 +228,8 @@ def _load_app() -> ClientApp: ), ) # register and retrieve context - node_states[node_id].register_context(run_id=run_id) - context = node_states[node_id].retrieve_context(run_id=run_id) + node_info_stores[node_id].register_context(run_id=run_id) + context = node_info_stores[node_id].retrieve_context(run_id=run_id) partition_id_str = str(context.node_config[PARTITION_ID_KEY]) pool.submit_client_job( lambda a, c_fn, j_fn, nid_, state: a.run.remote(c_fn, j_fn, nid_, state), diff --git a/src/py/flwr/simulation/run_simulation.py b/src/py/flwr/simulation/run_simulation.py index af12da4a5814..e9b2352e0c0c 100644 --- a/src/py/flwr/simulation/run_simulation.py +++ b/src/py/flwr/simulation/run_simulation.py @@ -25,7 +25,7 @@ from logging import DEBUG, ERROR, INFO, WARNING from pathlib import Path from time import sleep -from typing import Any, List, Optional +from typing import Any, Optional from flwr.cli.config_utils import load_and_validate from flwr.client import ClientApp @@ -44,8 +44,8 @@ from flwr.server.server_app import ServerApp from flwr.server.superlink.fleet import vce from flwr.server.superlink.fleet.vce.backend.backend import BackendConfig -from flwr.server.superlink.state import StateFactory -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate import LinkStateFactory +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from 
flwr.simulation.ray_transport.utils import ( enable_tf_gpu_growth as enable_gpu_growth, ) @@ -56,7 +56,7 @@ def _check_args_do_not_interfere(args: Namespace) -> bool: mode_one_args = ["app", "run_config"] mode_two_args = ["client_app", "server_app"] - def _resolve_message(conflict_keys: List[str]) -> str: + def _resolve_message(conflict_keys: list[str]) -> str: return ",".join([f"`--{key}`".replace("_", "-") for key in conflict_keys]) # When passing `--app`, `--app-dir` is ignored @@ -109,6 +109,11 @@ def run_simulation_from_cli() -> None: """Run Simulation Engine from the CLI.""" args = _parse_args_run_simulation().parse_args() + event( + EventType.CLI_FLOWER_SIMULATION_ENTER, + event_details={"backend": args.backend, "num-supernodes": args.num_supernodes}, + ) + # Add warnings for deprecated server_app and client_app arguments if args.server_app: warn_deprecated_feature( @@ -177,7 +182,9 @@ def run_simulation_from_cli() -> None: client_app_attr = app_components["clientapp"] server_app_attr = app_components["serverapp"] - override_config = parse_config_args([args.run_config]) + override_config = parse_config_args( + [args.run_config] if args.run_config else args.run_config + ) fused_config = get_fused_config_from_dir(app_path, override_config) app_dir = args.app is_app = True @@ -209,14 +216,16 @@ def run_simulation_from_cli() -> None: app_dir=app_dir, run=run, enable_tf_gpu_growth=args.enable_tf_gpu_growth, + delay_start=args.delay_start, verbose_logging=args.verbose, server_app_run_config=fused_config, is_app=is_app, + exit_event=EventType.CLI_FLOWER_SIMULATION_LEAVE, ) # Entry point from Python session (script or notebook) -# pylint: disable=too-many-arguments +# pylint: disable=too-many-arguments,too-many-positional-arguments def run_simulation( server_app: ServerApp, client_app: ClientApp, @@ -265,6 +274,11 @@ def run_simulation( When disabled, only INFO, WARNING and ERROR log messages will be shown. If enabled, DEBUG-level logs will be displayed. 
""" + event( + EventType.PYTHON_API_RUN_SIMULATION_ENTER, + event_details={"backend": backend_name, "num-supernodes": num_supernodes}, + ) + if enable_tf_gpu_growth: warn_deprecated_feature_with_example( "Passing `enable_tf_gpu_growth=True` is deprecated.", @@ -282,10 +296,11 @@ def run_simulation( backend_config=backend_config, enable_tf_gpu_growth=enable_tf_gpu_growth, verbose_logging=verbose_logging, + exit_event=EventType.PYTHON_API_RUN_SIMULATION_LEAVE, ) -# pylint: disable=too-many-arguments +# pylint: disable=too-many-arguments,too-many-positional-arguments def run_serverapp_th( server_app_attr: Optional[str], server_app: Optional[ServerApp], @@ -295,7 +310,6 @@ def run_serverapp_th( f_stop: threading.Event, has_exception: threading.Event, enable_tf_gpu_growth: bool, - delay_launch: int = 3, ) -> threading.Thread: """Run SeverApp in a thread.""" @@ -351,12 +365,11 @@ def server_th_with_start_checks( server_app, ), ) - sleep(delay_launch) serverapp_th.start() return serverapp_th -# pylint: disable=too-many-locals +# pylint: disable=too-many-locals,too-many-positional-arguments def _main_loop( num_supernodes: int, backend_name: str, @@ -365,6 +378,8 @@ def _main_loop( is_app: bool, enable_tf_gpu_growth: bool, run: Run, + exit_event: EventType, + delay_start: int, flwr_dir: Optional[str] = None, client_app: Optional[ClientApp] = None, client_app_attr: Optional[str] = None, @@ -372,14 +387,15 @@ def _main_loop( server_app_attr: Optional[str] = None, server_app_run_config: Optional[UserConfig] = None, ) -> None: - """Launch SuperLink with Simulation Engine, then ServerApp on a separate thread.""" + """Start ServerApp on a separate thread, then launch Simulation Engine.""" # Initialize StateFactory - state_factory = StateFactory(":flwr-in-memory-state:") + state_factory = LinkStateFactory(":flwr-in-memory-state:") f_stop = threading.Event() # A Threading event to indicate if an exception was raised in the ServerApp thread server_app_thread_has_exception = 
threading.Event() serverapp_th = None + success = True try: # Register run log(DEBUG, "Pre-registering run with id %s", run.run_id) @@ -403,8 +419,10 @@ def _main_loop( enable_tf_gpu_growth=enable_tf_gpu_growth, ) - # SuperLink with Simulation Engine - event(EventType.RUN_SUPERLINK_ENTER) + # Buffer time so the `ServerApp` in separate thread is ready + log(DEBUG, "Buffer time delay: %ds", delay_start) + sleep(delay_start) + # Start Simulation Engine vce.start_vce( num_supernodes=num_supernodes, client_app_attr=client_app_attr, @@ -422,13 +440,13 @@ def _main_loop( except Exception as ex: log(ERROR, "An exception occurred !! %s", ex) log(ERROR, traceback.format_exc()) + success = False raise RuntimeError("An error was encountered. Ending simulation.") from ex finally: # Trigger stop event f_stop.set() - - event(EventType.RUN_SUPERLINK_LEAVE) + event(exit_event, event_details={"success": success}) if serverapp_th: serverapp_th.join() if server_app_thread_has_exception.is_set(): @@ -437,9 +455,10 @@ def _main_loop( log(DEBUG, "Stopping Simulation Engine now.") -# pylint: disable=too-many-arguments,too-many-locals +# pylint: disable=too-many-arguments,too-many-locals,too-many-positional-arguments def _run_simulation( num_supernodes: int, + exit_event: EventType, client_app: Optional[ClientApp] = None, server_app: Optional[ServerApp] = None, backend_name: str = "ray", @@ -451,6 +470,7 @@ def _run_simulation( flwr_dir: Optional[str] = None, run: Optional[Run] = None, enable_tf_gpu_growth: bool = False, + delay_start: int = 5, verbose_logging: bool = False, is_app: bool = False, ) -> None: @@ -506,6 +526,8 @@ def _run_simulation( is_app, enable_tf_gpu_growth, run, + exit_event, + delay_start, flwr_dir, client_app, client_app_attr, @@ -593,6 +615,13 @@ def _parse_args_run_simulation() -> argparse.ArgumentParser: "Read more about how `tf.config.experimental.set_memory_growth()` works in " "the TensorFlow documentation: https://www.tensorflow.org/api/stable.", ) + 
parser.add_argument( + "--delay-start", + type=int, + default=3, + help="Buffer time (in seconds) to delay the start the simulation engine after " + "the `ServerApp`, which runs in a separate thread, has been launched.", + ) parser.add_argument( "--verbose", action="store_true", diff --git a/src/py/flwr/superexec/__init__.py b/src/py/flwr/superexec/__init__.py index a510c41f4182..0584ca663a02 100644 --- a/src/py/flwr/superexec/__init__.py +++ b/src/py/flwr/superexec/__init__.py @@ -13,9 +13,3 @@ # limitations under the License. # ============================================================================== """Flower SuperExec service.""" - -from .app import run_superexec as run_superexec - -__all__ = [ - "run_superexec", -] diff --git a/src/py/flwr/superexec/app.py b/src/py/flwr/superexec/app.py index 9510479ec8e1..4dcdfeefc4c9 100644 --- a/src/py/flwr/superexec/app.py +++ b/src/py/flwr/superexec/app.py @@ -18,15 +18,16 @@ import sys from logging import INFO, WARN from pathlib import Path -from typing import Optional, Tuple +from typing import Optional import grpc from flwr.common import EventType, event, log from flwr.common.address import parse_address from flwr.common.config import parse_config_args -from flwr.common.constant import SUPEREXEC_DEFAULT_ADDRESS +from flwr.common.constant import EXEC_API_DEFAULT_ADDRESS from flwr.common.exit_handlers import register_exit_handlers +from flwr.common.logger import warn_deprecated_feature from flwr.common.object_ref import load_app, validate from .exec_grpc import run_superexec_api_grpc @@ -37,6 +38,12 @@ def run_superexec() -> None: """Run Flower SuperExec.""" log(INFO, "Starting Flower SuperExec") + warn_deprecated_feature( + "Manually launching the SuperExec is deprecated. Since `flwr 1.13.0` " + "the executor service runs in the SuperLink. Launching it manually is not " + "recommended." 
+ ) + event(EventType.RUN_SUPEREXEC_ENTER) args = _parse_args_run_superexec().parse_args() @@ -54,9 +61,11 @@ def run_superexec() -> None: # Start SuperExec API superexec_server: grpc.Server = run_superexec_api_grpc( address=address, - executor=_load_executor(args), + executor=load_executor(args), certificates=certificates, - config=parse_config_args([args.executor_config]), + config=parse_config_args( + [args.executor_config] if args.executor_config else args.executor_config + ), ) grpc_servers = [superexec_server] @@ -79,7 +88,7 @@ def _parse_args_run_superexec() -> argparse.ArgumentParser: parser.add_argument( "--address", help="SuperExec (gRPC) server address (IPv4, IPv6, or a domain name)", - default=SUPEREXEC_DEFAULT_ADDRESS, + default=EXEC_API_DEFAULT_ADDRESS, ) parser.add_argument( "--executor", @@ -128,7 +137,7 @@ def _parse_args_run_superexec() -> argparse.ArgumentParser: def _try_obtain_certificates( args: argparse.Namespace, -) -> Optional[Tuple[bytes, bytes, bytes]]: +) -> Optional[tuple[bytes, bytes, bytes]]: # Obtain certificates if args.insecure: log(WARN, "Option `--insecure` was set. 
Starting insecure HTTP server.") @@ -161,7 +170,7 @@ def _try_obtain_certificates( ) -def _load_executor( +def load_executor( args: argparse.Namespace, ) -> Executor: """Get the executor plugin.""" diff --git a/src/py/flwr/superexec/deployment.py b/src/py/flwr/superexec/deployment.py index 2354e047a1ec..331fd817228e 100644 --- a/src/py/flwr/superexec/deployment.py +++ b/src/py/flwr/superexec/deployment.py @@ -23,13 +23,13 @@ from typing_extensions import override from flwr.cli.install import install_from_fab +from flwr.common.constant import DRIVER_API_DEFAULT_ADDRESS from flwr.common.grpc import create_channel from flwr.common.logger import log from flwr.common.serde import fab_to_proto, user_config_to_proto from flwr.common.typing import Fab, UserConfig -from flwr.proto.driver_pb2 import CreateRunRequest # pylint: disable=E0611 from flwr.proto.driver_pb2_grpc import DriverStub -from flwr.server.driver.grpc_driver import DEFAULT_SERVER_ADDRESS_DRIVER +from flwr.proto.run_pb2 import CreateRunRequest # pylint: disable=E0611 from .executor import Executor, RunTracker @@ -50,7 +50,7 @@ class DeploymentEngine(Executor): def __init__( self, - superlink: str = DEFAULT_SERVER_ADDRESS_DRIVER, + superlink: str = DRIVER_API_DEFAULT_ADDRESS, root_certificates: Optional[str] = None, flwr_dir: Optional[str] = None, ) -> None: @@ -167,6 +167,8 @@ def start_run( # Execute the command proc = subprocess.Popen( # pylint: disable=consider-using-with command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, text=True, ) log(INFO, "Started run %s", str(run_id)) diff --git a/src/py/flwr/superexec/exec_grpc.py b/src/py/flwr/superexec/exec_grpc.py index a32ebc1b3e35..017395bc8002 100644 --- a/src/py/flwr/superexec/exec_grpc.py +++ b/src/py/flwr/superexec/exec_grpc.py @@ -15,7 +15,7 @@ """SuperExec gRPC API.""" from logging import INFO -from typing import Optional, Tuple +from typing import Optional import grpc @@ -32,7 +32,7 @@ def run_superexec_api_grpc( address: str, executor: 
Executor, - certificates: Optional[Tuple[bytes, bytes, bytes]], + certificates: Optional[tuple[bytes, bytes, bytes]], config: UserConfig, ) -> grpc.Server: """Run SuperExec API (gRPC, request-response).""" diff --git a/src/py/flwr/superexec/exec_servicer.py b/src/py/flwr/superexec/exec_servicer.py index dda3e96994de..ebb12b5ddbd2 100644 --- a/src/py/flwr/superexec/exec_servicer.py +++ b/src/py/flwr/superexec/exec_servicer.py @@ -15,8 +15,13 @@ """SuperExec API servicer.""" +import select +import sys +import threading +import time +from collections.abc import Generator from logging import ERROR, INFO -from typing import Any, Dict, Generator +from typing import Any import grpc @@ -32,13 +37,15 @@ from .executor import Executor, RunTracker +SELECT_TIMEOUT = 1 # Timeout for selecting ready-to-read file descriptors (in seconds) + class ExecServicer(exec_pb2_grpc.ExecServicer): """SuperExec API servicer.""" def __init__(self, executor: Executor) -> None: self.executor = executor - self.runs: Dict[int, RunTracker] = {} + self.runs: dict[int, RunTracker] = {} def StartRun( self, request: StartRunRequest, context: grpc.ServicerContext @@ -58,13 +65,72 @@ def StartRun( self.runs[run.run_id] = run + # Start a background thread to capture the log output + capture_thread = threading.Thread( + target=_capture_logs, args=(run,), daemon=True + ) + capture_thread.start() + return StartRunResponse(run_id=run.run_id) - def StreamLogs( + def StreamLogs( # pylint: disable=C0103 self, request: StreamLogsRequest, context: grpc.ServicerContext ) -> Generator[StreamLogsResponse, Any, None]: """Get logs.""" - logs = ["a", "b", "c"] + log(INFO, "ExecServicer.StreamLogs") + + # Exit if `run_id` not found + if request.run_id not in self.runs: + context.abort(grpc.StatusCode.NOT_FOUND, "Run ID not found") + + last_sent_index = 0 while context.is_active(): - for i in range(len(logs)): # pylint: disable=C0200 + # Yield n'th row of logs, if n'th row < len(logs) + logs = 
self.runs[request.run_id].logs + for i in range(last_sent_index, len(logs)): yield StreamLogsResponse(log_output=logs[i]) + last_sent_index = len(logs) + + # Wait for and continue to yield more log responses only if the + # run isn't completed yet. If the run is finished, the entire log + # is returned at this point and the server ends the stream. + if self.runs[request.run_id].proc.poll() is not None: + log(INFO, "All logs for run ID `%s` returned", request.run_id) + context.set_code(grpc.StatusCode.OK) + context.cancel() + + time.sleep(1.0) # Sleep briefly to avoid busy waiting + + +def _capture_logs( + run: RunTracker, +) -> None: + while True: + # Explicitly check if Popen.poll() is None. Required for `pytest`. + if run.proc.poll() is None: + # Select streams only when ready to read + ready_to_read, _, _ = select.select( + [run.proc.stdout, run.proc.stderr], + [], + [], + SELECT_TIMEOUT, + ) + # Read from std* and append to RunTracker.logs + for stream in ready_to_read: + # Flush stdout to view output in real time + readline = stream.readline() + sys.stdout.write(readline) + sys.stdout.flush() + # Append to logs + line = readline.rstrip() + if line: + run.logs.append(f"{line}") + + # Close std* to prevent blocking + elif run.proc.poll() is not None: + log(INFO, "Subprocess finished, exiting log capture") + if run.proc.stdout: + run.proc.stdout.close() + if run.proc.stderr: + run.proc.stderr.close() + break diff --git a/src/py/flwr/superexec/exec_servicer_test.py b/src/py/flwr/superexec/exec_servicer_test.py index 83717d63a36e..b777bc806fe5 100644 --- a/src/py/flwr/superexec/exec_servicer_test.py +++ b/src/py/flwr/superexec/exec_servicer_test.py @@ -16,11 +16,11 @@ import subprocess -from unittest.mock import MagicMock +from unittest.mock import MagicMock, Mock from flwr.proto.exec_pb2 import StartRunRequest # pylint: disable=E0611 -from .exec_servicer import ExecServicer +from .exec_servicer import ExecServicer, _capture_logs def test_start_run() -> None: @@ 
-50,3 +50,20 @@ def test_start_run() -> None: response = servicer.StartRun(request, context_mock) assert response.run_id == 10 + + +def test_capture_logs() -> None: + """Test capture_logs function.""" + run_res = Mock() + run_res.logs = [] + with subprocess.Popen( + ["echo", "success"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) as proc: + run_res.proc = proc + _capture_logs(run_res) + + assert len(run_res.logs) == 1 + assert run_res.logs[0] == "success" diff --git a/src/py/flwr/superexec/executor.py b/src/py/flwr/superexec/executor.py index 8d630d108b66..08b66a438e4d 100644 --- a/src/py/flwr/superexec/executor.py +++ b/src/py/flwr/superexec/executor.py @@ -15,7 +15,7 @@ """Execute and monitor a Flower run.""" from abc import ABC, abstractmethod -from dataclasses import dataclass +from dataclasses import dataclass, field from subprocess import Popen from typing import Optional @@ -28,6 +28,7 @@ class RunTracker: run_id: int proc: Popen # type: ignore + logs: list[str] = field(default_factory=list) class Executor(ABC): diff --git a/src/py/flwr/superexec/simulation.py b/src/py/flwr/superexec/simulation.py index e913b6812556..820d80a89ac7 100644 --- a/src/py/flwr/superexec/simulation.py +++ b/src/py/flwr/superexec/simulation.py @@ -29,7 +29,7 @@ from flwr.common.constant import RUN_ID_NUM_BYTES from flwr.common.logger import log from flwr.common.typing import UserConfig -from flwr.server.superlink.state.utils import generate_rand_int_from_bytes +from flwr.server.superlink.linkstate.utils import generate_rand_int_from_bytes from .executor import Executor, RunTracker diff --git a/src/py/flwr_tool/protoc_test.py b/src/py/flwr_tool/protoc_test.py index 6f9127304f25..f0784a4498d2 100644 --- a/src/py/flwr_tool/protoc_test.py +++ b/src/py/flwr_tool/protoc_test.py @@ -28,4 +28,4 @@ def test_directories() -> None: def test_proto_file_count() -> None: """Test if the correct number of proto files were captured by the glob.""" - assert len(PROTO_FILES) 
== 13 + assert len(PROTO_FILES) == 14 diff --git a/taplo.toml b/taplo.toml new file mode 100644 index 000000000000..23531011a9f7 --- /dev/null +++ b/taplo.toml @@ -0,0 +1,24 @@ +include = ["**/*.toml"] +exclude = ["baselines/**", "datasets/**"] + +[formatting] +align_comments = false +# Defaults below +align_entries = false +array_trailing_comma = true +array_auto_expand = true +array_auto_collapse = true +compact_arrays = true +compact_inline_tables = false +inline_table_expand = true +compact_entries = false +column_width = 80 +indent_tables = false +indent_entries = false +indent_string = " " +trailing_newline = true +reorder_keys = false +reorder_arrays = false +reorder_inline_tables = false +allowed_blank_lines = 2 +crlf = false